shipwright-cli 1.7.1 → 1.10.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (115)
  1. package/.claude/agents/code-reviewer.md +90 -0
  2. package/.claude/agents/devops-engineer.md +142 -0
  3. package/.claude/agents/pipeline-agent.md +80 -0
  4. package/.claude/agents/shell-script-specialist.md +150 -0
  5. package/.claude/agents/test-specialist.md +196 -0
  6. package/.claude/hooks/post-tool-use.sh +45 -0
  7. package/.claude/hooks/pre-tool-use.sh +25 -0
  8. package/.claude/hooks/session-started.sh +37 -0
  9. package/README.md +212 -814
  10. package/claude-code/CLAUDE.md.shipwright +54 -0
  11. package/claude-code/hooks/notify-idle.sh +2 -2
  12. package/claude-code/hooks/session-start.sh +24 -0
  13. package/claude-code/hooks/task-completed.sh +6 -2
  14. package/claude-code/settings.json.template +12 -0
  15. package/dashboard/public/app.js +4422 -0
  16. package/dashboard/public/index.html +816 -0
  17. package/dashboard/public/styles.css +4755 -0
  18. package/dashboard/server.ts +4315 -0
  19. package/docs/KNOWN-ISSUES.md +18 -10
  20. package/docs/TIPS.md +38 -26
  21. package/docs/patterns/README.md +33 -23
  22. package/package.json +9 -5
  23. package/scripts/adapters/iterm2-adapter.sh +1 -1
  24. package/scripts/adapters/tmux-adapter.sh +52 -23
  25. package/scripts/adapters/wezterm-adapter.sh +26 -14
  26. package/scripts/lib/compat.sh +200 -0
  27. package/scripts/lib/helpers.sh +72 -0
  28. package/scripts/postinstall.mjs +72 -13
  29. package/scripts/{cct → sw} +118 -22
  30. package/scripts/sw-adversarial.sh +274 -0
  31. package/scripts/sw-architecture-enforcer.sh +330 -0
  32. package/scripts/sw-checkpoint.sh +468 -0
  33. package/scripts/sw-cleanup.sh +359 -0
  34. package/scripts/sw-connect.sh +619 -0
  35. package/scripts/{cct-cost.sh → sw-cost.sh} +368 -34
  36. package/scripts/sw-daemon.sh +5574 -0
  37. package/scripts/sw-dashboard.sh +477 -0
  38. package/scripts/sw-developer-simulation.sh +252 -0
  39. package/scripts/sw-docs.sh +635 -0
  40. package/scripts/sw-doctor.sh +907 -0
  41. package/scripts/{cct-fix.sh → sw-fix.sh} +10 -6
  42. package/scripts/{cct-fleet.sh → sw-fleet.sh} +498 -22
  43. package/scripts/sw-github-checks.sh +521 -0
  44. package/scripts/sw-github-deploy.sh +533 -0
  45. package/scripts/sw-github-graphql.sh +972 -0
  46. package/scripts/sw-heartbeat.sh +293 -0
  47. package/scripts/{cct-init.sh → sw-init.sh} +144 -11
  48. package/scripts/sw-intelligence.sh +1196 -0
  49. package/scripts/sw-jira.sh +643 -0
  50. package/scripts/sw-launchd.sh +364 -0
  51. package/scripts/sw-linear.sh +648 -0
  52. package/scripts/{cct-logs.sh → sw-logs.sh} +72 -2
  53. package/scripts/sw-loop.sh +2217 -0
  54. package/scripts/{cct-memory.sh → sw-memory.sh} +514 -36
  55. package/scripts/sw-patrol-meta.sh +417 -0
  56. package/scripts/sw-pipeline-composer.sh +455 -0
  57. package/scripts/sw-pipeline-vitals.sh +1096 -0
  58. package/scripts/sw-pipeline.sh +7593 -0
  59. package/scripts/sw-predictive.sh +820 -0
  60. package/scripts/{cct-prep.sh → sw-prep.sh} +339 -49
  61. package/scripts/{cct-ps.sh → sw-ps.sh} +9 -6
  62. package/scripts/{cct-reaper.sh → sw-reaper.sh} +10 -6
  63. package/scripts/sw-remote.sh +687 -0
  64. package/scripts/sw-self-optimize.sh +1048 -0
  65. package/scripts/sw-session.sh +541 -0
  66. package/scripts/sw-setup.sh +234 -0
  67. package/scripts/sw-status.sh +796 -0
  68. package/scripts/{cct-templates.sh → sw-templates.sh} +9 -4
  69. package/scripts/sw-tmux.sh +591 -0
  70. package/scripts/sw-tracker-jira.sh +277 -0
  71. package/scripts/sw-tracker-linear.sh +292 -0
  72. package/scripts/sw-tracker.sh +409 -0
  73. package/scripts/{cct-upgrade.sh → sw-upgrade.sh} +103 -46
  74. package/scripts/{cct-worktree.sh → sw-worktree.sh} +3 -0
  75. package/templates/pipelines/autonomous.json +35 -6
  76. package/templates/pipelines/cost-aware.json +21 -0
  77. package/templates/pipelines/deployed.json +40 -6
  78. package/templates/pipelines/enterprise.json +16 -2
  79. package/templates/pipelines/fast.json +19 -0
  80. package/templates/pipelines/full.json +28 -2
  81. package/templates/pipelines/hotfix.json +19 -0
  82. package/templates/pipelines/standard.json +31 -0
  83. package/tmux/{claude-teams-overlay.conf → shipwright-overlay.conf} +27 -9
  84. package/tmux/templates/accessibility.json +34 -0
  85. package/tmux/templates/api-design.json +35 -0
  86. package/tmux/templates/architecture.json +1 -0
  87. package/tmux/templates/bug-fix.json +9 -0
  88. package/tmux/templates/code-review.json +1 -0
  89. package/tmux/templates/compliance.json +36 -0
  90. package/tmux/templates/data-pipeline.json +36 -0
  91. package/tmux/templates/debt-paydown.json +34 -0
  92. package/tmux/templates/devops.json +1 -0
  93. package/tmux/templates/documentation.json +1 -0
  94. package/tmux/templates/exploration.json +1 -0
  95. package/tmux/templates/feature-dev.json +1 -0
  96. package/tmux/templates/full-stack.json +8 -0
  97. package/tmux/templates/i18n.json +34 -0
  98. package/tmux/templates/incident-response.json +36 -0
  99. package/tmux/templates/migration.json +1 -0
  100. package/tmux/templates/observability.json +35 -0
  101. package/tmux/templates/onboarding.json +33 -0
  102. package/tmux/templates/performance.json +35 -0
  103. package/tmux/templates/refactor.json +1 -0
  104. package/tmux/templates/release.json +35 -0
  105. package/tmux/templates/security-audit.json +8 -0
  106. package/tmux/templates/spike.json +34 -0
  107. package/tmux/templates/testing.json +1 -0
  108. package/tmux/tmux.conf +98 -9
  109. package/scripts/cct-cleanup.sh +0 -172
  110. package/scripts/cct-daemon.sh +0 -3189
  111. package/scripts/cct-doctor.sh +0 -414
  112. package/scripts/cct-loop.sh +0 -1332
  113. package/scripts/cct-pipeline.sh +0 -3844
  114. package/scripts/cct-session.sh +0 -284
  115. package/scripts/cct-status.sh +0 -169
@@ -0,0 +1,1048 @@
1
+ #!/usr/bin/env bash
2
+ # ╔═══════════════════════════════════════════════════════════════════════════╗
3
+ # ║ shipwright self-optimize — Learning & Self-Tuning System ║
4
+ # ║ Outcome analysis · Template tuning · Model routing · Memory evolution ║
5
+ # ╚═══════════════════════════════════════════════════════════════════════════╝
6
+ set -euo pipefail
7
+ trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
8
+
9
+ VERSION="1.10.0"
10
+ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
11
+ REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
12
+
13
+ # ─── Colors (matches Seth's tmux theme) ─────────────────────────────────────
14
+ CYAN='\033[38;2;0;212;255m' # #00d4ff — primary accent
15
+ PURPLE='\033[38;2;124;58;237m' # #7c3aed — secondary
16
+ BLUE='\033[38;2;0;102;255m' # #0066ff — tertiary
17
+ GREEN='\033[38;2;74;222;128m' # success
18
+ YELLOW='\033[38;2;250;204;21m' # warning
19
+ RED='\033[38;2;248;113;113m' # error
20
+ DIM='\033[2m'
21
+ BOLD='\033[1m'
22
+ RESET='\033[0m'
23
+
24
+ # ─── Cross-platform compatibility ──────────────────────────────────────────
25
+ # shellcheck source=lib/compat.sh
26
+ [[ -f "$SCRIPT_DIR/lib/compat.sh" ]] && source "$SCRIPT_DIR/lib/compat.sh"
27
+
28
+ info() { echo -e "${CYAN}${BOLD}▸${RESET} $*"; }
29
+ success() { echo -e "${GREEN}${BOLD}✓${RESET} $*"; }
30
+ warn() { echo -e "${YELLOW}${BOLD}⚠${RESET} $*"; }
31
+ error() { echo -e "${RED}${BOLD}✗${RESET} $*" >&2; }
32
+
33
+ now_iso() { date -u +"%Y-%m-%dT%H:%M:%SZ"; }
34
+ now_epoch() { date +%s; }
35
+
# ─── Structured Event Log ────────────────────────────────────────────────────
# Append-only JSONL event stream shared by the shipwright tool suite.
EVENTS_FILE="${HOME}/.shipwright/events.jsonl"

# emit_event <type> [key=value ...]
# Append one JSON object {ts, ts_epoch, type, ...fields} to $EVENTS_FILE.
# Values that look like numbers are emitted as JSON numbers; everything else
# becomes a JSON string with backslashes and double quotes escaped.
# Relies on now_iso()/now_epoch() defined earlier in this file.
emit_event() {
  local event_type="$1"
  shift
  local json_fields=""
  for kv in "$@"; do
    local key="${kv%%=*}"
    local val="${kv#*=}"
    # FIX: strict numeric check. The old pattern (^-?[0-9]+\.?[0-9]*$)
    # accepted values like "5.", which is not valid bare JSON.
    if [[ "$val" =~ ^-?[0-9]+(\.[0-9]+)?$ ]]; then
      json_fields="${json_fields},\"${key}\":${val}"
    else
      # FIX: escape backslashes BEFORE quotes — the reverse order would
      # double-escape the backslashes introduced for the quotes, and the
      # old code did not escape backslashes at all (invalid JSON escapes).
      val="${val//\\/\\\\}"
      val="${val//\"/\\\"}"
      json_fields="${json_fields},\"${key}\":\"${val}\""
    fi
  done
  mkdir -p "${HOME}/.shipwright"
  echo "{\"ts\":\"$(now_iso)\",\"ts_epoch\":$(now_epoch),\"type\":\"${event_type}\"${json_fields}}" >> "$EVENTS_FILE"
}
56
+
# ─── Storage Paths ───────────────────────────────────────────────────────────
# All learning state lives under ~/.shipwright/optimization.
OPTIMIZATION_DIR="${HOME}/.shipwright/optimization"
OUTCOMES_FILE="${OPTIMIZATION_DIR}/outcomes.jsonl"
TEMPLATE_WEIGHTS_FILE="${OPTIMIZATION_DIR}/template-weights.json"
MODEL_ROUTING_FILE="${OPTIMIZATION_DIR}/model-routing.json"
ITERATION_MODEL_FILE="${OPTIMIZATION_DIR}/iteration-model.json"

# Create the optimization directory and seed each JSON store with an empty
# object so later jq reads never encounter a missing file. Existing files
# are left untouched.
ensure_optimization_dir() {
  mkdir -p "$OPTIMIZATION_DIR"
  local store
  for store in "$TEMPLATE_WEIGHTS_FILE" "$MODEL_ROUTING_FILE" "$ITERATION_MODEL_FILE"; do
    [[ -f "$store" ]] || echo '{}' > "$store"
  done
}
70
+
# ─── GitHub Metrics ──────────────────────────────────────────────────────

# _optimize_github_metrics
# Best-effort snapshot of CI health for the current repo, printed as JSON:
#   {ci_success_rate: <0-100>, ci_avg_duration_s: <seconds>}
# Prints "{}" and returns 0 whenever the gh_* helpers are unavailable, repo
# detection fails, or owner/repo cannot be determined — callers treat this
# data as strictly optional.
_optimize_github_metrics() {
  # The gh_* helpers are sourced elsewhere; bail out quietly without them.
  if ! type _gh_detect_repo &>/dev/null 2>&1; then
    echo "{}"
    return 0
  fi
  if ! _gh_detect_repo 2>/dev/null; then
    echo "{}"
    return 0
  fi

  local owner="${GH_OWNER:-}" repo="${GH_REPO:-}"
  if [[ -z "$owner" || -z "$repo" ]]; then
    echo "{}"
    return 0
  fi

  if ! type gh_actions_runs &>/dev/null 2>&1; then
    echo "{}"
    return 0
  fi

  # Last 50 workflow runs → overall success %, mean duration in seconds.
  local runs success_rate avg_duration
  runs=$(gh_actions_runs "$owner" "$repo" "" 50 2>/dev/null || echo "[]")
  success_rate=$(echo "$runs" | jq '[.[] | select(.conclusion == "success")] | length as $s | ([length, 1] | max) as $t | ($s / $t * 100) | floor' 2>/dev/null || echo "0")
  avg_duration=$(echo "$runs" | jq '[.[] | .duration_seconds // 0] | if length > 0 then add / length | floor else 0 end' 2>/dev/null || echo "0")
  jq -n --argjson rate "${success_rate:-0}" --argjson dur "${avg_duration:-0}" \
    '{ci_success_rate: $rate, ci_avg_duration_s: $dur}'
}
92
+
# ═════════════════════════════════════════════════════════════════════════════
# OUTCOME ANALYSIS
# ═════════════════════════════════════════════════════════════════════════════

# optimize_analyze_outcome <pipeline_state_file>
# Parse a completed pipeline's state file (markdown-style "key: value" lines)
# and append one JSON outcome record to outcomes.jsonl. Best-effort CI metrics
# are appended as a second record when GitHub helpers are available.
# Returns 1 when the state file is missing, 0 otherwise.
optimize_analyze_outcome() {
  local state_file="${1:-}"

  if [[ -z "$state_file" || ! -f "$state_file" ]]; then
    error "Pipeline state file not found: ${state_file:-<empty>}"
    return 1
  fi

  ensure_optimization_dir

  # Extract fields from the state file (markdown-style key: value)
  local issue_number template_used result total_iterations total_cost labels model
  issue_number=$(sed -n 's/^issue: *#*//p' "$state_file" | head -1 | tr -d ' ')
  template_used=$(sed -n 's/^template: *//p' "$state_file" | head -1 | tr -d ' ')
  result=$(sed -n 's/^status: *//p' "$state_file" | head -1 | tr -d ' ')
  total_iterations=$(sed -n 's/^iterations: *//p' "$state_file" | head -1 | tr -d ' ')
  total_cost=$(sed -n 's/^cost: *\$*//p' "$state_file" | head -1 | tr -d ' ')
  labels=$(sed -n 's/^labels: *//p' "$state_file" | head -1)
  model=$(sed -n 's/^model: *//p' "$state_file" | head -1 | tr -d ' ')

  # Extract complexity score if present
  local complexity
  complexity=$(sed -n 's/^complexity: *//p' "$state_file" | head -1 | tr -d ' ')

  # FIX: validate numeric fields before jq --argjson sees them. A malformed
  # value in the state file (e.g. "cost: $1,234") previously made jq fail,
  # which aborts the entire script under set -e. Fall back to 0 instead.
  [[ "$total_iterations" =~ ^[0-9]+$ ]] || total_iterations=0
  [[ "$total_cost" =~ ^-?[0-9]+(\.[0-9]+)?$ ]] || total_cost=0
  [[ "$complexity" =~ ^-?[0-9]+(\.[0-9]+)?$ ]] || complexity=0

  # Extract stage results from the "stages:" ... "---" section
  local stages_json="[]"
  local stages_section=""
  stages_section=$(sed -n '/^stages:/,/^---/p' "$state_file" 2>/dev/null || true)
  if [[ -n "$stages_section" ]]; then
    # Build JSON array of {name, status} stage entries
    local stage_entries=""
    while IFS= read -r line; do
      local stage_name stage_status
      stage_name=$(echo "$line" | sed 's/:.*//' | tr -d ' ')
      stage_status=$(echo "$line" | sed 's/.*: *//' | tr -d ' ')
      # Skip the "stages:" header and the "---" terminator themselves.
      if [[ -n "$stage_name" && "$stage_name" != "stages" && "$stage_name" != "---" ]]; then
        if [[ -n "$stage_entries" ]]; then
          stage_entries="${stage_entries},"
        fi
        stage_entries="${stage_entries}{\"name\":\"${stage_name}\",\"status\":\"${stage_status}\"}"
      fi
    done <<< "$stages_section"
    if [[ -n "$stage_entries" ]]; then
      stages_json="[${stage_entries}]"
    fi
  fi

  # Build outcome record using jq for proper escaping
  local tmp_outcome
  tmp_outcome=$(mktemp)
  jq -c -n \
    --arg ts "$(now_iso)" \
    --arg issue "${issue_number:-unknown}" \
    --arg template "${template_used:-unknown}" \
    --arg result "${result:-unknown}" \
    --arg model "${model:-opus}" \
    --arg labels "${labels:-}" \
    --argjson iterations "${total_iterations:-0}" \
    --argjson cost "${total_cost:-0}" \
    --argjson complexity "${complexity:-0}" \
    --argjson stages "$stages_json" \
    '{
      ts: $ts,
      issue: $issue,
      template: $template,
      result: $result,
      model: $model,
      labels: $labels,
      iterations: $iterations,
      cost: $cost,
      complexity: $complexity,
      stages: $stages
    }' > "$tmp_outcome"

  # Append to outcomes file
  local outcome_line
  outcome_line=$(cat "$tmp_outcome")
  rm -f "$tmp_outcome"
  echo "$outcome_line" >> "$OUTCOMES_FILE"

  # Record GitHub CI metrics alongside outcome (best-effort; "{}" when absent)
  local gh_ci_metrics
  gh_ci_metrics=$(_optimize_github_metrics 2>/dev/null || echo "{}")
  local ci_success_rate ci_avg_dur
  ci_success_rate=$(echo "$gh_ci_metrics" | jq -r '.ci_success_rate // 0' 2>/dev/null || echo "0")
  ci_avg_dur=$(echo "$gh_ci_metrics" | jq -r '.ci_avg_duration_s // 0' 2>/dev/null || echo "0")
  if [[ "${ci_success_rate:-0}" -gt 0 || "${ci_avg_dur:-0}" -gt 0 ]]; then
    # Append CI metrics as a separate typed record
    local ci_record
    ci_record=$(jq -c -n \
      --arg ts "$(now_iso)" \
      --arg issue "${issue_number:-unknown}" \
      --argjson ci_rate "${ci_success_rate:-0}" \
      --argjson ci_dur "${ci_avg_dur:-0}" \
      '{ts: $ts, type: "ci_metrics", issue: $issue, ci_success_rate: $ci_rate, ci_avg_duration_s: $ci_dur}')
    echo "$ci_record" >> "$OUTCOMES_FILE"

    # Warn if CI success rate is dropping
    if [[ "${ci_success_rate:-0}" -lt 70 && "${ci_success_rate:-0}" -gt 0 ]]; then
      warn "CI success rate is ${ci_success_rate}% — consider template escalation"
    fi
  fi

  emit_event "optimize.outcome_analyzed" \
    "issue=${issue_number:-unknown}" \
    "template=${template_used:-unknown}" \
    "result=${result:-unknown}" \
    "iterations=${total_iterations:-0}" \
    "cost=${total_cost:-0}"

  success "Outcome recorded for issue #${issue_number:-unknown} (${result:-unknown})"
}
211
+
# ═════════════════════════════════════════════════════════════════════════════
# TEMPLATE TUNING
# ═════════════════════════════════════════════════════════════════════════════

# optimize_tune_templates [outcomes_file]
# Adjust template selection weights based on success/failure rates per label.
# Reads outcomes.jsonl, tallies success per template|label pair, multiplies
# existing weights by (pair rate / global average rate) once a pair has >= 5
# samples (clamped to [0.1, 2.0]), and atomically rewrites
# template-weights.json in a consumer-friendly per-template format.
optimize_tune_templates() {
  local outcomes_file="${1:-$OUTCOMES_FILE}"

  if [[ ! -f "$outcomes_file" ]]; then
    warn "No outcomes data found at: $outcomes_file"
    return 0
  fi

  ensure_optimization_dir

  info "Tuning template weights..."

  # Process outcomes: group by template+label, calculate success rates
  # Uses a temp file approach compatible with Bash 3.2 (no associative arrays)
  local tmp_stats tmp_weights
  tmp_stats=$(mktemp)
  tmp_weights=$(mktemp)

  # Extract template, labels, result from each outcome line
  while IFS= read -r line; do
    local template result labels_str
    template=$(echo "$line" | jq -r '.template // "unknown"' 2>/dev/null) || continue
    result=$(echo "$line" | jq -r '.result // "unknown"' 2>/dev/null) || continue
    labels_str=$(echo "$line" | jq -r '.labels // ""' 2>/dev/null) || continue

    # Default label if none
    if [[ -z "$labels_str" ]]; then
      labels_str="unlabeled"
    fi

    # Record template+label combination with result.
    # NOTE: the inner while runs in a pipeline subshell, but it only appends
    # to $tmp_stats (a file), so nothing is lost when the subshell exits.
    local label
    echo "$labels_str" | tr ',' '\n' | while IFS= read -r label; do
      label=$(echo "$label" | tr -d ' ')
      [[ -z "$label" ]] && continue
      local is_success=0
      if [[ "$result" == "success" || "$result" == "completed" ]]; then
        is_success=1
      fi
      echo "${template}|${label}|${is_success}" >> "$tmp_stats"
    done
  done < "$outcomes_file"

  # Calculate weights per template+label, starting from the persisted set
  local current_weights='{}'
  if [[ -f "$TEMPLATE_WEIGHTS_FILE" ]]; then
    current_weights=$(cat "$TEMPLATE_WEIGHTS_FILE")
  fi

  # Get unique template|label combos
  if [[ -f "$tmp_stats" ]]; then
    local combos
    combos=$(cut -d'|' -f1,2 "$tmp_stats" | sort -u || true)

    # FIX: the global average success rate is loop-invariant — compute it once
    # here instead of re-scanning all of $tmp_stats on every combo iteration.
    local all_total all_successes avg_rate
    all_total=$(wc -l < "$tmp_stats" | tr -d ' ')
    all_total="${all_total:-1}"
    all_successes=$(grep -c "|1$" "$tmp_stats" || true)
    all_successes="${all_successes:-0}"
    avg_rate=$(awk -v s="$all_successes" -v t="$all_total" 'BEGIN { if (t > 0) printf "%.2f", (s/t)*100; else print "50" }')

    local new_weights="$current_weights"
    while IFS= read -r combo; do
      [[ -z "$combo" ]] && continue
      local tmpl lbl
      tmpl=$(echo "$combo" | cut -d'|' -f1)
      lbl=$(echo "$combo" | cut -d'|' -f2)

      # Per-pair sample count and success rate
      local total successes rate
      total=$(grep -c "^${tmpl}|${lbl}|" "$tmp_stats" || true)
      total="${total:-0}"
      successes=$(grep -c "^${tmpl}|${lbl}|1$" "$tmp_stats" || true)
      successes="${successes:-0}"

      if [[ "$total" -gt 0 ]]; then
        rate=$(awk "BEGIN{printf \"%.2f\", ($successes/$total)*100}")
      else
        rate="0"
      fi

      # Get current weight (default 1.0)
      local current_weight
      current_weight=$(echo "$new_weights" | jq -r --arg t "$tmpl" --arg l "$lbl" '.[$t + "|" + $l] // 1.0' 2>/dev/null)
      current_weight="${current_weight:-1.0}"

      # Adjust weight: proportional update if enough samples (>= 5), else skip.
      # new_weight = old_weight * (rate / avg_rate), clamped to [0.1, 2.0].
      local new_weight="$current_weight"
      if [[ "$total" -ge 5 ]]; then
        if awk -v ar="$avg_rate" 'BEGIN { exit !(ar > 0) }' 2>/dev/null; then
          new_weight=$(awk -v cw="$current_weight" -v r="$rate" -v ar="$avg_rate" \
            'BEGIN { w = cw * (r / ar); if (w < 0.1) w = 0.1; if (w > 2.0) w = 2.0; printf "%.3f", w }')
        fi
      fi

      # Update weights JSON
      new_weights=$(echo "$new_weights" | jq --arg key "${tmpl}|${lbl}" --argjson w "$new_weight" '.[$key] = $w')
    done <<< "$combos"

    # Build consumer-friendly format with per-template aggregates
    local consumer_weights
    consumer_weights=$(echo "$new_weights" | jq '
      . as $raw |
      # Extract unique template names
      [keys[] | split("|")[0]] | unique | map(. as $tmpl |
        {
          key: $tmpl,
          value: {
            success_rate: ([$raw | to_entries[] | select(.key | startswith($tmpl + "|")) | .value] | if length > 0 then (add / length) else 0 end),
            avg_duration_min: 0,
            sample_size: ([$raw | to_entries[] | select(.key | startswith($tmpl + "|"))] | length),
            raw_weights: ([$raw | to_entries[] | select(.key | startswith($tmpl + "|"))] | from_entries)
          }
        }
      ) | from_entries |
      {weights: ., updated_at: (now | strftime("%Y-%m-%dT%H:%M:%SZ"))}
    ' 2>/dev/null || echo "$new_weights")

    # Atomic write: tmp file + mv so readers never see a partial JSON file
    local tmp_cw
    tmp_cw=$(mktemp "${TEMPLATE_WEIGHTS_FILE}.tmp.XXXXXX")
    echo "$consumer_weights" > "$tmp_cw" && mv "$tmp_cw" "$TEMPLATE_WEIGHTS_FILE" || rm -f "$tmp_cw"
  fi

  rm -f "$tmp_stats" "$tmp_weights" 2>/dev/null || true

  emit_event "optimize.template_tuned"
  success "Template weights updated"
}
349
+
# ═════════════════════════════════════════════════════════════════════════════
# ITERATION LEARNING
# ═════════════════════════════════════════════════════════════════════════════

# optimize_learn_iterations [outcomes_file]
# Build a prediction model for iterations by complexity bucket (low/med/high).
# Bucket boundaries come from complexity-clusters.json when present — and are
# re-derived with a crude k-means once 50+ samples exist — otherwise 3 and 6.
# Atomically writes iteration-model.json with mean/stddev/samples plus a
# suggested max_iterations and confidence per bucket.
optimize_learn_iterations() {
  local outcomes_file="${1:-$OUTCOMES_FILE}"

  if [[ ! -f "$outcomes_file" ]]; then
    warn "No outcomes data found at: $outcomes_file"
    return 0
  fi

  ensure_optimization_dir

  info "Learning iteration patterns..."

  # Read complexity bucket boundaries from config or use defaults (3, 6)
  local clusters_file="${OPTIMIZATION_DIR}/complexity-clusters.json"
  local low_max=3
  local med_max=6

  if [[ -f "$clusters_file" ]]; then
    local cfg_low cfg_med
    cfg_low=$(jq -r '.low_max // empty' "$clusters_file" 2>/dev/null || true)
    cfg_med=$(jq -r '.med_max // empty' "$clusters_file" 2>/dev/null || true)
    # FIX: accept only integer boundaries — a corrupt config value would
    # otherwise break the integer [[ -le ]] comparisons below.
    [[ "$cfg_low" =~ ^[0-9]+$ ]] && low_max="$cfg_low"
    [[ "$cfg_med" =~ ^[0-9]+$ ]] && med_max="$cfg_med"
  fi

  # Group by complexity bucket
  local tmp_low tmp_med tmp_high tmp_all_pairs
  tmp_low=$(mktemp)
  tmp_med=$(mktemp)
  tmp_high=$(mktemp)
  tmp_all_pairs=$(mktemp)

  while IFS= read -r line; do
    local complexity iterations
    complexity=$(echo "$line" | jq -r '.complexity // 0' 2>/dev/null) || continue
    iterations=$(echo "$line" | jq -r '.iterations // 0' 2>/dev/null) || continue

    # Skip entries without iteration data
    [[ "$iterations" == "0" || "$iterations" == "null" ]] && continue

    # Store (complexity, iterations) pairs for potential k-means
    echo "${complexity} ${iterations}" >> "$tmp_all_pairs"

    # FIX: complexity may be fractional (e.g. "7.5"); bash integer tests
    # ([[ -le ]]) error on floats. Bucket on the truncated integer part.
    local complexity_int="${complexity%%.*}"
    [[ "$complexity_int" =~ ^-?[0-9]+$ ]] || complexity_int=0

    if [[ "$complexity_int" -le "$low_max" ]]; then
      echo "$iterations" >> "$tmp_low"
    elif [[ "$complexity_int" -le "$med_max" ]]; then
      echo "$iterations" >> "$tmp_med"
    else
      echo "$iterations" >> "$tmp_high"
    fi
  done < "$outcomes_file"

  # If 50+ data points, compute k-means (3 clusters) to find natural boundaries
  local pair_count=0
  [[ -s "$tmp_all_pairs" ]] && pair_count=$(wc -l < "$tmp_all_pairs" | tr -d ' ')
  if [[ "$pair_count" -ge 50 ]]; then
    # Simple clustering in awk: sort by complexity, split into 3 equal groups
    local new_boundaries
    new_boundaries=$(awk '
      BEGIN { n=0 }
      { c[n]=$1; it[n]=$2; n++ }
      END {
        if (n < 50) exit
        # Sort by complexity (simple bubble sort — small n)
        for (i=0; i<n-1; i++)
          for (j=i+1; j<n; j++)
            if (c[i] > c[j]) {
              tmp=c[i]; c[i]=c[j]; c[j]=tmp
              tmp=it[i]; it[i]=it[j]; it[j]=tmp
            }
        # Split into 3 equal groups and find boundaries
        third = int(n / 3)
        low_boundary = c[third - 1]
        med_boundary = c[2 * third - 1]
        # Ensure boundaries are sane (1-9 range)
        if (low_boundary < 1) low_boundary = 1
        if (low_boundary > 5) low_boundary = 5
        if (med_boundary < low_boundary + 1) med_boundary = low_boundary + 1
        if (med_boundary > 8) med_boundary = 8
        printf "%d %d", low_boundary, med_boundary
      }' "$tmp_all_pairs")

    if [[ -n "$new_boundaries" ]]; then
      local new_low new_med
      new_low=$(echo "$new_boundaries" | cut -d' ' -f1)
      new_med=$(echo "$new_boundaries" | cut -d' ' -f2)

      if [[ -n "$new_low" && -n "$new_med" ]]; then
        # Write boundaries back to config (atomic tmp + mv)
        local tmp_clusters
        tmp_clusters=$(mktemp "${TMPDIR:-/tmp}/sw-clusters.XXXXXX")
        jq -n \
          --argjson low_max "$new_low" \
          --argjson med_max "$new_med" \
          --argjson samples "$pair_count" \
          --arg updated "$(now_iso)" \
          '{low_max: $low_max, med_max: $med_max, samples: $samples, updated: $updated}' \
          > "$tmp_clusters" && mv "$tmp_clusters" "$clusters_file" || rm -f "$tmp_clusters"

        emit_event "optimize.clusters_updated" \
          "low_max=$new_low" \
          "med_max=$new_med" \
          "samples=$pair_count"
      fi
    fi
  fi
  rm -f "$tmp_all_pairs" 2>/dev/null || true

  # Calculate mean and stddev for each bucket using awk.
  # (Nested definition: redefined on every call; harmless.)
  calc_stats() {
    local file="$1"
    if [[ ! -s "$file" ]]; then
      echo '{"mean":0,"stddev":0,"samples":0}'
      return
    fi
    awk '{
      sum += $1; sumsq += ($1 * $1); n++
    } END {
      if (n == 0) { print "{\"mean\":0,\"stddev\":0,\"samples\":0}"; exit }
      mean = sum / n
      if (n > 1) {
        variance = (sumsq / n) - (mean * mean)
        if (variance < 0) variance = 0
        stddev = sqrt(variance)
      } else {
        stddev = 0
      }
      printf "{\"mean\":%.1f,\"stddev\":%.1f,\"samples\":%d}\n", mean, stddev, n
    }' "$file"
  }

  local low_stats med_stats high_stats
  low_stats=$(calc_stats "$tmp_low")
  med_stats=$(calc_stats "$tmp_med")
  high_stats=$(calc_stats "$tmp_high")

  # Build iteration model: max_iterations ≈ mean + 1 stddev (floored, with
  # per-bucket minimums); confidence scales with sample size.
  local tmp_model
  tmp_model=$(mktemp "${ITERATION_MODEL_FILE}.tmp.XXXXXX")
  jq -n \
    --argjson low "$low_stats" \
    --argjson medium "$med_stats" \
    --argjson high "$high_stats" \
    --arg updated "$(now_iso)" \
    '{
      predictions: {
        low: {max_iterations: (if $low.mean > 0 then (($low.mean + $low.stddev) | floor | if . < 5 then 5 else . end) else 10 end), confidence: (if $low.samples >= 10 then 0.8 elif $low.samples >= 5 then 0.6 else 0.4 end), mean: $low.mean, stddev: $low.stddev, samples: $low.samples},
        medium: {max_iterations: (if $medium.mean > 0 then (($medium.mean + $medium.stddev) | floor | if . < 10 then 10 else . end) else 20 end), confidence: (if $medium.samples >= 10 then 0.8 elif $medium.samples >= 5 then 0.6 else 0.4 end), mean: $medium.mean, stddev: $medium.stddev, samples: $medium.samples},
        high: {max_iterations: (if $high.mean > 0 then (($high.mean + $high.stddev) | floor | if . < 15 then 15 else . end) else 30 end), confidence: (if $high.samples >= 10 then 0.8 elif $high.samples >= 5 then 0.6 else 0.4 end), mean: $high.mean, stddev: $high.stddev, samples: $high.samples}
      },
      updated_at: $updated
    }' \
    > "$tmp_model" && mv "$tmp_model" "$ITERATION_MODEL_FILE" || rm -f "$tmp_model"

  rm -f "$tmp_low" "$tmp_med" "$tmp_high" 2>/dev/null || true

  success "Iteration model updated"
}
514
+
# ═════════════════════════════════════════════════════════════════════════════
# MODEL ROUTING
# ═════════════════════════════════════════════════════════════════════════════

# optimize_should_ab_test <stage>
# Succeeds (exit 0) for roughly 20% of calls, flagging that this run should
# be diverted into an A/B model experiment. The stage argument is accepted
# for interface symmetry but does not influence the uniform random draw.
optimize_should_ab_test() {
  local pct=$(( RANDOM % 100 ))
  (( pct < 20 ))
}
526
+
# optimize_route_models [outcomes_file]
# Track per-stage model success rates and recommend cheaper models when viable.
# Reads outcomes.jsonl, tallies sonnet vs opus success per pipeline stage, and
# atomically rewrites model-routing.json with a per-stage recommendation plus
# a sample-size-based confidence. Returns 0 even when no data exists.
optimize_route_models() {
  local outcomes_file="${1:-$OUTCOMES_FILE}"

  if [[ ! -f "$outcomes_file" ]]; then
    warn "No outcomes data found at: $outcomes_file"
    return 0
  fi

  ensure_optimization_dir

  info "Analyzing model routing..."

  # Collect per-stage, per-model stats as "stage|model|is_success|cost" lines
  local tmp_stage_stats
  tmp_stage_stats=$(mktemp)

  while IFS= read -r line; do
    # NOTE(review): stages_arr is declared but never used, and result is
    # parsed but not consumed below — candidates for cleanup.
    local model result stages_arr
    model=$(echo "$line" | jq -r '.model // "opus"' 2>/dev/null) || continue
    result=$(echo "$line" | jq -r '.result // "unknown"' 2>/dev/null) || continue
    local cost
    cost=$(echo "$line" | jq -r '.cost // 0' 2>/dev/null) || continue

    # Extract stage names from the stages array
    local stage_count
    stage_count=$(echo "$line" | jq '.stages | length' 2>/dev/null || echo "0")

    # One jq invocation per stage field; fine for small outcome files.
    local i=0
    while [[ "$i" -lt "$stage_count" ]]; do
      local stage_name stage_status
      stage_name=$(echo "$line" | jq -r ".stages[$i].name" 2>/dev/null)
      stage_status=$(echo "$line" | jq -r ".stages[$i].status" 2>/dev/null)
      local is_success=0
      if [[ "$stage_status" == "complete" || "$stage_status" == "success" ]]; then
        is_success=1
      fi
      echo "${stage_name}|${model}|${is_success}|${cost}" >> "$tmp_stage_stats"
      i=$((i + 1))
    done
  done < "$outcomes_file"

  # Build routing recommendations, starting from the persisted routing file
  local routing='{}'
  if [[ -f "$MODEL_ROUTING_FILE" ]]; then
    routing=$(cat "$MODEL_ROUTING_FILE")
  fi

  if [[ -f "$tmp_stage_stats" && -s "$tmp_stage_stats" ]]; then
    local stages
    stages=$(cut -d'|' -f1 "$tmp_stage_stats" | sort -u || true)

    while IFS= read -r stage; do
      [[ -z "$stage" ]] && continue

      # Sonnet stats for this stage
      # (grep -c exits 1 on zero matches, hence the || true guards)
      local sonnet_total sonnet_success sonnet_rate
      sonnet_total=$(grep -c "^${stage}|sonnet|" "$tmp_stage_stats" || true)
      sonnet_total="${sonnet_total:-0}"
      sonnet_success=$(grep -c "^${stage}|sonnet|1|" "$tmp_stage_stats" || true)
      sonnet_success="${sonnet_success:-0}"

      if [[ "$sonnet_total" -gt 0 ]]; then
        sonnet_rate=$(awk "BEGIN{printf \"%.1f\", ($sonnet_success/$sonnet_total)*100}")
      else
        sonnet_rate="0"
      fi

      # Opus stats for this stage
      local opus_total opus_success opus_rate
      opus_total=$(grep -c "^${stage}|opus|" "$tmp_stage_stats" || true)
      opus_total="${opus_total:-0}"
      opus_success=$(grep -c "^${stage}|opus|1|" "$tmp_stage_stats" || true)
      opus_success="${opus_success:-0}"

      if [[ "$opus_total" -gt 0 ]]; then
        opus_rate=$(awk "BEGIN{printf \"%.1f\", ($opus_success/$opus_total)*100}")
      else
        opus_rate="0"
      fi

      # Recommend sonnet if it succeeds 90%+ with enough samples (>= 3);
      # the float comparison is delegated to awk since bash can't do it.
      local recommendation="opus"
      if [[ "$sonnet_total" -ge 3 ]] && awk "BEGIN{exit !($sonnet_rate >= 90)}" 2>/dev/null; then
        recommendation="sonnet"
        emit_event "optimize.model_switched" \
          "stage=$stage" \
          "from=opus" \
          "to=sonnet" \
          "sonnet_rate=$sonnet_rate"
      fi

      # Merge this stage's stats into the routing object
      routing=$(echo "$routing" | jq \
        --arg stage "$stage" \
        --arg rec "$recommendation" \
        --argjson sonnet_rate "$sonnet_rate" \
        --argjson opus_rate "$opus_rate" \
        --argjson sonnet_n "$sonnet_total" \
        --argjson opus_n "$opus_total" \
        '.[$stage] = {
          recommended: $rec,
          sonnet_rate: $sonnet_rate,
          opus_rate: $opus_rate,
          sonnet_samples: $sonnet_n,
          opus_samples: $opus_n
        }')
    done <<< "$stages"
  fi

  # Wrap in consumer-friendly format: {routes: {...}, updated_at: ...};
  # falls back to the raw routing object if the jq transform fails.
  local consumer_routing
  consumer_routing=$(echo "$routing" | jq '{
    routes: (. | to_entries | map({
      key: .key,
      value: {
        model: .value.recommended,
        confidence: (if .value.sonnet_samples + .value.opus_samples >= 10 then 0.9
                     elif .value.sonnet_samples + .value.opus_samples >= 5 then 0.7
                     else 0.5 end),
        sonnet_rate: .value.sonnet_rate,
        opus_rate: .value.opus_rate,
        sonnet_samples: .value.sonnet_samples,
        opus_samples: .value.opus_samples
      }
    }) | from_entries),
    updated_at: (now | strftime("%Y-%m-%dT%H:%M:%SZ"))
  }' 2>/dev/null || echo "$routing")

  # Atomic write: tmp file + mv so readers never see a partial JSON file
  local tmp_routing
  tmp_routing=$(mktemp "${MODEL_ROUTING_FILE}.tmp.XXXXXX")
  echo "$consumer_routing" > "$tmp_routing" && mv "$tmp_routing" "$MODEL_ROUTING_FILE" || rm -f "$tmp_routing"

  rm -f "$tmp_stage_stats" 2>/dev/null || true

  success "Model routing updated"
}
665
+
666
+ # ═════════════════════════════════════════════════════════════════════════════
667
+ # MEMORY EVOLUTION
668
+ # ═════════════════════════════════════════════════════════════════════════════
669
+
670
# optimize_evolve_memory
# Prune stale patterns, strengthen confirmed ones, promote cross-repo patterns.
#
# Reads:   ~/.shipwright/memory/*/failures.json
#          ${OPTIMIZATION_DIR}/memory-timescales.json  (optional tuning)
#          ${OPTIMIZATION_DIR}/memory-thresholds.json  (optional tuning)
# Writes:  per-repo failures.json (pruned/strengthened, atomic rename)
#          ~/.shipwright/memory/global.json (promoted patterns)
# Returns: 0 always; a missing memory directory is not an error.
optimize_evolve_memory() {
  local memory_root="${HOME}/.shipwright/memory"

  if [[ ! -d "$memory_root" ]]; then
    warn "No memory directory found"
    return 0
  fi

  info "Evolving memory patterns..."

  local pruned=0
  local strengthened=0
  local promoted=0
  local now_e
  now_e=$(now_epoch)

  # Read adaptive timescales from config or use defaults
  local timescales_file="${OPTIMIZATION_DIR}/memory-timescales.json"
  local prune_days=30
  local boost_days=7
  local strength_threshold=3
  local promotion_threshold=3

  if [[ -f "$timescales_file" ]]; then
    local cfg_prune cfg_boost
    cfg_prune=$(jq -r '.prune_days // empty' "$timescales_file" 2>/dev/null || true)
    cfg_boost=$(jq -r '.boost_days // empty' "$timescales_file" 2>/dev/null || true)
    [[ -n "$cfg_prune" && "$cfg_prune" != "null" ]] && prune_days="$cfg_prune"
    [[ -n "$cfg_boost" && "$cfg_boost" != "null" ]] && boost_days="$cfg_boost"
  fi

  # Read strength and cross-repo thresholds from config
  local thresholds_file="${OPTIMIZATION_DIR}/memory-thresholds.json"
  if [[ -f "$thresholds_file" ]]; then
    local cfg_strength cfg_promotion
    cfg_strength=$(jq -r '.strength_threshold // empty' "$thresholds_file" 2>/dev/null || true)
    cfg_promotion=$(jq -r '.promotion_threshold // empty' "$thresholds_file" 2>/dev/null || true)
    [[ -n "$cfg_strength" && "$cfg_strength" != "null" ]] && strength_threshold="$cfg_strength"
    [[ -n "$cfg_promotion" && "$cfg_promotion" != "null" ]] && promotion_threshold="$cfg_promotion"
  fi

  local prune_cutoff=$((now_e - prune_days * 86400))
  local boost_cutoff=$((now_e - boost_days * 86400))

  # Convert the epoch cutoffs to ISO-8601 once, outside the repo loop.
  # BSD date formats an epoch with `-r <epoch>`; GNU date needs `-d @<epoch>`
  # (on GNU, -r means "mtime of FILE", so the BSD form fails there).
  # Previously only the BSD form was tried, and the failure fallback was
  # "now" — which made the prune cutoff equal to the current time on Linux
  # and pruned essentially every entry. Last-resort fallbacks are now
  # non-destructive: epoch origin for pruning (prune nothing) and "now" for
  # boosting (boost nothing).
  local prune_cutoff_iso boost_cutoff_iso
  prune_cutoff_iso=$(date -u -r "$prune_cutoff" +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null \
    || date -u -d "@${prune_cutoff}" +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null \
    || echo "1970-01-01T00:00:00Z")
  boost_cutoff_iso=$(date -u -r "$boost_cutoff" +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null \
    || date -u -d "@${boost_cutoff}" +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null \
    || date -u +"%Y-%m-%dT%H:%M:%SZ")

  # Process each repo's failures.json
  local repo_dir
  for repo_dir in "$memory_root"/*/; do
    [[ -d "$repo_dir" ]] || continue
    local failures_file="${repo_dir}failures.json"
    [[ -f "$failures_file" ]] || continue

    local entry_count
    entry_count=$(jq '.failures | length' "$failures_file" 2>/dev/null || echo "0")
    [[ "$entry_count" -eq 0 ]] && continue

    # Prune entries not seen within the prune window. Entries with no
    # last_seen are kept — their staleness is unknown.
    local pruned_json
    pruned_json=$(jq --arg cutoff "$prune_cutoff_iso" \
      '[.failures[] | select(.last_seen >= $cutoff or .last_seen == null)]' \
      "$failures_file" 2>/dev/null || echo "[]")

    local after_count
    after_count=$(echo "$pruned_json" | jq 'length' 2>/dev/null || echo "0")
    pruned=$((pruned + entry_count - after_count))

    # Strengthen entries seen N+ times within boost window (adaptive thresholds)
    pruned_json=$(echo "$pruned_json" | jq \
      --arg cutoff_b "$boost_cutoff_iso" \
      --argjson st "$strength_threshold" '
      [.[] | if (.seen_count >= $st and .last_seen >= $cutoff_b) then
        .weight = ((.weight // 1.0) * 1.5)
      else . end]')

    local strong_count
    strong_count=$(echo "$pruned_json" | jq '[.[] | select(.weight != null and .weight > 1.0)] | length' 2>/dev/null || echo "0")
    strengthened=$((strengthened + strong_count))

    # Atomic write-back; discard the temp file if jq fails instead of
    # leaking it (or moving garbage over the real file).
    local tmp_file
    tmp_file=$(mktemp)
    if jq -n --argjson f "$pruned_json" '{failures: $f}' > "$tmp_file" 2>/dev/null; then
      mv "$tmp_file" "$failures_file"
    else
      rm -f "$tmp_file"
    fi
  done

  # Promote patterns that recur across repos to global.json
  local global_file="${memory_root}/global.json"
  if [[ ! -f "$global_file" ]]; then
    echo '{"common_patterns":[],"cross_repo_learnings":[]}' > "$global_file"
  fi

  # Collect all patterns across repos.
  # NOTE(review): this counts total occurrences across files, not distinct
  # repos — if one repo lists the same pattern several times it can reach
  # the promotion threshold alone; confirm failures.json patterns are unique.
  local tmp_all_patterns
  tmp_all_patterns=$(mktemp)
  for repo_dir in "$memory_root"/*/; do
    [[ -d "$repo_dir" ]] || continue
    local failures_file="${repo_dir}failures.json"
    [[ -f "$failures_file" ]] || continue
    jq -r '.failures[]?.pattern // empty' "$failures_file" 2>/dev/null >> "$tmp_all_patterns" || true
  done

  if [[ -s "$tmp_all_patterns" ]]; then
    # Find patterns appearing promotion_threshold+ times (adaptive threshold)
    local promoted_patterns
    promoted_patterns=$(sort "$tmp_all_patterns" | uniq -c | sort -rn | awk -v pt="$promotion_threshold" '$1 >= pt {$1=""; print substr($0,2)}' || true)

    if [[ -n "$promoted_patterns" ]]; then
      local tmp_global
      tmp_global=$(mktemp)
      local pcount=0
      while IFS= read -r pattern; do
        [[ -z "$pattern" ]] && continue
        # Skip patterns already promoted to the global file
        local exists
        exists=$(jq --arg p "$pattern" '[.common_patterns[] | select(.pattern == $p)] | length' "$global_file" 2>/dev/null || echo "0")
        if [[ "$exists" == "0" ]]; then
          jq --arg p "$pattern" --arg ts "$(now_iso)" \
            '.common_patterns += [{pattern: $p, promoted_at: $ts, source: "cross-repo"}]' \
            "$global_file" > "$tmp_global" && mv "$tmp_global" "$global_file"
          pcount=$((pcount + 1))
        fi
      done <<< "$promoted_patterns"
      promoted=$((promoted + pcount))
      rm -f "$tmp_global" 2>/dev/null || true
    fi
  fi

  rm -f "$tmp_all_patterns" 2>/dev/null || true

  emit_event "optimize.memory_pruned" \
    "pruned=$pruned" \
    "strengthened=$strengthened" \
    "promoted=$promoted"

  success "Memory evolved: pruned=$pruned, strengthened=$strengthened, promoted=$promoted"
}
809
+
810
+ # ═════════════════════════════════════════════════════════════════════════════
811
+ # FULL ANALYSIS (DAILY)
812
+ # ═════════════════════════════════════════════════════════════════════════════
813
+
814
# optimize_full_analysis
# Run every optimization step in sequence — the daily entry point.
# The report and audit-intensity steps are best-effort: their failures are
# swallowed so one bad step never aborts the whole analysis.
optimize_full_analysis() {
  printf '\n'
  printf '%b\n' "${PURPLE}${BOLD}╔═══════════════════════════════════════════════════════════════╗${RESET}"
  printf '%b\n' "${PURPLE}${BOLD}║              Self-Optimization — Full Analysis                ║${RESET}"
  printf '%b\n' "${PURPLE}${BOLD}╚═══════════════════════════════════════════════════════════════╝${RESET}"
  printf '\n'

  ensure_optimization_dir

  optimize_tune_templates
  optimize_learn_iterations
  optimize_route_models
  optimize_evolve_memory
  optimize_report >> "${OPTIMIZATION_DIR}/last-report.txt" 2>/dev/null || true
  optimize_adjust_audit_intensity 2>/dev/null || true

  printf '\n'
  success "Full optimization analysis complete"
}
835
+
836
+ # ═════════════════════════════════════════════════════════════════════════════
837
+ # REPORT
838
+ # ═════════════════════════════════════════════════════════════════════════════
839
+
840
# optimize_report
# Generate a summary report of optimization trends over the last 7 days:
# pipeline count, success rate, average iterations/cost, plus the current
# template weights, model routing, and iteration-model state.
#
# Reads:   $OUTCOMES_FILE (JSONL: one outcome object per line),
#          $TEMPLATE_WEIGHTS_FILE, $MODEL_ROUTING_FILE, $ITERATION_MODEL_FILE
# Returns: 0 always; missing data files just shorten the report.
optimize_report() {
  ensure_optimization_dir

  echo ""
  echo -e "${PURPLE}${BOLD}╔═══════════════════════════════════════════════════════════════╗${RESET}"
  echo -e "${PURPLE}${BOLD}║                   Self-Optimization Report                    ║${RESET}"
  echo -e "${PURPLE}${BOLD}╚═══════════════════════════════════════════════════════════════╝${RESET}"
  echo ""

  if [[ ! -f "$OUTCOMES_FILE" ]]; then
    warn "No outcomes data available yet"
    return 0
  fi

  local now_e seven_days_ago
  now_e=$(now_epoch)
  seven_days_ago=$((now_e - 604800))

  # Epoch → ISO-8601: BSD date takes `-r <epoch>`, GNU date takes `-d @<epoch>`
  # (GNU -r expects a file, so the BSD form fails there). Previously only the
  # BSD form was tried, and the fallback was "now" — which made the cutoff
  # equal to the current time on Linux and skipped every outcome line.
  # Last-resort fallback is the epoch origin so the report degrades to
  # "all time" instead of reporting zeros.
  local cutoff_iso
  cutoff_iso=$(date -u -r "$seven_days_ago" +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null \
    || date -u -d "@${seven_days_ago}" +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null \
    || echo "1970-01-01T00:00:00Z")

  # Aggregate outcomes newer than the cutoff. A single jq call extracts all
  # four fields per line (was four jq spawns per line for the same data).
  local total_recent=0
  local success_recent=0
  local total_cost_recent=0
  local total_iterations_recent=0

  local line
  while IFS= read -r line; do
    [[ -z "$line" ]] && continue
    local fields ts result cost iterations
    fields=$(jq -r '[(.ts // ""), (.result // "unknown"), (.cost // 0), (.iterations // 0)] | @tsv' <<<"$line" 2>/dev/null) || continue
    IFS=$'\t' read -r ts result cost iterations <<<"$fields"
    # Lexicographic compare works for fixed-format ISO-8601 timestamps;
    # an empty ts also sorts before the cutoff and is skipped.
    [[ "$ts" < "$cutoff_iso" ]] && continue

    total_recent=$((total_recent + 1))
    if [[ "$result" == "success" || "$result" == "completed" ]]; then
      success_recent=$((success_recent + 1))
    fi
    # awk for float addition — bash arithmetic is integer-only.
    total_cost_recent=$(awk "BEGIN{printf \"%.2f\", $total_cost_recent + $cost}")
    total_iterations_recent=$((total_iterations_recent + iterations))
  done < "$OUTCOMES_FILE"

  # Calculate rates (guard against division by zero)
  local success_rate="0"
  local avg_iterations="0"
  local avg_cost="0"
  if [[ "$total_recent" -gt 0 ]]; then
    success_rate=$(awk "BEGIN{printf \"%.1f\", ($success_recent/$total_recent)*100}")
    avg_iterations=$(awk "BEGIN{printf \"%.1f\", $total_iterations_recent/$total_recent}")
    avg_cost=$(awk "BEGIN{printf \"%.2f\", $total_cost_recent/$total_recent}")
  fi

  echo -e "${CYAN}${BOLD}  Last 7 Days${RESET}"
  echo -e "  ${DIM}─────────────────────────────────${RESET}"
  echo -e "  Pipelines:       ${BOLD}$total_recent${RESET}"
  echo -e "  Success rate:    ${BOLD}${success_rate}%${RESET}"
  echo -e "  Avg iterations:  ${BOLD}${avg_iterations}${RESET}"
  echo -e "  Avg cost:        ${BOLD}\$${avg_cost}${RESET}"
  echo -e "  Total cost:      ${BOLD}\$${total_cost_recent}${RESET}"
  echo ""

  # Template weights summary
  if [[ -f "$TEMPLATE_WEIGHTS_FILE" ]]; then
    local weight_count
    weight_count=$(jq 'keys | length' "$TEMPLATE_WEIGHTS_FILE" 2>/dev/null || echo "0")
    if [[ "$weight_count" -gt 0 ]]; then
      echo -e "${CYAN}${BOLD}  Template Weights${RESET}"
      echo -e "  ${DIM}─────────────────────────────────${RESET}"
      jq -r 'to_entries[] | "  \(.key): \(.value)"' "$TEMPLATE_WEIGHTS_FILE" 2>/dev/null || true
      echo ""
    fi
  fi

  # Model routing summary. optimize_route_models normally writes the
  # consumer format {routes: {stage: {model, ...}}, updated_at} but falls
  # back to the flat {stage: {recommended, ...}} map on jq failure — accept
  # both shapes here instead of only the flat one.
  if [[ -f "$MODEL_ROUTING_FILE" ]]; then
    local route_count
    route_count=$(jq '(.routes // .) | keys | length' "$MODEL_ROUTING_FILE" 2>/dev/null || echo "0")
    if [[ "$route_count" -gt 0 ]]; then
      echo -e "${CYAN}${BOLD}  Model Routing${RESET}"
      echo -e "  ${DIM}─────────────────────────────────${RESET}"
      jq -r '(.routes // .) | to_entries[] | select(.value | type == "object")
        | "  \(.key): \(.value.model // .value.recommended) (sonnet: \(.value.sonnet_rate)%, opus: \(.value.opus_rate)%)"' \
        "$MODEL_ROUTING_FILE" 2>/dev/null || true
      echo ""
    fi
  fi

  # Iteration model summary
  if [[ -f "$ITERATION_MODEL_FILE" ]]; then
    local has_data
    has_data=$(jq '.low.samples // 0' "$ITERATION_MODEL_FILE" 2>/dev/null || echo "0")
    if [[ "$has_data" -gt 0 ]]; then
      echo -e "${CYAN}${BOLD}  Iteration Model${RESET}"
      echo -e "  ${DIM}─────────────────────────────────${RESET}"
      echo -e "  Low complexity:  $(jq -r '.low | "\(.mean) ± \(.stddev) (\(.samples) samples)"' "$ITERATION_MODEL_FILE" 2>/dev/null)"
      echo -e "  Med complexity:  $(jq -r '.medium | "\(.mean) ± \(.stddev) (\(.samples) samples)"' "$ITERATION_MODEL_FILE" 2>/dev/null)"
      echo -e "  High complexity: $(jq -r '.high | "\(.mean) ± \(.stddev) (\(.samples) samples)"' "$ITERATION_MODEL_FILE" 2>/dev/null)"
      echo ""
    fi
  fi

  emit_event "optimize.report" \
    "pipelines=$total_recent" \
    "success_rate=$success_rate" \
    "avg_cost=$avg_cost"

  success "Report complete"
}
950
+
951
# optimize_adjust_audit_intensity
# Reads quality-scores.jsonl trends and adjusts intelligence feature flags
# to increase audit intensity when quality is declining.
#
# Reads:   ~/.shipwright/optimization/quality-scores.jsonl
#          (JSONL; only .quality_score is used, defaulting to 70)
# Writes:  ${REPO_DIR:-.}/.claude/daemon-config.json
#          (sets .intelligence.adversarial_enabled / .architecture_enabled)
# Returns: 0 always; silently no-ops when either file is missing.
optimize_adjust_audit_intensity() {
  local quality_file="${HOME}/.shipwright/optimization/quality-scores.jsonl"
  local daemon_config="${REPO_DIR:-.}/.claude/daemon-config.json"

  # Both inputs are optional — nothing to do without data or a config target.
  [[ ! -f "$quality_file" ]] && return 0
  [[ ! -f "$daemon_config" ]] && return 0

  # Get last 10 quality scores
  local recent_scores avg_quality trend
  recent_scores=$(tail -10 "$quality_file" 2>/dev/null || true)
  [[ -z "$recent_scores" ]] && return 0

  # Mean of recent scores, rounded to an integer so bash -lt/-gt compares
  # work below. Unparseable lines fall back to 70 via jq's // operator; an
  # empty awk result falls back to 70 again on the next line.
  avg_quality=$(echo "$recent_scores" | jq -r '.quality_score // 70' 2>/dev/null \
    | awk '{ sum += $1; count++ } END { if (count > 0) printf "%.0f", sum/count; else print 70 }')
  avg_quality="${avg_quality:-70}"

  # Detect trend: compare first half vs second half
  # NOTE(review): with fewer than 10 score lines, head -5 and tail -5 overlap
  # and dampen the trend signal — confirm that is acceptable.
  local first_half_avg second_half_avg
  first_half_avg=$(echo "$recent_scores" | head -5 | jq -r '.quality_score // 70' 2>/dev/null \
    | awk '{ sum += $1; count++ } END { if (count > 0) printf "%.0f", sum/count; else print 70 }')
  second_half_avg=$(echo "$recent_scores" | tail -5 | jq -r '.quality_score // 70' 2>/dev/null \
    | awk '{ sum += $1; count++ } END { if (count > 0) printf "%.0f", sum/count; else print 70 }')

  # Strictly-less: equal halves count as stable.
  if [[ "${second_half_avg:-70}" -lt "${first_half_avg:-70}" ]]; then
    trend="declining"
  else
    trend="stable_or_improving"
  fi

  # Declining quality → enable more audits
  if [[ "$trend" == "declining" || "${avg_quality:-70}" -lt 60 ]]; then
    info "Quality trend: ${trend} (avg: ${avg_quality}) — increasing audit intensity"
    # Atomic flag flip: write to a sibling temp file, then rename over the
    # config; remove the temp file if jq or the rename fails.
    local tmp_dc
    tmp_dc=$(mktemp "${daemon_config}.tmp.XXXXXX")
    jq '.intelligence.adversarial_enabled = true | .intelligence.architecture_enabled = true' \
      "$daemon_config" > "$tmp_dc" 2>/dev/null && mv "$tmp_dc" "$daemon_config" || rm -f "$tmp_dc"
    emit_event "optimize.audit_intensity" \
      "avg_quality=$avg_quality" \
      "trend=$trend" \
      "action=increase"
  elif [[ "${avg_quality:-70}" -gt 85 ]]; then
    # High quality: no config change, just record that audits stay as-is.
    info "Quality trend: excellent (avg: ${avg_quality}) — maintaining standard audits"
    emit_event "optimize.audit_intensity" \
      "avg_quality=$avg_quality" \
      "trend=$trend" \
      "action=maintain"
  fi
}
1002
+
1003
+ # ═════════════════════════════════════════════════════════════════════════════
1004
+ # HELP
1005
+ # ═════════════════════════════════════════════════════════════════════════════
1006
+
1007
# show_help
# Print usage, available subcommands, and on-disk storage locations for the
# self-optimize tool to stdout.
show_help() {
  printf '\n'
  printf '%b\n' "${PURPLE}${BOLD}shipwright self-optimize${RESET} — Learning & Self-Tuning System"
  printf '\n'
  printf '%b\n' "${CYAN}USAGE${RESET}"
  printf '%s\n' "  shipwright self-optimize <command>"
  printf '\n'
  printf '%b\n' "${CYAN}COMMANDS${RESET}"
  printf '%s\n' "  analyze-outcome <state-file>   Analyze a completed pipeline outcome"
  printf '%s\n' "  tune                           Run full optimization analysis"
  printf '%s\n' "  report                         Show optimization report (last 7 days)"
  printf '%s\n' "  evolve-memory                  Prune/strengthen/promote memory patterns"
  printf '%s\n' "  help                           Show this help"
  printf '\n'
  printf '%b\n' "${CYAN}STORAGE${RESET}"
  printf '%s\n' "  ~/.shipwright/optimization/outcomes.jsonl          Outcome history"
  printf '%s\n' "  ~/.shipwright/optimization/template-weights.json   Template selection weights"
  printf '%s\n' "  ~/.shipwright/optimization/model-routing.json      Per-stage model routing"
  printf '%s\n' "  ~/.shipwright/optimization/iteration-model.json    Iteration predictions"
  printf '\n'
}
1028
+
1029
+ # ═════════════════════════════════════════════════════════════════════════════
1030
+ # MAIN
1031
+ # ═════════════════════════════════════════════════════════════════════════════
1032
+
1033
# Dispatch the requested subcommand; defaults to help when none is given.
main() {
  local subcmd="${1:-help}"
  # Drop the subcommand itself so "$@" carries only its arguments;
  # guard the shift because there may be no arguments at all.
  if [[ $# -gt 0 ]]; then
    shift
  fi
  case "$subcmd" in
    analyze-outcome) optimize_analyze_outcome "$@" ;;
    tune)            optimize_full_analysis ;;
    report)          optimize_report ;;
    evolve-memory)   optimize_evolve_memory ;;
    help|--help|-h)  show_help ;;
    *)
      error "Unknown command: $subcmd"
      exit 1
      ;;
  esac
}
1045
+
1046
# Run main only when executed directly; when this file is sourced as a
# library, only the function definitions are loaded.
if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then
  main "$@"
fi