shipwright-cli 2.4.0 → 3.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (169)
  1. package/README.md +16 -11
  2. package/completions/_shipwright +248 -94
  3. package/completions/shipwright.bash +68 -19
  4. package/completions/shipwright.fish +310 -42
  5. package/config/decision-tiers.json +55 -0
  6. package/config/defaults.json +111 -0
  7. package/config/event-schema.json +218 -0
  8. package/config/policy.json +21 -18
  9. package/dashboard/coverage/coverage-summary.json +14 -0
  10. package/dashboard/public/index.html +1 -1
  11. package/dashboard/server.ts +306 -17
  12. package/dashboard/src/components/charts/bar.test.ts +79 -0
  13. package/dashboard/src/components/charts/donut.test.ts +68 -0
  14. package/dashboard/src/components/charts/pipeline-rail.test.ts +117 -0
  15. package/dashboard/src/components/charts/sparkline.test.ts +125 -0
  16. package/dashboard/src/core/api.test.ts +309 -0
  17. package/dashboard/src/core/helpers.test.ts +301 -0
  18. package/dashboard/src/core/router.test.ts +307 -0
  19. package/dashboard/src/core/router.ts +7 -0
  20. package/dashboard/src/core/sse.test.ts +144 -0
  21. package/dashboard/src/views/metrics.test.ts +186 -0
  22. package/dashboard/src/views/overview.test.ts +173 -0
  23. package/dashboard/src/views/pipelines.test.ts +183 -0
  24. package/dashboard/src/views/team.test.ts +253 -0
  25. package/dashboard/vitest.config.ts +14 -5
  26. package/docs/TIPS.md +1 -1
  27. package/docs/patterns/README.md +1 -1
  28. package/package.json +7 -9
  29. package/scripts/adapters/docker-deploy.sh +1 -1
  30. package/scripts/adapters/tmux-adapter.sh +11 -1
  31. package/scripts/adapters/wezterm-adapter.sh +1 -1
  32. package/scripts/check-version-consistency.sh +1 -1
  33. package/scripts/lib/architecture.sh +127 -0
  34. package/scripts/lib/bootstrap.sh +75 -0
  35. package/scripts/lib/compat.sh +89 -6
  36. package/scripts/lib/config.sh +91 -0
  37. package/scripts/lib/daemon-adaptive.sh +3 -3
  38. package/scripts/lib/daemon-dispatch.sh +63 -17
  39. package/scripts/lib/daemon-failure.sh +0 -0
  40. package/scripts/lib/daemon-health.sh +1 -1
  41. package/scripts/lib/daemon-patrol.sh +64 -17
  42. package/scripts/lib/daemon-poll.sh +54 -25
  43. package/scripts/lib/daemon-state.sh +125 -23
  44. package/scripts/lib/daemon-triage.sh +31 -9
  45. package/scripts/lib/decide-autonomy.sh +295 -0
  46. package/scripts/lib/decide-scoring.sh +228 -0
  47. package/scripts/lib/decide-signals.sh +462 -0
  48. package/scripts/lib/fleet-failover.sh +63 -0
  49. package/scripts/lib/helpers.sh +29 -6
  50. package/scripts/lib/pipeline-detection.sh +2 -2
  51. package/scripts/lib/pipeline-github.sh +9 -9
  52. package/scripts/lib/pipeline-intelligence.sh +105 -38
  53. package/scripts/lib/pipeline-quality-checks.sh +17 -16
  54. package/scripts/lib/pipeline-quality.sh +1 -1
  55. package/scripts/lib/pipeline-stages.sh +440 -59
  56. package/scripts/lib/pipeline-state.sh +54 -4
  57. package/scripts/lib/policy.sh +0 -0
  58. package/scripts/lib/test-helpers.sh +247 -0
  59. package/scripts/postinstall.mjs +78 -12
  60. package/scripts/signals/example-collector.sh +36 -0
  61. package/scripts/sw +17 -7
  62. package/scripts/sw-activity.sh +1 -11
  63. package/scripts/sw-adaptive.sh +109 -85
  64. package/scripts/sw-adversarial.sh +4 -14
  65. package/scripts/sw-architecture-enforcer.sh +1 -11
  66. package/scripts/sw-auth.sh +8 -17
  67. package/scripts/sw-autonomous.sh +111 -49
  68. package/scripts/sw-changelog.sh +1 -11
  69. package/scripts/sw-checkpoint.sh +144 -20
  70. package/scripts/sw-ci.sh +2 -12
  71. package/scripts/sw-cleanup.sh +13 -17
  72. package/scripts/sw-code-review.sh +16 -36
  73. package/scripts/sw-connect.sh +5 -12
  74. package/scripts/sw-context.sh +9 -26
  75. package/scripts/sw-cost.sh +17 -18
  76. package/scripts/sw-daemon.sh +76 -71
  77. package/scripts/sw-dashboard.sh +57 -17
  78. package/scripts/sw-db.sh +524 -26
  79. package/scripts/sw-decide.sh +685 -0
  80. package/scripts/sw-decompose.sh +1 -11
  81. package/scripts/sw-deps.sh +15 -25
  82. package/scripts/sw-developer-simulation.sh +1 -11
  83. package/scripts/sw-discovery.sh +138 -30
  84. package/scripts/sw-doc-fleet.sh +7 -17
  85. package/scripts/sw-docs-agent.sh +6 -16
  86. package/scripts/sw-docs.sh +4 -12
  87. package/scripts/sw-doctor.sh +134 -43
  88. package/scripts/sw-dora.sh +11 -19
  89. package/scripts/sw-durable.sh +35 -52
  90. package/scripts/sw-e2e-orchestrator.sh +11 -27
  91. package/scripts/sw-eventbus.sh +115 -115
  92. package/scripts/sw-evidence.sh +114 -30
  93. package/scripts/sw-feedback.sh +3 -13
  94. package/scripts/sw-fix.sh +2 -20
  95. package/scripts/sw-fleet-discover.sh +1 -11
  96. package/scripts/sw-fleet-viz.sh +10 -18
  97. package/scripts/sw-fleet.sh +13 -17
  98. package/scripts/sw-github-app.sh +6 -16
  99. package/scripts/sw-github-checks.sh +1 -11
  100. package/scripts/sw-github-deploy.sh +1 -11
  101. package/scripts/sw-github-graphql.sh +2 -12
  102. package/scripts/sw-guild.sh +1 -11
  103. package/scripts/sw-heartbeat.sh +49 -12
  104. package/scripts/sw-hygiene.sh +45 -43
  105. package/scripts/sw-incident.sh +48 -74
  106. package/scripts/sw-init.sh +35 -37
  107. package/scripts/sw-instrument.sh +1 -11
  108. package/scripts/sw-intelligence.sh +368 -53
  109. package/scripts/sw-jira.sh +5 -14
  110. package/scripts/sw-launchd.sh +2 -12
  111. package/scripts/sw-linear.sh +8 -17
  112. package/scripts/sw-logs.sh +4 -12
  113. package/scripts/sw-loop.sh +905 -104
  114. package/scripts/sw-memory.sh +263 -20
  115. package/scripts/sw-mission-control.sh +2 -12
  116. package/scripts/sw-model-router.sh +73 -34
  117. package/scripts/sw-otel.sh +15 -23
  118. package/scripts/sw-oversight.sh +1 -11
  119. package/scripts/sw-patrol-meta.sh +5 -11
  120. package/scripts/sw-pipeline-composer.sh +7 -17
  121. package/scripts/sw-pipeline-vitals.sh +1 -11
  122. package/scripts/sw-pipeline.sh +550 -122
  123. package/scripts/sw-pm.sh +2 -12
  124. package/scripts/sw-pr-lifecycle.sh +33 -28
  125. package/scripts/sw-predictive.sh +16 -22
  126. package/scripts/sw-prep.sh +6 -16
  127. package/scripts/sw-ps.sh +1 -11
  128. package/scripts/sw-public-dashboard.sh +2 -12
  129. package/scripts/sw-quality.sh +85 -14
  130. package/scripts/sw-reaper.sh +1 -11
  131. package/scripts/sw-recruit.sh +15 -25
  132. package/scripts/sw-regression.sh +11 -21
  133. package/scripts/sw-release-manager.sh +19 -28
  134. package/scripts/sw-release.sh +8 -16
  135. package/scripts/sw-remote.sh +1 -11
  136. package/scripts/sw-replay.sh +48 -44
  137. package/scripts/sw-retro.sh +70 -92
  138. package/scripts/sw-review-rerun.sh +1 -1
  139. package/scripts/sw-scale.sh +174 -41
  140. package/scripts/sw-security-audit.sh +12 -22
  141. package/scripts/sw-self-optimize.sh +239 -23
  142. package/scripts/sw-session.sh +5 -15
  143. package/scripts/sw-setup.sh +8 -18
  144. package/scripts/sw-standup.sh +5 -15
  145. package/scripts/sw-status.sh +32 -23
  146. package/scripts/sw-strategic.sh +129 -13
  147. package/scripts/sw-stream.sh +1 -11
  148. package/scripts/sw-swarm.sh +76 -36
  149. package/scripts/sw-team-stages.sh +10 -20
  150. package/scripts/sw-templates.sh +4 -14
  151. package/scripts/sw-testgen.sh +3 -13
  152. package/scripts/sw-tmux-pipeline.sh +1 -19
  153. package/scripts/sw-tmux-role-color.sh +0 -10
  154. package/scripts/sw-tmux-status.sh +3 -11
  155. package/scripts/sw-tmux.sh +2 -20
  156. package/scripts/sw-trace.sh +1 -19
  157. package/scripts/sw-tracker-github.sh +0 -10
  158. package/scripts/sw-tracker-jira.sh +1 -11
  159. package/scripts/sw-tracker-linear.sh +1 -11
  160. package/scripts/sw-tracker.sh +7 -24
  161. package/scripts/sw-triage.sh +29 -39
  162. package/scripts/sw-upgrade.sh +5 -23
  163. package/scripts/sw-ux.sh +1 -19
  164. package/scripts/sw-webhook.sh +18 -32
  165. package/scripts/sw-widgets.sh +3 -21
  166. package/scripts/sw-worktree.sh +11 -27
  167. package/scripts/update-homebrew-sha.sh +73 -0
  168. package/templates/pipelines/tdd.json +72 -0
  169. package/scripts/sw-pipeline.sh.mock +0 -7
@@ -6,7 +6,7 @@
6
6
  set -euo pipefail
7
7
  trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
8
8
 
9
- VERSION="2.4.0"
9
+ VERSION="3.1.0"
10
10
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
11
11
  REPO_DIR="${REPO_DIR:-$(cd "$SCRIPT_DIR/.." && pwd)}"
12
12
 
@@ -34,15 +34,9 @@ if [[ "$(type -t emit_event 2>/dev/null)" != "function" ]]; then
34
34
  echo "${payload}}" >> "${HOME}/.shipwright/events.jsonl"
35
35
  }
36
36
  fi
37
- CYAN="${CYAN:-\033[38;2;0;212;255m}"
38
- PURPLE="${PURPLE:-\033[38;2;124;58;237m}"
39
- BLUE="${BLUE:-\033[38;2;0;102;255m}"
40
- GREEN="${GREEN:-\033[38;2;74;222;128m}"
41
- YELLOW="${YELLOW:-\033[38;2;250;204;21m}"
42
- RED="${RED:-\033[38;2;248;113;113m}"
43
- DIM="${DIM:-\033[2m}"
44
- BOLD="${BOLD:-\033[1m}"
45
- RESET="${RESET:-\033[0m}"
37
+ # ─── Database (for dual-write memory to DB) ───────────────────────────────────
38
+ # shellcheck source=sw-db.sh
39
+ [[ -f "$SCRIPT_DIR/sw-db.sh" ]] && source "$SCRIPT_DIR/sw-db.sh"
46
40
 
47
41
  # ─── Intelligence Engine (optional) ──────────────────────────────────────────
48
42
  # shellcheck source=sw-intelligence.sh
@@ -52,6 +46,192 @@ RESET="${RESET:-\033[0m}"
52
46
  MEMORY_ROOT="${HOME}/.shipwright/memory"
53
47
  GLOBAL_MEMORY="${MEMORY_ROOT}/global.json"
54
48
 
49
+ # ─── Domain keyword expansion (shared semantic concept) ──────────────────────
50
+
51
+ _expand_domain_keywords() {
52
+ local text="$1"
53
+ local expanded="$text"
54
+
55
+ local dom
56
+ for dom in auth api db ui test deploy error perf; do
57
+ case "$dom" in
58
+ auth) [[ "$text" =~ [aA]uth ]] && expanded="$expanded authentication authorization login session token credential permission access" ;;
59
+ api) [[ "$text" =~ [aA]pi ]] && expanded="$expanded endpoint route handler request response rest graphql" ;;
60
+ db) [[ "$text" =~ [dD]b ]] && expanded="$expanded database query migration schema model table sql" ;;
61
+ ui) [[ "$text" =~ [uU]i ]] && expanded="$expanded component view render template layout style css frontend" ;;
62
+ test) [[ "$text" =~ [tT]est ]] && expanded="$expanded testing assertion coverage mock stub fixture spec" ;;
63
+ deploy) [[ "$text" =~ [dD]eploy ]] && expanded="$expanded deployment release publish ship ci cd pipeline" ;;
64
+ error) [[ "$text" =~ [eE]rror ]] && expanded="$expanded exception failure crash bug issue defect" ;;
65
+ perf) [[ "$text" =~ [pP]erf ]] && expanded="$expanded performance optimization speed latency throughput cache" ;;
66
+ esac
67
+ done
68
+
69
+ echo "$expanded"
70
+ }
71
+
72
+ # ─── Embedding & Semantic Search ───────────────────────────────────────────
73
+
74
+ # Generate content hash for deduplication
75
+ _memory_content_hash() {
76
+ echo -n "$1" | shasum -a 256 | cut -d' ' -f1
77
+ }
78
+
79
+ # TF-IDF-like ranked search across failures, patterns, decisions
80
+ # Returns JSON array of {source_type, content_text} for injection compatibility
81
+ memory_ranked_search() {
82
+ local query="$1"
83
+ local memory_dir="$2"
84
+ local max_results="${3:-5}"
85
+
86
+ # Use repo memory dir when not specified
87
+ if [[ -z "$memory_dir" ]] && type repo_memory_dir &>/dev/null 2>&1; then
88
+ memory_dir="$(repo_memory_dir)"
89
+ fi
90
+ memory_dir="${memory_dir:-$HOME/.shipwright/memory}"
91
+ if [[ ! -d "$memory_dir" ]]; then
92
+ info "Memory dir not found at ${memory_dir} — auto-creating"
93
+ mkdir -p "$memory_dir"
94
+ emit_event "memory.not_available" "path=$memory_dir" "action=auto_created"
95
+ echo "[]"
96
+ return 0
97
+ fi
98
+
99
+ # Extract and expand query keywords
100
+ local keywords
101
+ keywords=$(echo "$query" | tr '[:upper:]' '[:lower:]' | tr -cs '[:alnum:]' '\n' | sort -u | \
102
+ grep -vxE '^.{1,2}$|^(the|and|for|not|with|this|that|from)$' || true)
103
+ keywords=$(_expand_domain_keywords "$keywords" 2>/dev/null || echo "$keywords")
104
+
105
+ local results_file
106
+ results_file=$(mktemp)
107
+
108
+ # Search failures.json
109
+ if [[ -f "$memory_dir/failures.json" ]]; then
110
+ jq -c '.failures[]? // empty' "$memory_dir/failures.json" 2>/dev/null | while IFS= read -r entry; do
111
+ [[ -z "$entry" ]] && continue
112
+ local entry_text
113
+ entry_text=$(echo "$entry" | jq -r '(.pattern // "") + " " + (.root_cause // "") + " " + (.fix // "")' 2>/dev/null)
114
+ local score=0
115
+ while IFS= read -r kw; do
116
+ [[ -z "$kw" ]] && continue
117
+ if echo "$entry_text" | grep -qiF "$kw" 2>/dev/null; then
118
+ score=$((score + 1))
119
+ fi
120
+ done <<< "$keywords"
121
+
122
+ # Boost by effectiveness
123
+ local effectiveness
124
+ effectiveness=$(echo "$entry" | jq -r '.fix_effectiveness_rate // 0' 2>/dev/null)
125
+ if [[ "$effectiveness" =~ ^[0-9]+$ ]] && [[ "$effectiveness" -gt 50 ]]; then
126
+ score=$((score + 2))
127
+ fi
128
+
129
+ if [[ "$score" -gt 0 ]]; then
130
+ local content
131
+ content=$(echo "$entry" | jq -r '(.pattern // "") + " | " + (.root_cause // "") + " | " + (.fix // "")' 2>/dev/null)
132
+ echo "${score}|{\"source_type\":\"failure\",\"content_text\":$(echo "$content" | jq -Rs .)}" >> "$results_file"
133
+ fi
134
+ done
135
+ fi
136
+
137
+ # Search decisions.json
138
+ if [[ -f "$memory_dir/decisions.json" ]]; then
139
+ jq -c '.decisions[]? // empty' "$memory_dir/decisions.json" 2>/dev/null | while IFS= read -r entry; do
140
+ [[ -z "$entry" ]] && continue
141
+ local entry_text
142
+ entry_text=$(echo "$entry" | jq -r '(.summary // "") + " " + (.detail // "") + " " + (.type // "")' 2>/dev/null)
143
+ local score=0
144
+ while IFS= read -r kw; do
145
+ [[ -z "$kw" ]] && continue
146
+ echo "$entry_text" | grep -qiF "$kw" 2>/dev/null && score=$((score + 1))
147
+ done <<< "$keywords"
148
+ if [[ "$score" -gt 0 ]]; then
149
+ local content
150
+ content=$(echo "$entry" | jq -r '(.summary // "") + " | " + (.detail // "")' 2>/dev/null)
151
+ echo "${score}|{\"source_type\":\"decision\",\"content_text\":$(echo "$content" | jq -Rs .)}" >> "$results_file"
152
+ fi
153
+ done
154
+ fi
155
+
156
+ # Search patterns.json (project, conventions, known_issues as text)
157
+ if [[ -f "$memory_dir/patterns.json" ]]; then
158
+ local entry_text
159
+ entry_text=$(jq -r 'to_entries | map(select(.key != "known_issues")) | from_entries | tostring' "$memory_dir/patterns.json" 2>/dev/null || echo "")
160
+ entry_text="$entry_text $(jq -r '.known_issues[]? // empty' "$memory_dir/patterns.json" 2>/dev/null | tr '\n' ' ')"
161
+ local score=0
162
+ while IFS= read -r kw; do
163
+ [[ -z "$kw" ]] && continue
164
+ echo "$entry_text" | grep -qiF "$kw" 2>/dev/null && score=$((score + 1))
165
+ done <<< "$keywords"
166
+ if [[ "$score" -gt 0 ]]; then
167
+ local content
168
+ content=$(jq -r 'to_entries | map("\(.key): \(.value)") | join(" | ")' "$memory_dir/patterns.json" 2>/dev/null | head -c 500)
169
+ echo "${score}|{\"source_type\":\"pattern\",\"content_text\":$(echo "$content" | jq -Rs .)}" >> "$results_file"
170
+ fi
171
+ fi
172
+
173
+ # Sort by score and output as JSON array
174
+ local output
175
+ if [[ -s "$results_file" ]]; then
176
+ output=$(sort -t'|' -k1 -rn "$results_file" | head -"$max_results" | cut -d'|' -f2- | jq -s '.' 2>/dev/null || echo "[]")
177
+ else
178
+ output="[]"
179
+ fi
180
+ rm -f "$results_file" 2>/dev/null || true
181
+ echo "$output"
182
+ }
183
+
184
+ # Store a memory with its text content for future embedding
185
+ memory_store_for_embedding() {
186
+ local source_type="$1" content_text="$2" repo_hash="${3:-}"
187
+ local content_hash
188
+ content_hash=$(_memory_content_hash "$content_text")
189
+
190
+ if type db_save_embedding >/dev/null 2>&1; then
191
+ db_save_embedding "$content_hash" "$source_type" "$content_text" "$repo_hash" 2>/dev/null || true
192
+ fi
193
+ }
194
+
195
+ # Check if vector embeddings search is available (future: SQLite vec0, etc.)
196
+ _has_embeddings() {
197
+ return 1 # No embedding-based search yet
198
+ }
199
+
200
+ # Semantic search: embeddings when available, else TF-IDF-like ranked keyword search
201
+ memory_semantic_search() {
202
+ local query="$1" repo_hash="${2:-}" limit="${3:-5}"
203
+
204
+ if _has_embeddings 2>/dev/null; then
205
+ # Future: _search_embeddings "$query" "$repo_hash" "$limit"
206
+ :
207
+ fi
208
+
209
+ # Fall back to ranked keyword search (better than SQL LIKE or grep)
210
+ local mem_dir
211
+ mem_dir=""
212
+ if type repo_memory_dir &>/dev/null 2>&1; then
213
+ mem_dir="$(repo_memory_dir)"
214
+ fi
215
+ memory_ranked_search "$query" "$mem_dir" "$limit"
216
+ }
217
+
218
+ # Inject relevant memories into agent prompts (goal-based)
219
+ memory_inject_goal_context() {
220
+ local goal="$1" repo_hash="${2:-}" max_tokens="${3:-2000}"
221
+
222
+ local memories
223
+ memories=$(memory_semantic_search "$goal" "$repo_hash" 5 2>/dev/null || echo "[]")
224
+
225
+ if [[ "$memories" == "[]" || -z "$memories" ]]; then
226
+ return
227
+ fi
228
+
229
+ echo "## Relevant Past Context"
230
+ echo ""
231
+ echo "$memories" | jq -r '.[] | "- [\(.source_type)] \(.content_text | .[0:200])"' 2>/dev/null || true
232
+ echo ""
233
+ }
234
+
55
235
  # Get a deterministic hash for the current repo
56
236
  repo_hash() {
57
237
  local origin
@@ -198,7 +378,10 @@ memory_capture_failure() {
198
378
  pattern=$(echo "$error_output" | head -1 | cut -c1-200)
199
379
  fi
200
380
 
201
- [[ -z "$pattern" ]] && return 0
381
+ if [[ -z "$pattern" ]]; then
382
+ warn "Memory capture: empty error pattern — skipping"
383
+ return 0
384
+ fi
202
385
 
203
386
  # Check for duplicate — increment seen_count if pattern already exists
204
387
  local existing_idx
@@ -207,7 +390,7 @@ memory_capture_failure() {
207
390
  "$failures_file" 2>/dev/null || echo "-1")
208
391
 
209
392
  (
210
- if command -v flock &>/dev/null; then
393
+ if command -v flock >/dev/null 2>&1; then
211
394
  flock -w 10 200 2>/dev/null || { warn "Memory lock timeout"; return 1; }
212
395
  fi
213
396
  local tmp_file
@@ -237,6 +420,15 @@ memory_capture_failure() {
237
420
  fi
238
421
  ) 200>"${failures_file}.lock"
239
422
 
423
+ # Dual-write to DB
424
+ if type db_record_failure >/dev/null 2>&1; then
425
+ local rhash
426
+ rhash="$(repo_hash)"
427
+ db_record_failure "$rhash" "unknown" "$pattern" "" "" "" "$stage" 2>/dev/null || true
428
+ fi
429
+
430
+ memory_store_for_embedding "failure" "$pattern" "$(repo_hash)" 2>/dev/null || true
431
+
240
432
  emit_event "memory.failure" "stage=${stage}" "pattern=${pattern:0:80}"
241
433
  }
242
434
 
@@ -274,7 +466,7 @@ memory_record_fix_outcome() {
274
466
  [[ "$fix_resolved" == "true" ]] && resolved_inc=1
275
467
 
276
468
  (
277
- if command -v flock &>/dev/null; then
469
+ if command -v flock >/dev/null 2>&1; then
278
470
  flock -w 10 200 2>/dev/null || { warn "Memory lock timeout"; return 1; }
279
471
  fi
280
472
  local tmp_file
@@ -583,7 +775,7 @@ Return JSON only, no markdown fences, no explanation."
583
775
  fi
584
776
 
585
777
  # Validate category against shared taxonomy (compat.sh) or built-in list
586
- if type sw_valid_error_category &>/dev/null 2>&1; then
778
+ if type sw_valid_error_category >/dev/null 2>&1; then
587
779
  if ! sw_valid_error_category "$category"; then
588
780
  category="unknown"
589
781
  fi
@@ -725,6 +917,14 @@ memory_capture_pattern() {
725
917
  }
726
918
  }' "$patterns_file" > "$tmp_file" && mv "$tmp_file" "$patterns_file"
727
919
 
920
+ # Dual-write to DB
921
+ if type db_save_pattern >/dev/null 2>&1; then
922
+ local rhash proj_desc
923
+ rhash="$(repo_hash)"
924
+ proj_desc="type=$proj_type,framework=$framework,test_runner=$test_runner,package_manager=$pkg_mgr,language=$language"
925
+ db_save_pattern "$rhash" "project" "project" "$proj_desc" "" 2>/dev/null || true
926
+ fi
927
+ memory_store_for_embedding "pattern" "project: $proj_type/$framework, $pkg_mgr, $language" "$(repo_hash)" 2>/dev/null || true
728
928
  emit_event "memory.pattern" "type=project" "proj_type=${proj_type}" "framework=${framework}"
729
929
  success "Captured project patterns (${proj_type}/${framework:-none})"
730
930
  ;;
@@ -740,6 +940,14 @@ memory_capture_pattern() {
740
940
  else . + {known_issues: [$issue]}
741
941
  end | .known_issues = (.known_issues | .[-50:])' \
742
942
  "$patterns_file" > "$tmp_file" && mv "$tmp_file" "$patterns_file"
943
+ # Dual-write to DB
944
+ if type db_save_pattern >/dev/null 2>&1; then
945
+ local rhash issue_key
946
+ rhash="$(repo_hash)"
947
+ issue_key=$(echo -n "$pattern_data" | shasum -a 256 | cut -c1-16)
948
+ db_save_pattern "$rhash" "known_issue" "$issue_key" "$pattern_data" "" 2>/dev/null || true
949
+ fi
950
+ memory_store_for_embedding "pattern" "known_issue: $pattern_data" "$(repo_hash)" 2>/dev/null || true
743
951
  emit_event "memory.pattern" "type=known_issue"
744
952
  fi
745
953
  ;;
@@ -758,7 +966,7 @@ memory_inject_context() {
758
966
  local stage_id="${1:-}"
759
967
 
760
968
  # Try intelligence-ranked search first
761
- if type intelligence_search_memory &>/dev/null 2>&1; then
969
+ if type intelligence_search_memory >/dev/null 2>&1; then
762
970
  local config="${REPO_DIR:-.}/.claude/daemon-config.json"
763
971
  local intel_enabled="false"
764
972
  if [[ -f "$config" ]]; then
@@ -788,6 +996,7 @@ memory_inject_context() {
788
996
  done
789
997
 
790
998
  if [[ "$has_memory" == "false" ]]; then
999
+ info "No memory available for repo (${mem_dir}) — first pipeline run will seed it"
791
1000
  echo "# No memory available for this repository yet."
792
1001
  return 0
793
1002
  fi
@@ -928,7 +1137,17 @@ memory_inject_context() {
928
1137
  ;;
929
1138
 
930
1139
  *)
931
- # Generic context for any stage inject top-K most relevant across all categories
1140
+ # Generic context — use ranked semantic search when intelligence unavailable
1141
+ if ! type intelligence_search_memory &>/dev/null 2>&1; then
1142
+ local ranked_json
1143
+ ranked_json=$(memory_ranked_search "${stage_id} stage context" "$mem_dir" 5 2>/dev/null || echo "[]")
1144
+ if [[ -n "$ranked_json" && "$ranked_json" != "[]" ]]; then
1145
+ echo "## Ranked Relevant Memory"
1146
+ echo "$ranked_json" | jq -r '.[]? | "- [\(.source_type)] \(.content_text[0:200])"' 2>/dev/null || true
1147
+ echo ""
1148
+ fi
1149
+ fi
1150
+
932
1151
  echo "## Repository Patterns"
933
1152
  if [[ -f "$mem_dir/patterns.json" ]]; then
934
1153
  jq -r 'to_entries | map(select(.key != "known_issues")) | from_entries' \
@@ -1155,6 +1374,15 @@ memory_capture_decision() {
1155
1374
  }] | .decisions = (.decisions | .[-100:])' \
1156
1375
  "$decisions_file" > "$tmp_file" && mv "$tmp_file" "$decisions_file"
1157
1376
 
1377
+ # Dual-write to DB
1378
+ if type db_save_decision >/dev/null 2>&1; then
1379
+ local rhash
1380
+ rhash="$(repo_hash)"
1381
+ db_save_decision "$rhash" "$dec_type" "${detail:-}" "$summary" "" 2>/dev/null || true
1382
+ fi
1383
+
1384
+ memory_store_for_embedding "decision" "${dec_type}: ${summary} - ${detail:-}" "$(repo_hash)" 2>/dev/null || true
1385
+
1158
1386
  emit_event "memory.decision" "type=${dec_type}" "summary=${summary:0:80}"
1159
1387
  success "Recorded decision: ${summary}"
1160
1388
  }
@@ -1276,10 +1504,17 @@ memory_show() {
1276
1504
  }
1277
1505
 
1278
1506
  memory_search() {
1507
+ if [[ "${1:-}" == "--semantic" ]]; then
1508
+ shift
1509
+ memory_semantic_search "$*" "" 10
1510
+ exit 0
1511
+ fi
1512
+
1279
1513
  local keyword="${1:-}"
1280
1514
 
1281
1515
  if [[ -z "$keyword" ]]; then
1282
1516
  error "Usage: shipwright memory search <keyword>"
1517
+ echo -e " ${DIM}Or: shipwright memory search --semantic <query>${RESET}"
1283
1518
  return 1
1284
1519
  fi
1285
1520
 
@@ -1296,10 +1531,10 @@ memory_search() {
1296
1531
  local found=0
1297
1532
 
1298
1533
  # ── Semantic search via intelligence (if available) ──
1299
- if type intelligence_search_memory &>/dev/null 2>&1; then
1534
+ if type intelligence_search_memory >/dev/null 2>&1; then
1300
1535
  local semantic_results
1301
1536
  semantic_results=$(intelligence_search_memory "$keyword" "$mem_dir" 5 2>/dev/null || echo "")
1302
- if [[ -n "$semantic_results" ]] && echo "$semantic_results" | jq -e '.results | length > 0' &>/dev/null; then
1537
+ if [[ -n "$semantic_results" ]] && echo "$semantic_results" | jq -e '.results | length > 0' >/dev/null 2>&1; then
1303
1538
  echo -e " ${BOLD}${CYAN}Semantic Results (AI-ranked):${RESET}"
1304
1539
  local result_count
1305
1540
  result_count=$(echo "$semantic_results" | jq '.results | length')
@@ -1417,6 +1652,11 @@ memory_export() {
1417
1652
  local mem_dir
1418
1653
  mem_dir="$(repo_memory_dir)"
1419
1654
 
1655
+ # Ensure all memory files exist (jq --slurpfile fails on missing files)
1656
+ for f in patterns.json failures.json decisions.json metrics.json; do
1657
+ [[ -f "$mem_dir/$f" ]] || echo '{}' > "$mem_dir/$f"
1658
+ done
1659
+
1420
1660
  # Merge all memory files into a single JSON export
1421
1661
  local export_json
1422
1662
  export_json=$(jq -n \
@@ -1532,8 +1772,10 @@ memory_stats() {
1532
1772
  # Event-based hit rate
1533
1773
  local inject_count capture_count
1534
1774
  if [[ -f "$EVENTS_FILE" ]]; then
1535
- inject_count=$(grep -c '"memory.inject"' "$EVENTS_FILE" 2>/dev/null || echo 0)
1536
- capture_count=$(grep -c '"memory.capture"' "$EVENTS_FILE" 2>/dev/null || echo 0)
1775
+ inject_count=$(grep -c '"memory.inject"' "$EVENTS_FILE" 2>/dev/null || true)
1776
+ inject_count="${inject_count:-0}"
1777
+ capture_count=$(grep -c '"memory.capture"' "$EVENTS_FILE" 2>/dev/null || true)
1778
+ capture_count="${capture_count:-0}"
1537
1779
  echo ""
1538
1780
  echo -e " ${BOLD}Usage${RESET}"
1539
1781
  printf " %-18s %s\n" "Context injections:" "$inject_count"
@@ -1557,6 +1799,7 @@ show_help() {
1557
1799
  echo -e " ${CYAN}show${RESET} Display memory for current repo"
1558
1800
  echo -e " ${CYAN}show${RESET} --global Display cross-repo learnings"
1559
1801
  echo -e " ${CYAN}search${RESET} <keyword> Search memory for keyword"
1802
+ echo -e " ${CYAN}search${RESET} --semantic <query> Semantic search via memory_embeddings"
1560
1803
  echo -e " ${CYAN}forget${RESET} --all Clear memory for current repo"
1561
1804
  echo -e " ${CYAN}export${RESET} Export memory as JSON"
1562
1805
  echo -e " ${CYAN}import${RESET} <file> Import memory from JSON"
@@ -7,7 +7,7 @@
7
7
  set -euo pipefail
8
8
  trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
9
9
 
10
- VERSION="2.4.0"
10
+ VERSION="3.1.0"
11
11
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
12
12
  REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
13
13
 
@@ -35,16 +35,6 @@ if [[ "$(type -t emit_event 2>/dev/null)" != "function" ]]; then
35
35
  echo "${payload}}" >> "${HOME}/.shipwright/events.jsonl"
36
36
  }
37
37
  fi
38
- CYAN="${CYAN:-\033[38;2;0;212;255m}"
39
- PURPLE="${PURPLE:-\033[38;2;124;58;237m}"
40
- BLUE="${BLUE:-\033[38;2;0;102;255m}"
41
- GREEN="${GREEN:-\033[38;2;74;222;128m}"
42
- YELLOW="${YELLOW:-\033[38;2;250;204;21m}"
43
- RED="${RED:-\033[38;2;248;113;113m}"
44
- DIM="${DIM:-\033[2m}"
45
- BOLD="${BOLD:-\033[1m}"
46
- RESET="${RESET:-\033[0m}"
47
-
48
38
  format_duration() {
49
39
  local secs="$1"
50
40
  if [[ "$secs" -ge 3600 ]]; then
@@ -268,7 +258,7 @@ show_resource_usage() {
268
258
 
269
259
  echo -e "${BOLD}System Resources${RESET}"
270
260
 
271
- if command -v top &>/dev/null || command -v ps &>/dev/null; then
261
+ if command -v top >/dev/null 2>&1 || command -v ps >/dev/null 2>&1; then
272
262
  # Get system memory and CPU stats
273
263
  local mem_pct=65
274
264
  local cpu_pct=42
@@ -7,7 +7,7 @@
7
7
  set -euo pipefail
8
8
  trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
9
9
 
10
- VERSION="2.4.0"
10
+ VERSION="3.1.0"
11
11
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
12
12
  REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
13
13
 
@@ -35,21 +35,17 @@ if [[ "$(type -t emit_event 2>/dev/null)" != "function" ]]; then
35
35
  echo "${payload}}" >> "${HOME}/.shipwright/events.jsonl"
36
36
  }
37
37
  fi
38
- CYAN="${CYAN:-\033[38;2;0;212;255m}"
39
- PURPLE="${PURPLE:-\033[38;2;124;58;237m}"
40
- BLUE="${BLUE:-\033[38;2;0;102;255m}"
41
- GREEN="${GREEN:-\033[38;2;74;222;128m}"
42
- YELLOW="${YELLOW:-\033[38;2;250;204;21m}"
43
- RED="${RED:-\033[38;2;248;113;113m}"
44
- DIM="${DIM:-\033[2m}"
45
- BOLD="${BOLD:-\033[1m}"
46
- RESET="${RESET:-\033[0m}"
47
-
48
38
  # ─── File Paths ────────────────────────────────────────────────────────────
49
- MODEL_ROUTING_CONFIG="${HOME}/.shipwright/model-routing.json"
50
- MODEL_USAGE_LOG="${HOME}/.shipwright/model-usage.jsonl"
39
+ # Unified: prefer optimization dir (written by self-optimize), fallback to legacy
40
+ OPTIMIZATION_DIR="${HOME}/.shipwright/optimization"
41
+ MODEL_ROUTING_OPTIMIZATION="${OPTIMIZATION_DIR}/model-routing.json"
42
+ MODEL_ROUTING_LEGACY="${HOME}/.shipwright/model-routing.json"
43
+ MODEL_USAGE_LOG="${OPTIMIZATION_DIR}/model-usage.jsonl"
51
44
  AB_RESULTS_FILE="${HOME}/.shipwright/ab-results.jsonl"
52
45
 
46
+ # Resolve which config file to use (set by _resolve_routing_config)
47
+ MODEL_ROUTING_CONFIG=""
48
+
53
49
  # ─── Model Costs (per million tokens) ───────────────────────────────────────
54
50
  HAIKU_INPUT_COST="0.80"
55
51
  HAIKU_OUTPUT_COST="4.00"
@@ -70,9 +66,25 @@ OPUS_STAGES="plan|design|build|compound_quality"
70
66
  COMPLEXITY_LOW=30 # Below this: use sonnet
71
67
  COMPLEXITY_HIGH=80 # Above this: use opus
72
68
 
69
+ # ─── Resolve Routing Config Path ────────────────────────────────────────────
70
+ # Priority: optimization (self-optimize writes) > legacy > create in optimization
71
+ _resolve_routing_config() {
72
+ if [[ -f "$MODEL_ROUTING_OPTIMIZATION" ]]; then
73
+ MODEL_ROUTING_CONFIG="$MODEL_ROUTING_OPTIMIZATION"
74
+ return
75
+ fi
76
+ if [[ -f "$MODEL_ROUTING_LEGACY" ]]; then
77
+ MODEL_ROUTING_CONFIG="$MODEL_ROUTING_LEGACY"
78
+ return
79
+ fi
80
+ # Neither exists — use optimization as canonical location
81
+ MODEL_ROUTING_CONFIG="$MODEL_ROUTING_OPTIMIZATION"
82
+ }
83
+
73
84
  # ─── Ensure Config File Exists ──────────────────────────────────────────────
74
85
  ensure_config() {
75
- mkdir -p "${HOME}/.shipwright"
86
+ _resolve_routing_config
87
+ mkdir -p "$(dirname "$MODEL_ROUTING_CONFIG")"
76
88
 
77
89
  if [[ ! -f "$MODEL_ROUTING_CONFIG" ]]; then
78
90
  cat > "$MODEL_ROUTING_CONFIG" <<'CONFIG'
@@ -127,26 +139,53 @@ route_model() {
127
139
  fi
128
140
 
129
141
  local model=""
142
+ _resolve_routing_config
143
+
144
+ # Strategy 1: Optimization file (self-optimize format) — .routes.stage.model or .routes.stage.recommended
145
+ if [[ -n "$MODEL_ROUTING_CONFIG" && -f "$MODEL_ROUTING_CONFIG" ]] && command -v jq >/dev/null 2>&1; then
146
+ local from_routes
147
+ from_routes=$(jq -r --arg s "$stage" '.routes[$s].model // .routes[$s].recommended // .[$s].recommended // .[$s].model // empty' "$MODEL_ROUTING_CONFIG" 2>/dev/null || true)
148
+ if [[ -n "$from_routes" && "$from_routes" =~ ^(haiku|sonnet|opus)$ ]]; then
149
+ model="$from_routes"
150
+ fi
151
+ # Fallback: legacy default_routing format
152
+ if [[ -z "$model" ]]; then
153
+ local from_default
154
+ from_default=$(jq -r --arg s "$stage" '.default_routing[$s] // empty' "$MODEL_ROUTING_CONFIG" 2>/dev/null || true)
155
+ if [[ -n "$from_default" && "$from_default" =~ ^(haiku|sonnet|opus)$ ]]; then
156
+ model="$from_default"
157
+ fi
158
+ fi
159
+ fi
130
160
 
131
- # Complexity-based override (applies to all stages)
132
- if [[ "$complexity" -lt "$COMPLEXITY_LOW" ]]; then
133
- model="sonnet"
134
- elif [[ "$complexity" -gt "$COMPLEXITY_HIGH" ]]; then
135
- model="opus"
136
- else
137
- # Stage-based routing for medium complexity
138
- if [[ "$stage" =~ $HAIKU_STAGES ]]; then
139
- model="haiku"
140
- elif [[ "$stage" =~ $SONNET_STAGES ]]; then
161
+ # Strategy 2: Built-in defaults (complexity + stage rules)
162
+ if [[ -z "$model" ]]; then
163
+ if [[ "$complexity" -lt "$COMPLEXITY_LOW" ]]; then
141
164
  model="sonnet"
142
- elif [[ "$stage" =~ $OPUS_STAGES ]]; then
165
+ elif [[ "$complexity" -gt "$COMPLEXITY_HIGH" ]]; then
143
166
  model="opus"
144
167
  else
145
- # Default to sonnet for unknown stages
146
- model="sonnet"
168
+ if [[ "$stage" =~ $HAIKU_STAGES ]]; then
169
+ model="haiku"
170
+ elif [[ "$stage" =~ $SONNET_STAGES ]]; then
171
+ model="sonnet"
172
+ elif [[ "$stage" =~ $OPUS_STAGES ]]; then
173
+ model="opus"
174
+ else
175
+ model="sonnet"
176
+ fi
147
177
  fi
148
178
  fi
149
179
 
180
+ # Complexity override: upgrade/downgrade based on complexity even when config says otherwise
181
+ if [[ "$complexity" -lt "$COMPLEXITY_LOW" && "$model" == "opus" ]]; then
182
+ model="sonnet"
183
+ elif [[ "$complexity" -gt "$COMPLEXITY_HIGH" && "$model" == "haiku" ]]; then
184
+ model="opus"
185
+ elif [[ "$complexity" -gt "$COMPLEXITY_HIGH" ]]; then
186
+ model="opus"
187
+ fi
188
+
150
189
  echo "$model"
151
190
  }
152
191
 
@@ -177,7 +216,7 @@ show_config() {
177
216
  info "Model Routing Configuration"
178
217
  echo ""
179
218
 
180
- if command -v jq &>/dev/null; then
219
+ if command -v jq >/dev/null 2>&1; then
181
220
  jq . "$MODEL_ROUTING_CONFIG" 2>/dev/null || cat "$MODEL_ROUTING_CONFIG"
182
221
  else
183
222
  cat "$MODEL_ROUTING_CONFIG"
@@ -196,7 +235,7 @@ set_config() {
196
235
 
197
236
  ensure_config
198
237
 
199
- if ! command -v jq &>/dev/null; then
238
+ if ! command -v jq >/dev/null 2>&1; then
200
239
  error "jq is required for config updates"
201
240
  return 1
202
241
  fi
@@ -298,7 +337,7 @@ record_usage() {
298
337
  local input_tokens="${3:-0}"
299
338
  local output_tokens="${4:-0}"
300
339
 
301
- mkdir -p "${HOME}/.shipwright"
340
+ mkdir -p "$(dirname "$MODEL_USAGE_LOG")"
302
341
 
303
342
  local cost
304
343
  cost=$(awk "BEGIN {}" ) # Calculate actual cost
@@ -330,7 +369,7 @@ configure_ab_test() {
330
369
 
331
370
  ensure_config
332
371
 
333
- if ! command -v jq &>/dev/null; then
372
+ if ! command -v jq >/dev/null 2>&1; then
334
373
  error "jq is required for A/B test configuration"
335
374
  return 1
336
375
  fi
@@ -370,7 +409,7 @@ show_report() {
370
409
  return 0
371
410
  fi
372
411
 
373
- if ! command -v jq &>/dev/null; then
412
+ if ! command -v jq >/dev/null 2>&1; then
374
413
  error "jq is required to view reports"
375
414
  return 1
376
415
  fi
@@ -432,7 +471,7 @@ show_ab_results() {
432
471
  return 0
433
472
  fi
434
473
 
435
- if ! command -v jq &>/dev/null; then
474
+ if ! command -v jq >/dev/null 2>&1; then
436
475
  error "jq is required to view A/B test results"
437
476
  return 1
438
477
  fi
@@ -520,7 +559,7 @@ main() {
520
559
  elif [[ "${1:-}" == "disable" ]]; then
521
560
  # Disable A/B testing
522
561
  ensure_config
523
- if command -v jq &>/dev/null; then
562
+ if command -v jq >/dev/null 2>&1; then
524
563
  local tmp_config
525
564
  tmp_config=$(mktemp)
526
565
  trap "rm -f '$tmp_config'" RETURN