shipwright-cli 2.2.1 → 2.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (156)
  1. package/README.md +19 -19
  2. package/dashboard/public/index.html +224 -8
  3. package/dashboard/public/styles.css +1078 -4
  4. package/dashboard/server.ts +1100 -15
  5. package/dashboard/src/canvas/interactions.ts +74 -0
  6. package/dashboard/src/canvas/layout.ts +85 -0
  7. package/dashboard/src/canvas/overlays.ts +117 -0
  8. package/dashboard/src/canvas/particles.ts +105 -0
  9. package/dashboard/src/canvas/renderer.ts +191 -0
  10. package/dashboard/src/components/charts/bar.ts +54 -0
  11. package/dashboard/src/components/charts/donut.ts +25 -0
  12. package/dashboard/src/components/charts/pipeline-rail.ts +105 -0
  13. package/dashboard/src/components/charts/sparkline.ts +82 -0
  14. package/dashboard/src/components/header.ts +616 -0
  15. package/dashboard/src/components/modal.ts +413 -0
  16. package/dashboard/src/components/terminal.ts +144 -0
  17. package/dashboard/src/core/api.ts +381 -0
  18. package/dashboard/src/core/helpers.ts +118 -0
  19. package/dashboard/src/core/router.ts +190 -0
  20. package/dashboard/src/core/sse.ts +38 -0
  21. package/dashboard/src/core/state.ts +150 -0
  22. package/dashboard/src/core/ws.ts +143 -0
  23. package/dashboard/src/design/icons.ts +131 -0
  24. package/dashboard/src/design/tokens.ts +160 -0
  25. package/dashboard/src/main.ts +68 -0
  26. package/dashboard/src/types/api.ts +337 -0
  27. package/dashboard/src/views/activity.ts +185 -0
  28. package/dashboard/src/views/agent-cockpit.ts +236 -0
  29. package/dashboard/src/views/agents.ts +72 -0
  30. package/dashboard/src/views/fleet-map.ts +299 -0
  31. package/dashboard/src/views/insights.ts +298 -0
  32. package/dashboard/src/views/machines.ts +162 -0
  33. package/dashboard/src/views/metrics.ts +420 -0
  34. package/dashboard/src/views/overview.ts +409 -0
  35. package/dashboard/src/views/pipeline-theater.ts +219 -0
  36. package/dashboard/src/views/pipelines.ts +595 -0
  37. package/dashboard/src/views/team.ts +362 -0
  38. package/dashboard/src/views/timeline.ts +389 -0
  39. package/dashboard/tsconfig.json +21 -0
  40. package/docs/AGI-PLATFORM-PLAN.md +5 -5
  41. package/docs/AGI-WHATS-NEXT.md +19 -16
  42. package/docs/README.md +2 -0
  43. package/package.json +8 -1
  44. package/scripts/check-version-consistency.sh +72 -0
  45. package/scripts/lib/daemon-adaptive.sh +610 -0
  46. package/scripts/lib/daemon-dispatch.sh +489 -0
  47. package/scripts/lib/daemon-failure.sh +387 -0
  48. package/scripts/lib/daemon-patrol.sh +1113 -0
  49. package/scripts/lib/daemon-poll.sh +1202 -0
  50. package/scripts/lib/daemon-state.sh +550 -0
  51. package/scripts/lib/daemon-triage.sh +490 -0
  52. package/scripts/lib/helpers.sh +81 -0
  53. package/scripts/lib/pipeline-intelligence.sh +0 -6
  54. package/scripts/lib/pipeline-quality-checks.sh +3 -1
  55. package/scripts/lib/pipeline-stages.sh +20 -0
  56. package/scripts/sw +109 -168
  57. package/scripts/sw-activity.sh +1 -1
  58. package/scripts/sw-adaptive.sh +2 -2
  59. package/scripts/sw-adversarial.sh +1 -1
  60. package/scripts/sw-architecture-enforcer.sh +1 -1
  61. package/scripts/sw-auth.sh +14 -6
  62. package/scripts/sw-autonomous.sh +1 -1
  63. package/scripts/sw-changelog.sh +2 -2
  64. package/scripts/sw-checkpoint.sh +1 -1
  65. package/scripts/sw-ci.sh +1 -1
  66. package/scripts/sw-cleanup.sh +1 -1
  67. package/scripts/sw-code-review.sh +1 -1
  68. package/scripts/sw-connect.sh +1 -1
  69. package/scripts/sw-context.sh +1 -1
  70. package/scripts/sw-cost.sh +1 -1
  71. package/scripts/sw-daemon.sh +53 -4817
  72. package/scripts/sw-dashboard.sh +1 -1
  73. package/scripts/sw-db.sh +1 -1
  74. package/scripts/sw-decompose.sh +1 -1
  75. package/scripts/sw-deps.sh +1 -1
  76. package/scripts/sw-developer-simulation.sh +1 -1
  77. package/scripts/sw-discovery.sh +1 -1
  78. package/scripts/sw-doc-fleet.sh +1 -1
  79. package/scripts/sw-docs-agent.sh +1 -1
  80. package/scripts/sw-docs.sh +1 -1
  81. package/scripts/sw-doctor.sh +49 -1
  82. package/scripts/sw-dora.sh +1 -1
  83. package/scripts/sw-durable.sh +1 -1
  84. package/scripts/sw-e2e-orchestrator.sh +1 -1
  85. package/scripts/sw-eventbus.sh +1 -1
  86. package/scripts/sw-feedback.sh +1 -1
  87. package/scripts/sw-fix.sh +6 -5
  88. package/scripts/sw-fleet-discover.sh +1 -1
  89. package/scripts/sw-fleet-viz.sh +3 -3
  90. package/scripts/sw-fleet.sh +1 -1
  91. package/scripts/sw-github-app.sh +5 -2
  92. package/scripts/sw-github-checks.sh +1 -1
  93. package/scripts/sw-github-deploy.sh +1 -1
  94. package/scripts/sw-github-graphql.sh +1 -1
  95. package/scripts/sw-guild.sh +1 -1
  96. package/scripts/sw-heartbeat.sh +1 -1
  97. package/scripts/sw-hygiene.sh +1 -1
  98. package/scripts/sw-incident.sh +1 -1
  99. package/scripts/sw-init.sh +112 -9
  100. package/scripts/sw-instrument.sh +6 -1
  101. package/scripts/sw-intelligence.sh +5 -1
  102. package/scripts/sw-jira.sh +1 -1
  103. package/scripts/sw-launchd.sh +1 -1
  104. package/scripts/sw-linear.sh +20 -9
  105. package/scripts/sw-logs.sh +1 -1
  106. package/scripts/sw-loop.sh +2 -1
  107. package/scripts/sw-memory.sh +10 -1
  108. package/scripts/sw-mission-control.sh +1 -1
  109. package/scripts/sw-model-router.sh +4 -1
  110. package/scripts/sw-otel.sh +4 -4
  111. package/scripts/sw-oversight.sh +1 -1
  112. package/scripts/sw-pipeline-composer.sh +3 -1
  113. package/scripts/sw-pipeline-vitals.sh +4 -6
  114. package/scripts/sw-pipeline.sh +19 -56
  115. package/scripts/sw-pipeline.sh.mock +7 -0
  116. package/scripts/sw-pm.sh +5 -2
  117. package/scripts/sw-pr-lifecycle.sh +1 -1
  118. package/scripts/sw-predictive.sh +4 -1
  119. package/scripts/sw-prep.sh +3 -2
  120. package/scripts/sw-ps.sh +1 -1
  121. package/scripts/sw-public-dashboard.sh +10 -4
  122. package/scripts/sw-quality.sh +1 -1
  123. package/scripts/sw-reaper.sh +1 -1
  124. package/scripts/sw-recruit.sh +25 -1
  125. package/scripts/sw-regression.sh +2 -1
  126. package/scripts/sw-release-manager.sh +1 -1
  127. package/scripts/sw-release.sh +7 -5
  128. package/scripts/sw-remote.sh +1 -1
  129. package/scripts/sw-replay.sh +1 -1
  130. package/scripts/sw-retro.sh +1 -1
  131. package/scripts/sw-scale.sh +11 -5
  132. package/scripts/sw-security-audit.sh +1 -1
  133. package/scripts/sw-self-optimize.sh +172 -7
  134. package/scripts/sw-session.sh +1 -1
  135. package/scripts/sw-setup.sh +1 -1
  136. package/scripts/sw-standup.sh +4 -3
  137. package/scripts/sw-status.sh +1 -1
  138. package/scripts/sw-strategic.sh +2 -1
  139. package/scripts/sw-stream.sh +8 -2
  140. package/scripts/sw-swarm.sh +12 -10
  141. package/scripts/sw-team-stages.sh +1 -1
  142. package/scripts/sw-templates.sh +1 -1
  143. package/scripts/sw-testgen.sh +3 -2
  144. package/scripts/sw-tmux-pipeline.sh +2 -1
  145. package/scripts/sw-tmux.sh +1 -1
  146. package/scripts/sw-trace.sh +1 -1
  147. package/scripts/sw-tracker-jira.sh +1 -0
  148. package/scripts/sw-tracker-linear.sh +1 -0
  149. package/scripts/sw-tracker.sh +24 -6
  150. package/scripts/sw-triage.sh +1 -1
  151. package/scripts/sw-upgrade.sh +1 -1
  152. package/scripts/sw-ux.sh +1 -1
  153. package/scripts/sw-webhook.sh +1 -1
  154. package/scripts/sw-widgets.sh +2 -2
  155. package/scripts/sw-worktree.sh +1 -1
  156. package/dashboard/public/app.js +0 -4422
@@ -6,7 +6,7 @@
6
6
  set -euo pipefail
7
7
  trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
8
8
 
9
- VERSION="2.2.1"
9
+ VERSION="2.3.0"
10
10
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
11
11
  REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
12
12
 
@@ -11,7 +11,7 @@
11
11
  # ║ shipwright reaper --watch Continuous loop (default: 5s) ║
12
12
  # ║ shipwright reaper --dry-run Preview what would be reaped ║
13
13
  # ╚═══════════════════════════════════════════════════════════════════════════╝
14
- VERSION="2.2.1"
14
+ VERSION="2.3.0"
15
15
  set -euo pipefail
16
16
  trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
17
17
 
@@ -284,7 +284,16 @@ initialize_builtin_roles() {
284
284
  }
285
285
  EOF
286
286
  )
287
- echo "$roles_json" | jq '.' > "$ROLES_DB"
287
+ local _tmp_roles
288
+ _tmp_roles=$(mktemp)
289
+ trap "rm -f '$_tmp_roles'" RETURN
290
+ if echo "$roles_json" | jq '.' > "$_tmp_roles" 2>/dev/null && [[ -s "$_tmp_roles" ]]; then
291
+ mv "$_tmp_roles" "$ROLES_DB"
292
+ else
293
+ rm -f "$_tmp_roles"
294
+ error "Failed to initialize roles DB"
295
+ return 1
296
+ fi
288
297
  success "Initialized 10 built-in agent roles"
289
298
  }
290
299
 
@@ -418,6 +427,7 @@ _recruit_record_match() {
418
427
  if [[ "$current_lines" -gt "$max_history" ]]; then
419
428
  local tmp_trunc
420
429
  tmp_trunc=$(mktemp)
430
+ trap "rm -f '$tmp_trunc'" RETURN
421
431
  tail -n "$max_history" "$MATCH_HISTORY" > "$tmp_trunc" && _recruit_locked_write "$MATCH_HISTORY" "$tmp_trunc" || rm -f "$tmp_trunc"
422
432
  fi
423
433
 
@@ -484,6 +494,7 @@ Return JSON only."
484
494
  # Persist to roles DB
485
495
  local tmp_file
486
496
  tmp_file=$(mktemp)
497
+ trap "rm -f '$tmp_file'" RETURN
487
498
  if jq --arg key "$role_key" --argjson role "$(echo "$result" | jq 'del(.key)')" '.[$key] = $role' "$ROLES_DB" > "$tmp_file"; then
488
499
  _recruit_locked_write "$ROLES_DB" "$tmp_file"
489
500
  else
@@ -536,6 +547,7 @@ Return JSON only."
536
547
 
537
548
  local tmp_file
538
549
  tmp_file=$(mktemp)
550
+ trap "rm -f '$tmp_file'" RETURN
539
551
  if jq --arg key "$role_key" --argjson role "$role_json" '.[$key] = $role' "$ROLES_DB" > "$tmp_file"; then
540
552
  _recruit_locked_write "$ROLES_DB" "$tmp_file"
541
553
  else
@@ -614,6 +626,7 @@ cmd_record_outcome() {
614
626
 
615
627
  local tmp_file
616
628
  tmp_file=$(mktemp)
629
+ trap "rm -f '$tmp_file'" RETURN
617
630
  jq --arg id "$agent_id" \
618
631
  --argjson tc "$tasks_completed" \
619
632
  --argjson sc "$success_count" \
@@ -651,6 +664,7 @@ cmd_record_outcome() {
651
664
  if [[ -f "$MATCH_HISTORY" ]]; then
652
665
  local tmp_mh
653
666
  tmp_mh=$(mktemp)
667
+ trap "rm -f '$tmp_mh'" RETURN
654
668
  # Find the most recent match for this agent_id with null outcome, and backfill
655
669
  awk -v agent="$agent_id" -v outcome="$outcome" '
656
670
  BEGIN { found = 0 }
@@ -745,6 +759,7 @@ _recruit_track_role_usage() {
745
759
 
746
760
  local tmp_file
747
761
  tmp_file=$(mktemp)
762
+ trap "rm -f '$tmp_file'" RETURN
748
763
  jq --arg role "$role" --arg event "$event" --arg ts "$(now_iso)" '
749
764
  .[$role] = (.[$role] // {matches: 0, successes: 0, failures: 0, last_used: ""}) |
750
765
  .[$role].last_used = $ts |
@@ -1213,6 +1228,7 @@ _recruit_meta_learning_check() {
1213
1228
 
1214
1229
  local tmp_file
1215
1230
  tmp_file=$(mktemp)
1231
+ trap "rm -f '$tmp_file'" RETURN
1216
1232
  jq --argjson corr "$correction" '
1217
1233
  .corrections = ((.corrections // []) + [$corr] | .[-100:])
1218
1234
  ' "$META_LEARNING_DB" > "$tmp_file" && _recruit_locked_write "$META_LEARNING_DB" "$tmp_file" || rm -f "$tmp_file"
@@ -1259,6 +1275,7 @@ _recruit_reflect() {
1259
1275
  # Track accuracy trend
1260
1276
  local tmp_file
1261
1277
  tmp_file=$(mktemp)
1278
+ trap "rm -f '$tmp_file'" RETURN
1262
1279
  jq --argjson acc "$accuracy" --arg ts "$(now_iso)" '
1263
1280
  .accuracy_trend = ((.accuracy_trend // []) + [{accuracy: $acc, ts: $ts}] | .[-50:]) |
1264
1281
  .last_reflection = $ts
@@ -1347,6 +1364,7 @@ _recruit_meta_validate_self_tune() {
1347
1364
  # Reset heuristics to empty (forces fallback to keyword_match defaults)
1348
1365
  local tmp_heur
1349
1366
  tmp_heur=$(mktemp)
1367
+ trap "rm -f '$tmp_heur'" RETURN
1350
1368
  echo '{"keyword_weights": {}, "meta_reverted_at": "'"$(now_iso)"'", "revert_reason": "accuracy_below_floor"}' > "$tmp_heur"
1351
1369
  _recruit_locked_write "$HEURISTICS_DB" "$tmp_heur" || rm -f "$tmp_heur"
1352
1370
  emit_event "recruit_meta_revert" "accuracy=${current_accuracy}" "floor=${accuracy_floor}" "reason=declining_below_floor"
@@ -1446,6 +1464,7 @@ Return JSON only."
1446
1464
 
1447
1465
  local tmp_file
1448
1466
  tmp_file=$(mktemp)
1467
+ trap "rm -f '$tmp_file'" RETURN
1449
1468
  jq --arg key "$role_key" --argjson role "$role_json" '.[$key] = $role' "$ROLES_DB" > "$tmp_file" && _recruit_locked_write "$ROLES_DB" "$tmp_file" || rm -f "$tmp_file"
1450
1469
 
1451
1470
  # Update heuristics with trigger keywords
@@ -1454,6 +1473,7 @@ Return JSON only."
1454
1473
  if [[ -n "$keywords" ]]; then
1455
1474
  local heur_tmp
1456
1475
  heur_tmp=$(mktemp)
1476
+ trap "rm -f '$heur_tmp'" RETURN
1457
1477
  while IFS= read -r kw; do
1458
1478
  [[ -z "$kw" ]] && continue
1459
1479
  jq --arg kw "$kw" --arg role "$role_key" \
@@ -1580,6 +1600,7 @@ Return JSON only."
1580
1600
  # Save the LLM-generated mind profile
1581
1601
  local tmp_file
1582
1602
  tmp_file=$(mktemp)
1603
+ trap "rm -f '$tmp_file'" RETURN
1583
1604
  jq --arg id "$agent_id" --argjson mind "$result" '.[$id] = ($mind + {updated: (now | todate)})' "$AGENT_MINDS_DB" > "$tmp_file" && _recruit_locked_write "$AGENT_MINDS_DB" "$tmp_file" || rm -f "$tmp_file"
1584
1605
 
1585
1606
  success "Mind profile generated:"
@@ -1621,6 +1642,7 @@ Return JSON only."
1621
1642
 
1622
1643
  local tmp_file
1623
1644
  tmp_file=$(mktemp)
1645
+ trap "rm -f '$tmp_file'" RETURN
1624
1646
  jq --arg id "$agent_id" --argjson mind "$mind_json" '.[$id] = $mind' "$AGENT_MINDS_DB" > "$tmp_file" && _recruit_locked_write "$AGENT_MINDS_DB" "$tmp_file" || rm -f "$tmp_file"
1625
1647
 
1626
1648
  local strengths_display="none detected"
@@ -1807,6 +1829,7 @@ cmd_self_tune() {
1807
1829
 
1808
1830
  local tmp_heuristics
1809
1831
  tmp_heuristics=$(mktemp)
1832
+ trap "rm -f '$tmp_heuristics'" RETURN
1810
1833
  cp "$HEURISTICS_DB" "$tmp_heuristics"
1811
1834
 
1812
1835
  local i=0
@@ -2553,6 +2576,7 @@ cmd_audit() {
2553
2576
  if [[ -f "$META_LEARNING_DB" ]]; then
2554
2577
  local tmp_audit
2555
2578
  tmp_audit=$(mktemp)
2579
+ trap "rm -f '$tmp_audit'" RETURN
2556
2580
  jq --argjson score "$score" --arg ts "$(now_iso)" --argjson fails "$fail_count" '
2557
2581
  .audit_trend = ((.audit_trend // []) + [{score: $score, ts: $ts, failures: $fails}] | .[-50:])
2558
2582
  ' "$META_LEARNING_DB" > "$tmp_audit" && _recruit_locked_write "$META_LEARNING_DB" "$tmp_audit" || rm -f "$tmp_audit"
@@ -6,7 +6,7 @@
6
6
  set -euo pipefail
7
7
  trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
8
8
 
9
- VERSION="2.2.1"
9
+ VERSION="2.3.0"
10
10
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
11
11
  REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
12
12
 
@@ -198,6 +198,7 @@ cmd_baseline() {
198
198
 
199
199
  local tmp_file
200
200
  tmp_file=$(mktemp "${baseline_file}.tmp.XXXXXX")
201
+ trap "rm -f '$tmp_file'" RETURN
201
202
 
202
203
  echo "$metrics" > "$tmp_file"
203
204
  mv "$tmp_file" "$baseline_file"
@@ -6,7 +6,7 @@
6
6
  set -euo pipefail
7
7
  trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
8
8
 
9
- VERSION="2.2.1"
9
+ VERSION="2.3.0"
10
10
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
11
11
  REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
12
12
 
@@ -5,8 +5,9 @@
5
5
  # ╚═══════════════════════════════════════════════════════════════════════════╝
6
6
  set -euo pipefail
7
7
  trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
8
+ trap 'rm -f "${tmp_file:-}"' EXIT
8
9
 
9
- VERSION="2.2.1"
10
+ VERSION="2.3.0"
10
11
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
11
12
  REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
12
13
 
@@ -258,7 +259,7 @@ generate_changelog_md() {
258
259
  echo ""
259
260
  echo "$breaking_commits" | while IFS='|' read -r hash subject; do
260
261
  [[ -z "$hash" ]] && continue
261
- echo "- $subject ([\`$hash\`](https://github.com/sethdford/shipwright/commit/$hash))"
262
+ echo "- $subject ([\`$hash\`]($(_sw_github_url)/commit/$hash))"
262
263
  done
263
264
  echo ""
264
265
  fi
@@ -269,7 +270,7 @@ generate_changelog_md() {
269
270
  echo ""
270
271
  echo "$features_commits" | while IFS='|' read -r hash subject; do
271
272
  [[ -z "$hash" ]] && continue
272
- echo "- $subject ([\`$hash\`](https://github.com/sethdford/shipwright/commit/$hash))"
273
+ echo "- $subject ([\`$hash\`]($(_sw_github_url)/commit/$hash))"
273
274
  done
274
275
  echo ""
275
276
  fi
@@ -280,7 +281,7 @@ generate_changelog_md() {
280
281
  echo ""
281
282
  echo "$fixes_commits" | while IFS='|' read -r hash subject; do
282
283
  [[ -z "$hash" ]] && continue
283
- echo "- $subject ([\`$hash\`](https://github.com/sethdford/shipwright/commit/$hash))"
284
+ echo "- $subject ([\`$hash\`]($(_sw_github_url)/commit/$hash))"
284
285
  done
285
286
  echo ""
286
287
  fi
@@ -291,7 +292,7 @@ generate_changelog_md() {
291
292
  echo ""
292
293
  echo "$docs_commits" | while IFS='|' read -r hash subject; do
293
294
  [[ -z "$hash" ]] && continue
294
- echo "- $subject ([\`$hash\`](https://github.com/sethdford/shipwright/commit/$hash))"
295
+ echo "- $subject ([\`$hash\`]($(_sw_github_url)/commit/$hash))"
295
296
  done
296
297
  echo ""
297
298
  fi
@@ -346,6 +347,7 @@ update_version_in_files() {
346
347
  # This is shell-safe: VERSION="1.11.0" → VERSION="1.12.0"
347
348
  local tmp_file
348
349
  tmp_file=$(mktemp)
350
+ trap "rm -f '$tmp_file'" RETURN
349
351
  sed 's/^VERSION="[^"]*"$/VERSION="'"$version_num"'"/' "$file" > "$tmp_file"
350
352
  mv "$tmp_file" "$file"
351
353
  success "Updated VERSION in $(basename "$file")"
@@ -6,7 +6,7 @@
6
6
  set -euo pipefail
7
7
  trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
8
8
 
9
- VERSION="2.2.1"
9
+ VERSION="2.3.0"
10
10
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
11
11
  REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
12
12
 
@@ -6,7 +6,7 @@
6
6
  set -euo pipefail
7
7
  trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
8
8
 
9
- VERSION="2.2.1"
9
+ VERSION="2.3.0"
10
10
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
11
11
  EVENTS_FILE="${HOME}/.shipwright/events.jsonl"
12
12
 
@@ -6,7 +6,7 @@
6
6
  set -euo pipefail
7
7
  trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
8
8
 
9
- VERSION="2.2.1"
9
+ VERSION="2.3.0"
10
10
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
11
11
 
12
12
  # ─── Cross-platform compatibility ──────────────────────────────────────────
@@ -6,7 +6,7 @@
6
6
  set -euo pipefail
7
7
  trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
8
8
 
9
- VERSION="2.2.1"
9
+ VERSION="2.3.0"
10
10
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
11
11
 
12
12
  # ─── Dependency check ─────────────────────────────────────────────────────────
@@ -64,6 +64,7 @@ init_rules() {
64
64
  if [[ ! -f "$SCALE_RULES_FILE" ]]; then
65
65
  local tmp_file
66
66
  tmp_file=$(mktemp)
67
+ trap "rm -f '$tmp_file'" RETURN
67
68
  cat > "$tmp_file" << 'JSON'
68
69
  {
69
70
  "iteration_threshold": 3,
@@ -105,10 +106,15 @@ in_cooldown() {
105
106
  update_scale_state() {
106
107
  local tmp_file
107
108
  tmp_file=$(mktemp)
109
+ trap "rm -f '$tmp_file'" RETURN
108
110
 
109
111
  if [[ -f "$SCALE_STATE_FILE" ]]; then
110
112
  # Update existing state
111
- jq --arg now "$(now_epoch)" '.last_scale_time = ($now | tonumber)' "$SCALE_STATE_FILE" > "$tmp_file"
113
+ if ! jq --arg now "$(now_epoch)" '.last_scale_time = ($now | tonumber)' "$SCALE_STATE_FILE" > "$tmp_file" 2>/dev/null || [[ ! -s "$tmp_file" ]]; then
114
+ rm -f "$tmp_file"
115
+ warn "Failed to update scale state"
116
+ return 1
117
+ fi
112
118
  else
113
119
  # Create new state
114
120
  cat > "$tmp_file" << JSON
@@ -237,15 +243,15 @@ cmd_rules() {
237
243
 
238
244
  local tmp_file
239
245
  tmp_file=$(mktemp)
246
+ trap "rm -f '$tmp_file'" RETURN
240
247
 
241
248
  jq --arg key "$key" --arg value "$value" \
242
249
  'if ($value | test("^[0-9]+$")) then
243
250
  .[$key] = ($value | tonumber)
244
251
  else
245
252
  .[$key] = $value
246
- end' "$SCALE_RULES_FILE" > "$tmp_file"
247
-
248
- mv "$tmp_file" "$SCALE_RULES_FILE"
253
+ end' "$SCALE_RULES_FILE" > "$tmp_file" && [[ -s "$tmp_file" ]] && \
254
+ mv "$tmp_file" "$SCALE_RULES_FILE" || { rm -f "$tmp_file"; error "Failed to update config"; return 1; }
249
255
  success "Updated: ${key} = ${value}"
250
256
  ;;
251
257
  reset)
@@ -6,7 +6,7 @@
6
6
  set -euo pipefail
7
7
  trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
8
8
 
9
- VERSION="2.2.1"
9
+ VERSION="2.3.0"
10
10
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
11
11
  REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
12
12
 
@@ -6,7 +6,7 @@
6
6
  set -euo pipefail
7
7
  trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
8
8
 
9
- VERSION="2.2.1"
9
+ VERSION="2.3.0"
10
10
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
11
11
  REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
12
12
 
@@ -139,6 +139,7 @@ optimize_analyze_outcome() {
139
139
  # Build outcome record using jq for proper escaping
140
140
  local tmp_outcome
141
141
  tmp_outcome=$(mktemp)
142
+ trap "rm -f '$tmp_outcome'" RETURN
142
143
  jq -c -n \
143
144
  --arg ts "$(now_iso)" \
144
145
  --arg issue "${issue_number:-unknown}" \
@@ -228,6 +229,7 @@ optimize_tune_templates() {
228
229
  local tmp_stats tmp_weights
229
230
  tmp_stats=$(mktemp)
230
231
  tmp_weights=$(mktemp)
232
+ trap "rm -f '$tmp_stats' '$tmp_weights'" RETURN
231
233
 
232
234
  # Extract template, labels, result from each outcome line
233
235
  while IFS= read -r line; do
@@ -334,6 +336,7 @@ optimize_tune_templates() {
334
336
  # Atomic write
335
337
  local tmp_cw
336
338
  tmp_cw=$(mktemp "${TEMPLATE_WEIGHTS_FILE}.tmp.XXXXXX")
339
+ trap "rm -f '$tmp_cw'" RETURN
337
340
  echo "$consumer_weights" > "$tmp_cw" && mv "$tmp_cw" "$TEMPLATE_WEIGHTS_FILE" || rm -f "$tmp_cw"
338
341
  fi
339
342
 
@@ -380,6 +383,7 @@ optimize_learn_iterations() {
380
383
  tmp_med=$(mktemp)
381
384
  tmp_high=$(mktemp)
382
385
  tmp_all_pairs=$(mktemp)
386
+ trap "rm -f '$tmp_low' '$tmp_med' '$tmp_high' '$tmp_all_pairs'" RETURN
383
387
 
384
388
  while IFS= read -r line; do
385
389
  local complexity iterations
@@ -440,6 +444,7 @@ optimize_learn_iterations() {
440
444
  # Write boundaries back to config (atomic)
441
445
  local tmp_clusters
442
446
  tmp_clusters=$(mktemp "${TMPDIR:-/tmp}/sw-clusters.XXXXXX")
447
+ trap "rm -f '$tmp_clusters'" RETURN
443
448
  jq -n \
444
449
  --argjson low_max "$new_low" \
445
450
  --argjson med_max "$new_med" \
@@ -485,20 +490,19 @@ optimize_learn_iterations() {
485
490
  med_stats=$(calc_stats "$tmp_med")
486
491
  high_stats=$(calc_stats "$tmp_high")
487
492
 
488
- # Build iteration model with predictions wrapper
493
+ # Build iteration model (flat format for readers: .low, .medium, .high)
489
494
  local tmp_model
490
495
  tmp_model=$(mktemp "${ITERATION_MODEL_FILE}.tmp.XXXXXX")
496
+ trap "rm -f '$tmp_model'" RETURN
491
497
  jq -n \
492
498
  --argjson low "$low_stats" \
493
499
  --argjson medium "$med_stats" \
494
500
  --argjson high "$high_stats" \
495
501
  --arg updated "$(now_iso)" \
496
502
  '{
497
- predictions: {
498
- low: {max_iterations: (if $low.mean > 0 then (($low.mean + $low.stddev) | floor | if . < 5 then 5 else . end) else 10 end), confidence: (if $low.samples >= 10 then 0.8 elif $low.samples >= 5 then 0.6 else 0.4 end), mean: $low.mean, stddev: $low.stddev, samples: $low.samples},
499
- medium: {max_iterations: (if $medium.mean > 0 then (($medium.mean + $medium.stddev) | floor | if . < 10 then 10 else . end) else 20 end), confidence: (if $medium.samples >= 10 then 0.8 elif $medium.samples >= 5 then 0.6 else 0.4 end), mean: $medium.mean, stddev: $medium.stddev, samples: $medium.samples},
500
- high: {max_iterations: (if $high.mean > 0 then (($high.mean + $high.stddev) | floor | if . < 15 then 15 else . end) else 30 end), confidence: (if $high.samples >= 10 then 0.8 elif $high.samples >= 5 then 0.6 else 0.4 end), mean: $high.mean, stddev: $high.stddev, samples: $high.samples}
501
- },
503
+ low: {max_iterations: (if $low.mean > 0 then (($low.mean + $low.stddev) | floor | if . < 5 then 5 else . end) else 10 end), confidence: (if $low.samples >= 10 then 0.8 elif $low.samples >= 5 then 0.6 else 0.4 end), mean: $low.mean, stddev: $low.stddev, samples: $low.samples},
504
+ medium: {max_iterations: (if $medium.mean > 0 then (($medium.mean + $medium.stddev) | floor | if . < 10 then 10 else . end) else 20 end), confidence: (if $medium.samples >= 10 then 0.8 elif $medium.samples >= 5 then 0.6 else 0.4 end), mean: $medium.mean, stddev: $medium.stddev, samples: $medium.samples},
505
+ high: {max_iterations: (if $high.mean > 0 then (($high.mean + $high.stddev) | floor | if . < 15 then 15 else . end) else 30 end), confidence: (if $high.samples >= 10 then 0.8 elif $high.samples >= 5 then 0.6 else 0.4 end), mean: $high.mean, stddev: $high.stddev, samples: $high.samples},
502
506
  updated_at: $updated
503
507
  }' \
504
508
  > "$tmp_model" && mv "$tmp_model" "$ITERATION_MODEL_FILE" || rm -f "$tmp_model"
@@ -506,6 +510,81 @@ optimize_learn_iterations() {
506
510
  rm -f "$tmp_low" "$tmp_med" "$tmp_high" 2>/dev/null || true
507
511
 
508
512
  success "Iteration model updated"
513
+
514
+ # Apply prediction error bias correction from validation data
515
+ _optimize_apply_prediction_bias
516
+ }
517
+
518
+ # _optimize_apply_prediction_bias
519
+ # Reads prediction-validation.jsonl and applies bias correction to iteration model.
520
+ # If predictions consistently over/under-estimate, shift the model's means.
521
+ _optimize_apply_prediction_bias() {
522
+ local validation_file="${HOME}/.shipwright/optimization/prediction-validation.jsonl"
523
+ [[ ! -f "$validation_file" ]] && return 0
524
+
525
+ local model_file="$ITERATION_MODEL_FILE"
526
+ [[ ! -f "$model_file" ]] && return 0
527
+
528
+ # Compute mean delta (predicted - actual) from recent validations
529
+ local recent_count=50
530
+ local bias_data
531
+ bias_data=$(tail -n "$recent_count" "$validation_file" | jq -s '
532
+ if length == 0 then empty
533
+ else
534
+ group_by(
535
+ if .predicted_complexity <= 3 then "low"
536
+ elif .predicted_complexity <= 6 then "medium"
537
+ else "high" end
538
+ ) | map({
539
+ bucket: (.[0] | if .predicted_complexity <= 3 then "low" elif .predicted_complexity <= 6 then "medium" else "high" end),
540
+ mean_delta: ([.[].delta] | add / length),
541
+ count: length
542
+ })
543
+ end' 2>/dev/null || true)
544
+
545
+ [[ -z "$bias_data" || "$bias_data" == "null" ]] && return 0
546
+
547
+ # Apply bias correction: if mean_delta > 0, predictions are too high → increase model mean
548
+ # (model mean drives estimates, and positive delta = predicted > actual = model underestimates actual iterations needed)
549
+ local updated_model
550
+ updated_model=$(cat "$model_file")
551
+ local changed=false
552
+
553
+ for bucket in low medium high; do
554
+ local bucket_bias count
555
+ bucket_bias=$(echo "$bias_data" | jq -r --arg b "$bucket" '.[] | select(.bucket == $b) | .mean_delta // 0' 2>/dev/null || echo "0")
556
+ count=$(echo "$bias_data" | jq -r --arg b "$bucket" '.[] | select(.bucket == $b) | .count // 0' 2>/dev/null || echo "0")
557
+
558
+ # Only correct if enough samples and significant bias (|delta| > 1)
559
+ if [[ "${count:-0}" -ge 5 ]]; then
560
+ local abs_bias
561
+ abs_bias=$(awk -v b="$bucket_bias" 'BEGIN { v = b < 0 ? -b : b; printf "%.1f", v }')
562
+ if awk -v ab="$abs_bias" 'BEGIN { exit !(ab > 1.0) }' 2>/dev/null; then
563
+ # Correction = -delta * 0.3 (partial correction to avoid overshooting)
564
+ local correction
565
+ correction=$(awk -v d="$bucket_bias" 'BEGIN { printf "%.2f", -d * 0.3 }')
566
+ updated_model=$(echo "$updated_model" | jq --arg b "$bucket" --argjson c "$correction" \
567
+ '.[$b].mean = ((.[$b].mean // 0) + $c) | .[$b].bias_correction = $c' 2>/dev/null || echo "$updated_model")
568
+ changed=true
569
+ info "Prediction bias correction for $bucket: delta=${bucket_bias}, correction=${correction} (${count} samples)"
570
+ fi
571
+ fi
572
+ done
573
+
574
+ if [[ "$changed" == true ]]; then
575
+ local tmp_model
576
+ tmp_model=$(mktemp)
577
+ trap "rm -f '$tmp_model'" RETURN
578
+ if echo "$updated_model" | jq '.' > "$tmp_model" 2>/dev/null && [[ -s "$tmp_model" ]]; then
579
+ mv "$tmp_model" "$model_file"
580
+ emit_event "optimize.prediction_bias_corrected"
581
+ else
582
+ rm -f "$tmp_model"
583
+ fi
584
+ fi
585
+
586
+ # Rotate validation file
587
+ type rotate_jsonl &>/dev/null 2>&1 && rotate_jsonl "$validation_file" 5000
509
588
  }
510
589
 
511
590
  # ═════════════════════════════════════════════════════════════════════════════
@@ -537,6 +616,7 @@ optimize_route_models() {
537
616
  # Collect per-stage, per-model stats
538
617
  local tmp_stage_stats
539
618
  tmp_stage_stats=$(mktemp)
619
+ trap "rm -f '$tmp_stage_stats'" RETURN
540
620
 
541
621
  while IFS= read -r line; do
542
622
  local model result stages_arr
@@ -652,6 +732,7 @@ optimize_route_models() {
652
732
  # Atomic write
653
733
  local tmp_routing
654
734
  tmp_routing=$(mktemp "${MODEL_ROUTING_FILE}.tmp.XXXXXX")
735
+ trap "rm -f '$tmp_routing'" RETURN
655
736
  echo "$consumer_routing" > "$tmp_routing" && mv "$tmp_routing" "$MODEL_ROUTING_FILE" || rm -f "$tmp_routing"
656
737
 
657
738
  rm -f "$tmp_stage_stats" 2>/dev/null || true
@@ -659,6 +740,85 @@ optimize_route_models() {
659
740
  success "Model routing updated"
660
741
  }
661
742
 
743
+ # ═════════════════════════════════════════════════════════════════════════════
744
+ # RISK KEYWORD LEARNING
745
+ # ═════════════════════════════════════════════════════════════════════════════
746
+
747
+ # optimize_learn_risk_keywords [outcomes_file]
748
+ # Learns keyword→risk-weight mapping from pipeline outcomes for predictive risk scoring.
749
+ # Failed pipelines with labels/keywords get positive weights; successful ones get negative.
750
+ optimize_learn_risk_keywords() {
751
+ local outcomes_file="${1:-$OUTCOMES_FILE}"
752
+
753
+ if [[ ! -f "$outcomes_file" ]]; then
754
+ return 0
755
+ fi
756
+
757
+ ensure_optimization_dir
758
+
759
+ info "Learning risk keywords from outcomes..."
760
+
761
+ local risk_file="${OPTIMIZATION_DIR}/risk-keywords.json"
762
+ local keywords='{}'
763
+ if [[ -f "$risk_file" ]]; then
764
+ keywords=$(jq '.' "$risk_file" 2>/dev/null || echo '{}')
765
+ fi
766
+
767
+ local decay=0.95
768
+ local learn_rate=5
769
+
770
+ # Read outcomes and extract keywords from labels
771
+ local updated=false
772
+ while IFS= read -r line; do
773
+ local result labels
774
+ result=$(echo "$line" | jq -r '.result // "unknown"' 2>/dev/null) || continue
775
+ labels=$(echo "$line" | jq -r '.labels // ""' 2>/dev/null) || continue
776
+ [[ -z "$labels" || "$labels" == "null" ]] && continue
777
+
778
+ # Split labels on comma/space and learn from each keyword
779
+ local IFS=', '
780
+ for kw in $labels; do
781
+ kw=$(echo "$kw" | tr '[:upper:]' '[:lower:]' | tr -cd '[:alnum:]-')
782
+ [[ -z "$kw" || ${#kw} -lt 3 ]] && continue
783
+
784
+ local current_weight
785
+ current_weight=$(echo "$keywords" | jq -r --arg k "$kw" '.[$k] // 0' 2>/dev/null || echo "0")
786
+
787
+ local delta=0
788
+ if [[ "$result" == "failed" || "$result" == "error" ]]; then
789
+ delta=$learn_rate
790
+ elif [[ "$result" == "success" || "$result" == "complete" ]]; then
791
+ delta=$((-learn_rate / 2))
792
+ fi
793
+
794
+ if [[ "$delta" -ne 0 ]]; then
795
+ local new_weight
796
+ new_weight=$(awk -v cw="$current_weight" -v d="$decay" -v dw="$delta" 'BEGIN { printf "%.0f", (cw * d) + dw }')
797
+ # Clamp to -50..50
798
+ new_weight=$(awk -v w="$new_weight" 'BEGIN { if(w>50) w=50; if(w<-50) w=-50; printf "%.0f", w }')
799
+ keywords=$(echo "$keywords" | jq --arg k "$kw" --argjson w "$new_weight" '.[$k] = $w' 2>/dev/null || echo "$keywords")
800
+ updated=true
801
+ fi
802
+ done
803
+ done < "$outcomes_file"
804
+
805
+ if [[ "$updated" == true ]]; then
806
+ # Prune zero-weight keywords
807
+ keywords=$(echo "$keywords" | jq 'to_entries | map(select(.value != 0)) | from_entries' 2>/dev/null || echo "$keywords")
808
+ local tmp_risk
809
+ tmp_risk=$(mktemp)
810
+ trap "rm -f '$tmp_risk'" RETURN
811
+ if echo "$keywords" | jq '.' > "$tmp_risk" 2>/dev/null && [[ -s "$tmp_risk" ]]; then
812
+ mv "$tmp_risk" "$risk_file"
813
+ success "Risk keywords updated ($(echo "$keywords" | jq 'length' 2>/dev/null || echo '?') keywords)"
814
+ else
815
+ rm -f "$tmp_risk"
816
+ fi
817
+ else
818
+ info "No label data in outcomes — risk keywords unchanged"
819
+ fi
820
+ }
821
+
662
822
  # ═════════════════════════════════════════════════════════════════════════════
663
823
  # MEMORY EVOLUTION
664
824
  # ═════════════════════════════════════════════════════════════════════════════
@@ -724,6 +884,7 @@ optimize_evolve_memory() {
724
884
 
725
885
  local tmp_file
726
886
  tmp_file=$(mktemp)
887
+ trap "rm -f '$tmp_file'" RETURN
727
888
 
728
889
  # Prune entries not seen within prune window
729
890
  local pruned_json
@@ -761,6 +922,7 @@ optimize_evolve_memory() {
761
922
  # Collect all patterns across repos
762
923
  local tmp_all_patterns
763
924
  tmp_all_patterns=$(mktemp)
925
+ trap "rm -f '$tmp_all_patterns'" RETURN
764
926
  for repo_dir in "$memory_root"/*/; do
765
927
  [[ -d "$repo_dir" ]] || continue
766
928
  local failures_file="${repo_dir}failures.json"
@@ -776,6 +938,7 @@ optimize_evolve_memory() {
776
938
  if [[ -n "$promoted_patterns" ]]; then
777
939
  local tmp_global
778
940
  tmp_global=$(mktemp)
941
+ trap "rm -f '$tmp_global'" RETURN
779
942
  local pcount=0
780
943
  while IFS= read -r pattern; do
781
944
  [[ -z "$pattern" ]] && continue
@@ -821,6 +984,7 @@ optimize_full_analysis() {
821
984
  optimize_tune_templates
822
985
  optimize_learn_iterations
823
986
  optimize_route_models
987
+ optimize_learn_risk_keywords
824
988
  optimize_evolve_memory
825
989
  optimize_report >> "${OPTIMIZATION_DIR}/last-report.txt" 2>/dev/null || true
826
990
  optimize_adjust_audit_intensity 2>/dev/null || true
@@ -981,6 +1145,7 @@ optimize_adjust_audit_intensity() {
981
1145
  info "Quality trend: ${trend} (avg: ${avg_quality}) — increasing audit intensity"
982
1146
  local tmp_dc
983
1147
  tmp_dc=$(mktemp "${daemon_config}.tmp.XXXXXX")
1148
+ trap "rm -f '$tmp_dc'" RETURN
984
1149
  jq '.intelligence.adversarial_enabled = true | .intelligence.architecture_enabled = true' \
985
1150
  "$daemon_config" > "$tmp_dc" 2>/dev/null && mv "$tmp_dc" "$daemon_config" || rm -f "$tmp_dc"
986
1151
  emit_event "optimize.audit_intensity" \
@@ -8,7 +8,7 @@
8
8
  # ║ Supports --template to scaffold from a team template and --terminal ║
9
9
  # ║ to select a terminal adapter (tmux, iterm2, wezterm). ║
10
10
  # ╚═══════════════════════════════════════════════════════════════════════════╝
11
- VERSION="2.2.1"
11
+ VERSION="2.3.0"
12
12
  set -euo pipefail
13
13
  trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
14
14
 
@@ -10,7 +10,7 @@
10
10
  set -euo pipefail
11
11
  trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
12
12
 
13
- VERSION="2.2.1"
13
+ VERSION="2.3.0"
14
14
 
15
15
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
16
16
  REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"