shipwright-cli 3.0.0 → 3.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +21 -7
- package/completions/_shipwright +247 -93
- package/completions/shipwright.bash +69 -15
- package/completions/shipwright.fish +309 -41
- package/config/decision-tiers.json +55 -0
- package/config/defaults.json +25 -2
- package/config/event-schema.json +142 -5
- package/config/policy.json +8 -0
- package/dashboard/public/index.html +6 -0
- package/dashboard/public/styles.css +76 -0
- package/dashboard/server.ts +51 -0
- package/dashboard/src/core/api.ts +5 -0
- package/dashboard/src/types/api.ts +10 -0
- package/dashboard/src/views/metrics.ts +69 -1
- package/package.json +3 -3
- package/scripts/lib/architecture.sh +2 -1
- package/scripts/lib/bootstrap.sh +0 -0
- package/scripts/lib/config.sh +0 -0
- package/scripts/lib/daemon-adaptive.sh +4 -2
- package/scripts/lib/daemon-dispatch.sh +24 -1
- package/scripts/lib/daemon-failure.sh +0 -0
- package/scripts/lib/daemon-health.sh +0 -0
- package/scripts/lib/daemon-patrol.sh +42 -7
- package/scripts/lib/daemon-poll.sh +17 -0
- package/scripts/lib/daemon-state.sh +17 -0
- package/scripts/lib/daemon-triage.sh +1 -1
- package/scripts/lib/decide-autonomy.sh +295 -0
- package/scripts/lib/decide-scoring.sh +228 -0
- package/scripts/lib/decide-signals.sh +462 -0
- package/scripts/lib/fleet-failover.sh +0 -0
- package/scripts/lib/helpers.sh +19 -18
- package/scripts/lib/pipeline-detection.sh +1 -1
- package/scripts/lib/pipeline-github.sh +0 -0
- package/scripts/lib/pipeline-intelligence.sh +23 -4
- package/scripts/lib/pipeline-quality-checks.sh +11 -6
- package/scripts/lib/pipeline-quality.sh +0 -0
- package/scripts/lib/pipeline-stages.sh +330 -33
- package/scripts/lib/pipeline-state.sh +14 -0
- package/scripts/lib/policy.sh +0 -0
- package/scripts/lib/test-helpers.sh +0 -0
- package/scripts/postinstall.mjs +75 -1
- package/scripts/signals/example-collector.sh +36 -0
- package/scripts/sw +8 -4
- package/scripts/sw-activity.sh +1 -7
- package/scripts/sw-adaptive.sh +7 -7
- package/scripts/sw-adversarial.sh +1 -1
- package/scripts/sw-architecture-enforcer.sh +1 -1
- package/scripts/sw-auth.sh +1 -1
- package/scripts/sw-autonomous.sh +1 -1
- package/scripts/sw-changelog.sh +1 -1
- package/scripts/sw-checkpoint.sh +1 -1
- package/scripts/sw-ci.sh +11 -6
- package/scripts/sw-cleanup.sh +1 -1
- package/scripts/sw-code-review.sh +36 -17
- package/scripts/sw-connect.sh +1 -1
- package/scripts/sw-context.sh +1 -1
- package/scripts/sw-cost.sh +71 -5
- package/scripts/sw-daemon.sh +6 -3
- package/scripts/sw-dashboard.sh +1 -1
- package/scripts/sw-db.sh +53 -38
- package/scripts/sw-decide.sh +685 -0
- package/scripts/sw-decompose.sh +1 -1
- package/scripts/sw-deps.sh +1 -1
- package/scripts/sw-developer-simulation.sh +1 -1
- package/scripts/sw-discovery.sh +80 -4
- package/scripts/sw-doc-fleet.sh +1 -1
- package/scripts/sw-docs-agent.sh +1 -1
- package/scripts/sw-docs.sh +1 -1
- package/scripts/sw-doctor.sh +1 -1
- package/scripts/sw-dora.sh +1 -1
- package/scripts/sw-durable.sh +9 -5
- package/scripts/sw-e2e-orchestrator.sh +1 -1
- package/scripts/sw-eventbus.sh +7 -4
- package/scripts/sw-evidence.sh +1 -1
- package/scripts/sw-feedback.sh +1 -1
- package/scripts/sw-fix.sh +1 -1
- package/scripts/sw-fleet-discover.sh +1 -1
- package/scripts/sw-fleet-viz.sh +6 -4
- package/scripts/sw-fleet.sh +1 -1
- package/scripts/sw-github-app.sh +3 -2
- package/scripts/sw-github-checks.sh +1 -1
- package/scripts/sw-github-deploy.sh +1 -1
- package/scripts/sw-github-graphql.sh +1 -1
- package/scripts/sw-guild.sh +1 -1
- package/scripts/sw-heartbeat.sh +1 -1
- package/scripts/sw-hygiene.sh +5 -3
- package/scripts/sw-incident.sh +9 -5
- package/scripts/sw-init.sh +1 -1
- package/scripts/sw-instrument.sh +1 -1
- package/scripts/sw-intelligence.sh +11 -6
- package/scripts/sw-jira.sh +1 -1
- package/scripts/sw-launchd.sh +1 -1
- package/scripts/sw-linear.sh +1 -1
- package/scripts/sw-logs.sh +1 -1
- package/scripts/sw-loop.sh +338 -32
- package/scripts/sw-memory.sh +23 -6
- package/scripts/sw-mission-control.sh +1 -1
- package/scripts/sw-model-router.sh +3 -2
- package/scripts/sw-otel.sh +8 -4
- package/scripts/sw-oversight.sh +1 -1
- package/scripts/sw-pipeline-composer.sh +3 -1
- package/scripts/sw-pipeline-vitals.sh +11 -6
- package/scripts/sw-pipeline.sh +92 -8
- package/scripts/sw-pm.sh +5 -4
- package/scripts/sw-pr-lifecycle.sh +7 -4
- package/scripts/sw-predictive.sh +11 -5
- package/scripts/sw-prep.sh +1 -1
- package/scripts/sw-ps.sh +1 -1
- package/scripts/sw-public-dashboard.sh +3 -2
- package/scripts/sw-quality.sh +21 -10
- package/scripts/sw-reaper.sh +1 -1
- package/scripts/sw-recruit.sh +1 -1
- package/scripts/sw-regression.sh +1 -1
- package/scripts/sw-release-manager.sh +1 -1
- package/scripts/sw-release.sh +1 -1
- package/scripts/sw-remote.sh +1 -1
- package/scripts/sw-replay.sh +1 -1
- package/scripts/sw-retro.sh +1 -1
- package/scripts/sw-review-rerun.sh +1 -1
- package/scripts/sw-scale.sh +69 -11
- package/scripts/sw-security-audit.sh +1 -1
- package/scripts/sw-self-optimize.sh +168 -4
- package/scripts/sw-session.sh +3 -3
- package/scripts/sw-setup.sh +1 -1
- package/scripts/sw-standup.sh +1 -1
- package/scripts/sw-status.sh +1 -1
- package/scripts/sw-strategic.sh +11 -6
- package/scripts/sw-stream.sh +7 -4
- package/scripts/sw-swarm.sh +3 -2
- package/scripts/sw-team-stages.sh +1 -1
- package/scripts/sw-templates.sh +3 -3
- package/scripts/sw-testgen.sh +11 -6
- package/scripts/sw-tmux-pipeline.sh +1 -1
- package/scripts/sw-tmux.sh +35 -1
- package/scripts/sw-trace.sh +1 -1
- package/scripts/sw-tracker.sh +1 -1
- package/scripts/sw-triage.sh +7 -7
- package/scripts/sw-upgrade.sh +1 -1
- package/scripts/sw-ux.sh +1 -1
- package/scripts/sw-webhook.sh +3 -2
- package/scripts/sw-widgets.sh +7 -4
- package/scripts/sw-worktree.sh +1 -1
- package/scripts/update-homebrew-sha.sh +21 -15
package/scripts/sw-daemon.sh
CHANGED
|
@@ -8,8 +8,11 @@ trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
|
|
|
8
8
|
|
|
9
9
|
# Allow spawning Claude CLI from within a Claude Code session (daemon, fleet, etc.)
|
|
10
10
|
unset CLAUDECODE 2>/dev/null || true
|
|
11
|
+
# Ignore SIGHUP so daemon survives tmux attach/detach
|
|
12
|
+
trap '' HUP
|
|
13
|
+
trap '' SIGPIPE
|
|
11
14
|
|
|
12
|
-
VERSION="3.0.0"
|
|
15
|
+
VERSION="3.2.0"
|
|
13
16
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
14
17
|
REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
|
|
15
18
|
|
|
@@ -515,7 +518,7 @@ load_config() {
|
|
|
515
518
|
PROGRESS_MONITORING=$(jq -r '.health.progress_based // true' "$config_file")
|
|
516
519
|
PROGRESS_CHECKS_BEFORE_WARN=$(jq -r '.health.stale_checks_before_warn // 20' "$config_file")
|
|
517
520
|
PROGRESS_CHECKS_BEFORE_KILL=$(jq -r '.health.stale_checks_before_kill // 120' "$config_file")
|
|
518
|
-
PROGRESS_HARD_LIMIT_S=$(jq -r '.health.hard_limit_s //
|
|
521
|
+
PROGRESS_HARD_LIMIT_S=$(jq -r '.health.hard_limit_s // 21600' "$config_file") # 21600s = 6h default; 0 = disabled
|
|
519
522
|
NUDGE_ENABLED=$(jq -r '.health.nudge_enabled // true' "$config_file")
|
|
520
523
|
NUDGE_AFTER_CHECKS=$(jq -r '.health.nudge_after_checks // 40' "$config_file")
|
|
521
524
|
|
|
@@ -554,7 +557,7 @@ setup_dirs() {
|
|
|
554
557
|
# ─── Adaptive Threshold Helpers ──────────────────────────────────────────────
|
|
555
558
|
# When intelligence.adaptive_enabled=true, operational thresholds are learned
|
|
556
559
|
# from historical data instead of using fixed defaults.
|
|
557
|
-
# Every function falls back to the
|
|
560
|
+
# Every function falls back to the config default when no data exists.
|
|
558
561
|
|
|
559
562
|
ADAPTIVE_THRESHOLDS_ENABLED="${ADAPTIVE_THRESHOLDS_ENABLED:-false}"
|
|
560
563
|
PRIORITY_STRATEGY="${PRIORITY_STRATEGY:-quick-wins-first}"
|
package/scripts/sw-dashboard.sh
CHANGED
|
@@ -6,7 +6,7 @@
|
|
|
6
6
|
set -euo pipefail
|
|
7
7
|
trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
|
|
8
8
|
|
|
9
|
-
VERSION="3.0.0"
|
|
9
|
+
VERSION="3.2.0"
|
|
10
10
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
11
11
|
|
|
12
12
|
# ─── Cross-platform compatibility ──────────────────────────────────────────
|
package/scripts/sw-db.sh
CHANGED
|
@@ -14,7 +14,7 @@ if [[ -n "${_SW_DB_LOADED:-}" ]] && [[ "${BASH_SOURCE[0]}" != "$0" ]]; then
|
|
|
14
14
|
fi
|
|
15
15
|
_SW_DB_LOADED=1
|
|
16
16
|
|
|
17
|
-
VERSION="3.0.0"
|
|
17
|
+
VERSION="3.2.0"
|
|
18
18
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
19
19
|
REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
|
|
20
20
|
|
|
@@ -93,6 +93,12 @@ db_available() {
|
|
|
93
93
|
check_sqlite3 && [[ -f "$DB_FILE" ]] && _db_feature_enabled
|
|
94
94
|
}
|
|
95
95
|
|
|
96
|
+
# ─── SQL Escaping ──────────────────────────────────────────────────────────
|
|
97
|
+
# Bash 3.2 (macOS default) breaks ${var//\'/\'\'} — backslashes leak into output.
|
|
98
|
+
# This helper uses a variable to hold the single quote for reliable escaping.
|
|
99
|
+
_SQL_SQ="'"
|
|
100
|
+
_sql_escape() { local _v="$1"; echo "${_v//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"; }
|
|
101
|
+
|
|
96
102
|
# ─── Ensure Database Directory ──────────────────────────────────────────────
|
|
97
103
|
ensure_db_dir() {
|
|
98
104
|
mkdir -p "$DB_DIR"
|
|
@@ -760,7 +766,7 @@ db_query_events_since() {
|
|
|
760
766
|
# db_get_consumer_offset <consumer_id> — returns last_event_id or "0"
|
|
761
767
|
db_get_consumer_offset() {
|
|
762
768
|
local consumer_id="$1"
|
|
763
|
-
consumer_id="${consumer_id
|
|
769
|
+
consumer_id="${consumer_id//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
764
770
|
_db_query "SELECT last_event_id FROM event_consumers WHERE consumer_id = '${consumer_id}';" 2>/dev/null || echo "0"
|
|
765
771
|
}
|
|
766
772
|
|
|
@@ -768,7 +774,7 @@ db_get_consumer_offset() {
|
|
|
768
774
|
db_set_consumer_offset() {
|
|
769
775
|
local consumer_id="$1"
|
|
770
776
|
local last_event_id="$2"
|
|
771
|
-
consumer_id="${consumer_id
|
|
777
|
+
consumer_id="${consumer_id//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
772
778
|
_db_exec "INSERT OR REPLACE INTO event_consumers (consumer_id, last_event_id, last_consumed_at) VALUES ('${consumer_id}', ${last_event_id}, '$(now_iso)');"
|
|
773
779
|
}
|
|
774
780
|
|
|
@@ -776,9 +782,9 @@ db_set_consumer_offset() {
|
|
|
776
782
|
db_save_checkpoint() {
|
|
777
783
|
local workflow_id="$1"
|
|
778
784
|
local data="$2"
|
|
779
|
-
workflow_id="${workflow_id
|
|
785
|
+
workflow_id="${workflow_id//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
780
786
|
data="${data//$'\n'/ }"
|
|
781
|
-
data="${data
|
|
787
|
+
data="${data//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
782
788
|
if ! db_available; then return 1; fi
|
|
783
789
|
_db_exec "INSERT OR REPLACE INTO durable_checkpoints (workflow_id, checkpoint_data, created_at) VALUES ('${workflow_id}', '${data}', '$(now_iso)');"
|
|
784
790
|
}
|
|
@@ -786,7 +792,7 @@ db_save_checkpoint() {
|
|
|
786
792
|
# db_load_checkpoint <workflow_id> — returns checkpoint_data or empty
|
|
787
793
|
db_load_checkpoint() {
|
|
788
794
|
local workflow_id="$1"
|
|
789
|
-
workflow_id="${workflow_id
|
|
795
|
+
workflow_id="${workflow_id//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
790
796
|
if ! db_available; then return 1; fi
|
|
791
797
|
_db_query "SELECT checkpoint_data FROM durable_checkpoints WHERE workflow_id = '${workflow_id}';" 2>/dev/null || echo ""
|
|
792
798
|
}
|
|
@@ -806,7 +812,9 @@ add_event() {
|
|
|
806
812
|
|
|
807
813
|
# Try SQLite first
|
|
808
814
|
if db_available; then
|
|
809
|
-
_db_exec "INSERT OR IGNORE INTO events (ts, ts_epoch, type, job_id, stage, status, duration_secs, metadata, created_at, synced) VALUES ('${ts}', ${ts_epoch}, '${event_type}', '${job_id}', '${stage}', '${status}', ${duration_secs}, '${metadata}', '${ts}', 0);"
|
|
815
|
+
if ! _db_exec "INSERT OR IGNORE INTO events (ts, ts_epoch, type, job_id, stage, status, duration_secs, metadata, created_at, synced) VALUES ('${ts}', ${ts_epoch}, '${event_type}', '${job_id}', '${stage}', '${status}', ${duration_secs}, '${metadata}', '${ts}', 0);" 2>/dev/null; then
|
|
816
|
+
warn "db_add_event: SQLite insert failed for event type=${event_type}" >&2
|
|
817
|
+
fi
|
|
810
818
|
fi
|
|
811
819
|
|
|
812
820
|
# Always write to JSONL for backward compat (dual-write period)
|
|
@@ -842,8 +850,8 @@ db_save_job() {
|
|
|
842
850
|
if ! db_available; then return 1; fi
|
|
843
851
|
|
|
844
852
|
# Escape single quotes in title/goal
|
|
845
|
-
title="${title
|
|
846
|
-
goal="${goal
|
|
853
|
+
title="${title//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
854
|
+
goal="${goal//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
847
855
|
|
|
848
856
|
_db_exec "INSERT OR REPLACE INTO daemon_state (job_id, issue_number, title, goal, pid, worktree, branch, status, template, started_at, updated_at) VALUES ('${job_id}', ${issue_num}, '${title}', '${goal}', ${pid}, '${worktree}', '${branch}', 'active', '${template}', '${ts}', '${ts}');"
|
|
849
857
|
}
|
|
@@ -859,7 +867,7 @@ db_complete_job() {
|
|
|
859
867
|
|
|
860
868
|
if ! db_available; then return 1; fi
|
|
861
869
|
|
|
862
|
-
error_msg="${error_msg
|
|
870
|
+
error_msg="${error_msg//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
863
871
|
|
|
864
872
|
_db_exec "UPDATE daemon_state SET status = 'completed', result = '${result}', duration = '${duration}', error_message = '${error_msg}', completed_at = '${ts}', updated_at = '${ts}' WHERE job_id = '${job_id}' AND status = 'active';"
|
|
865
873
|
}
|
|
@@ -873,7 +881,7 @@ db_fail_job() {
|
|
|
873
881
|
|
|
874
882
|
if ! db_available; then return 1; fi
|
|
875
883
|
|
|
876
|
-
error_msg="${error_msg
|
|
884
|
+
error_msg="${error_msg//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
877
885
|
|
|
878
886
|
_db_exec "UPDATE daemon_state SET status = 'failed', result = 'failure', error_message = '${error_msg}', completed_at = '${ts}', updated_at = '${ts}' WHERE job_id = '${job_id}' AND status = 'active';"
|
|
879
887
|
}
|
|
@@ -917,7 +925,7 @@ db_remove_active_job() {
|
|
|
917
925
|
db_enqueue_issue() {
|
|
918
926
|
local issue_key="$1"
|
|
919
927
|
if ! db_available; then return 1; fi
|
|
920
|
-
issue_key="${issue_key
|
|
928
|
+
issue_key="${issue_key//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
921
929
|
_db_exec "INSERT OR REPLACE INTO daemon_queue (issue_key, added_at) VALUES ('${issue_key}', '$(now_iso)');"
|
|
922
930
|
}
|
|
923
931
|
|
|
@@ -927,8 +935,10 @@ db_dequeue_next() {
|
|
|
927
935
|
local next escaped
|
|
928
936
|
next=$(_db_query "SELECT issue_key FROM daemon_queue ORDER BY added_at ASC LIMIT 1;" || echo "")
|
|
929
937
|
if [[ -n "$next" ]]; then
|
|
930
|
-
escaped="${next
|
|
931
|
-
_db_exec "DELETE FROM daemon_queue WHERE issue_key = '${escaped}';" 2>/dev/null
|
|
938
|
+
escaped="${next//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
939
|
+
if ! _db_exec "DELETE FROM daemon_queue WHERE issue_key = '${escaped}';" 2>/dev/null; then
|
|
940
|
+
warn "db_dequeue_next: failed to delete queue entry for ${next}" >&2
|
|
941
|
+
fi
|
|
932
942
|
echo "$next"
|
|
933
943
|
fi
|
|
934
944
|
}
|
|
@@ -937,7 +947,7 @@ db_dequeue_next() {
|
|
|
937
947
|
db_is_issue_queued() {
|
|
938
948
|
local issue_key="$1"
|
|
939
949
|
if ! db_available; then return 1; fi
|
|
940
|
-
issue_key="${issue_key
|
|
950
|
+
issue_key="${issue_key//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
941
951
|
local count
|
|
942
952
|
count=$(_db_query "SELECT COUNT(*) FROM daemon_queue WHERE issue_key = '${issue_key}';")
|
|
943
953
|
[[ "${count:-0}" -gt 0 ]]
|
|
@@ -947,7 +957,7 @@ db_is_issue_queued() {
|
|
|
947
957
|
db_remove_from_queue() {
|
|
948
958
|
local issue_key="$1"
|
|
949
959
|
if ! db_available; then return 1; fi
|
|
950
|
-
issue_key="${issue_key
|
|
960
|
+
issue_key="${issue_key//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
951
961
|
_db_exec "DELETE FROM daemon_queue WHERE issue_key = '${issue_key}';"
|
|
952
962
|
}
|
|
953
963
|
|
|
@@ -974,9 +984,9 @@ db_record_outcome() {
|
|
|
974
984
|
|
|
975
985
|
if ! db_available; then return 1; fi
|
|
976
986
|
|
|
977
|
-
job_id="${job_id
|
|
978
|
-
issue="${issue
|
|
979
|
-
template="${template
|
|
987
|
+
job_id="${job_id//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
988
|
+
issue="${issue//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
989
|
+
template="${template//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
980
990
|
|
|
981
991
|
_db_exec "INSERT OR REPLACE INTO pipeline_outcomes
|
|
982
992
|
(job_id, issue_number, template, success, duration_secs, retry_count, cost_usd, complexity, created_at)
|
|
@@ -1087,7 +1097,7 @@ db_record_heartbeat() {
|
|
|
1087
1097
|
|
|
1088
1098
|
if ! db_available; then return 1; fi
|
|
1089
1099
|
|
|
1090
|
-
activity="${activity
|
|
1100
|
+
activity="${activity//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1091
1101
|
|
|
1092
1102
|
_db_exec "INSERT OR REPLACE INTO heartbeats (job_id, pid, issue, stage, iteration, last_activity, memory_mb, updated_at) VALUES ('${job_id}', ${pid}, ${issue}, '${stage}', ${iteration}, '${activity}', ${memory_mb}, '${ts}');"
|
|
1093
1103
|
}
|
|
@@ -1135,9 +1145,9 @@ db_record_failure() {
|
|
|
1135
1145
|
if ! db_available; then return 1; fi
|
|
1136
1146
|
|
|
1137
1147
|
# Escape quotes
|
|
1138
|
-
error_sig="${error_sig
|
|
1139
|
-
root_cause="${root_cause
|
|
1140
|
-
fix_desc="${fix_desc
|
|
1148
|
+
error_sig="${error_sig//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1149
|
+
root_cause="${root_cause//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1150
|
+
fix_desc="${fix_desc//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1141
1151
|
|
|
1142
1152
|
# Upsert: increment occurrences if same signature exists
|
|
1143
1153
|
_db_exec "INSERT INTO memory_failures (repo_hash, failure_class, error_signature, root_cause, fix_description, file_path, stage, occurrences, last_seen_at, created_at, synced) VALUES ('${repo_hash}', '${failure_class}', '${error_sig}', '${root_cause}', '${fix_desc}', '${file_path}', '${stage}', 1, '${ts}', '${ts}', 0) ON CONFLICT(id) DO UPDATE SET occurrences = occurrences + 1, last_seen_at = '${ts}';"
|
|
@@ -1161,8 +1171,8 @@ db_query_similar_failures() {
|
|
|
1161
1171
|
db_save_pattern() {
|
|
1162
1172
|
local repo_hash="$1" pattern_type="$2" pattern_key="$3" description="${4:-}" metadata="${5:-}"
|
|
1163
1173
|
if ! db_available; then return 1; fi
|
|
1164
|
-
description="${description
|
|
1165
|
-
metadata="${metadata
|
|
1174
|
+
description="${description//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1175
|
+
metadata="${metadata//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1166
1176
|
_db_exec "INSERT INTO memory_patterns (repo_hash, pattern_type, pattern_key, description, last_seen_at, created_at, metadata)
|
|
1167
1177
|
VALUES ('$repo_hash', '$pattern_type', '$pattern_key', '$description', '$(now_iso)', '$(now_iso)', '$metadata')
|
|
1168
1178
|
ON CONFLICT(repo_hash, pattern_type, pattern_key) DO UPDATE SET
|
|
@@ -1181,9 +1191,9 @@ db_query_patterns() {
|
|
|
1181
1191
|
db_save_decision() {
|
|
1182
1192
|
local repo_hash="$1" decision_type="$2" context="$3" decision="$4" metadata="${5:-}"
|
|
1183
1193
|
if ! db_available; then return 1; fi
|
|
1184
|
-
context="${context
|
|
1185
|
-
decision="${decision
|
|
1186
|
-
metadata="${metadata
|
|
1194
|
+
context="${context//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1195
|
+
decision="${decision//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1196
|
+
metadata="${metadata//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1187
1197
|
_db_exec "INSERT INTO memory_decisions (repo_hash, decision_type, context, decision, created_at, updated_at, metadata)
|
|
1188
1198
|
VALUES ('$repo_hash', '$decision_type', '$context', '$decision', '$(now_iso)', '$(now_iso)', '$metadata');"
|
|
1189
1199
|
}
|
|
@@ -1191,7 +1201,7 @@ db_save_decision() {
|
|
|
1191
1201
|
db_update_decision_outcome() {
|
|
1192
1202
|
local decision_id="$1" outcome="$2" confidence="${3:-}"
|
|
1193
1203
|
if ! db_available; then return 1; fi
|
|
1194
|
-
outcome="${outcome
|
|
1204
|
+
outcome="${outcome//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1195
1205
|
local set_clause="outcome = '$outcome', updated_at = '$(now_iso)'"
|
|
1196
1206
|
[[ -n "$confidence" ]] && set_clause="$set_clause, confidence = $confidence"
|
|
1197
1207
|
_db_exec "UPDATE memory_decisions SET $set_clause WHERE id = $decision_id;"
|
|
@@ -1209,7 +1219,7 @@ db_query_decisions() {
|
|
|
1209
1219
|
db_save_embedding() {
|
|
1210
1220
|
local content_hash="$1" source_type="$2" content_text="$3" repo_hash="${4:-}"
|
|
1211
1221
|
if ! db_available; then return 1; fi
|
|
1212
|
-
content_text="${content_text
|
|
1222
|
+
content_text="${content_text//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1213
1223
|
_db_exec "INSERT OR IGNORE INTO memory_embeddings (content_hash, source_type, content_text, repo_hash, created_at)
|
|
1214
1224
|
VALUES ('$content_hash', '$source_type', '$content_text', '$repo_hash', '$(now_iso)');"
|
|
1215
1225
|
}
|
|
@@ -1230,8 +1240,8 @@ db_save_reasoning_trace() {
|
|
|
1230
1240
|
escaped_input=$(echo "$input_context" | sed "s/'/''/g")
|
|
1231
1241
|
escaped_reasoning=$(echo "$reasoning" | sed "s/'/''/g")
|
|
1232
1242
|
escaped_output=$(echo "$output_decision" | sed "s/'/''/g")
|
|
1233
|
-
job_id="${job_id
|
|
1234
|
-
step_name="${step_name
|
|
1243
|
+
job_id="${job_id//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1244
|
+
step_name="${step_name//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1235
1245
|
if ! db_available; then return 1; fi
|
|
1236
1246
|
_db_exec "INSERT INTO reasoning_traces (job_id, step_name, input_context, reasoning, output_decision, confidence, created_at)
|
|
1237
1247
|
VALUES ('$job_id', '$step_name', '$escaped_input', '$escaped_reasoning', '$escaped_output', $confidence, '$(now_iso)');"
|
|
@@ -1239,7 +1249,7 @@ db_save_reasoning_trace() {
|
|
|
1239
1249
|
|
|
1240
1250
|
db_query_reasoning_traces() {
|
|
1241
1251
|
local job_id="$1"
|
|
1242
|
-
job_id="${job_id
|
|
1252
|
+
job_id="${job_id//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1243
1253
|
if ! db_available; then echo "[]"; return 0; fi
|
|
1244
1254
|
_db_query -json "SELECT * FROM reasoning_traces WHERE job_id = '$job_id' ORDER BY id ASC;" || echo "[]"
|
|
1245
1255
|
}
|
|
@@ -1261,7 +1271,8 @@ add_pipeline_run() {
|
|
|
1261
1271
|
|
|
1262
1272
|
local ts
|
|
1263
1273
|
ts="$(now_iso)"
|
|
1264
|
-
goal="${goal
|
|
1274
|
+
goal="${goal//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1275
|
+
branch="${branch//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1265
1276
|
|
|
1266
1277
|
_db_exec "INSERT OR IGNORE INTO pipeline_runs (job_id, issue_number, goal, branch, status, template, started_at, created_at) VALUES ('${job_id}', ${issue_number}, '${goal}', '${branch}', 'pending', '${template}', '${ts}', '${ts}');" || return 1
|
|
1267
1278
|
}
|
|
@@ -1292,7 +1303,7 @@ record_stage() {
|
|
|
1292
1303
|
|
|
1293
1304
|
local ts
|
|
1294
1305
|
ts="$(now_iso)"
|
|
1295
|
-
error_msg="${error_msg
|
|
1306
|
+
error_msg="${error_msg//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1296
1307
|
|
|
1297
1308
|
_db_exec "INSERT INTO pipeline_stages (job_id, stage_name, status, started_at, completed_at, duration_secs, error_message, created_at) VALUES ('${job_id}', '${stage_name}', '${status}', '${ts}', '${ts}', ${duration_secs}, '${error_msg}', '${ts}');" || return 1
|
|
1298
1309
|
}
|
|
@@ -1530,7 +1541,7 @@ migrate_json_data() {
|
|
|
1530
1541
|
hb_mem=$(jq -r '.memory_mb // 0' "$hb_file" 2>/dev/null || echo "0")
|
|
1531
1542
|
hb_updated=$(jq -r '.updated_at // ""' "$hb_file" 2>/dev/null || echo "$(now_iso)")
|
|
1532
1543
|
|
|
1533
|
-
hb_activity="${hb_activity
|
|
1544
|
+
hb_activity="${hb_activity//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1534
1545
|
_db_exec "INSERT OR REPLACE INTO heartbeats (job_id, pid, issue, stage, iteration, last_activity, memory_mb, updated_at) VALUES ('${hb_job}', ${hb_pid}, ${hb_issue}, '${hb_stage}', ${hb_iter}, '${hb_activity}', ${hb_mem}, '${hb_updated}');" 2>/dev/null && hb_count=$((hb_count + 1))
|
|
1535
1546
|
done
|
|
1536
1547
|
success "Heartbeats: ${hb_count} imported"
|
|
@@ -1822,8 +1833,12 @@ main() {
|
|
|
1822
1833
|
ensure_db_dir
|
|
1823
1834
|
init_schema
|
|
1824
1835
|
# Set schema version
|
|
1825
|
-
_db_exec "INSERT OR REPLACE INTO _schema (version, created_at, applied_at) VALUES (${SCHEMA_VERSION}, '$(now_iso)', '$(now_iso)');" 2>/dev/null
|
|
1826
|
-
|
|
1836
|
+
if ! _db_exec "INSERT OR REPLACE INTO _schema (version, created_at, applied_at) VALUES (${SCHEMA_VERSION}, '$(now_iso)', '$(now_iso)');" 2>/dev/null; then
|
|
1837
|
+
warn "db init: failed to write schema version ${SCHEMA_VERSION}" >&2
|
|
1838
|
+
fi
|
|
1839
|
+
if ! _db_exec "INSERT OR IGNORE INTO _sync_metadata (key, value, updated_at) VALUES ('device_id', '$(uname -n)-$$-$(now_epoch)', '$(now_iso)');" 2>/dev/null; then
|
|
1840
|
+
warn "db init: failed to write device_id metadata" >&2
|
|
1841
|
+
fi
|
|
1827
1842
|
success "Database initialized at ${DB_FILE} (WAL mode, schema v${SCHEMA_VERSION})"
|
|
1828
1843
|
;;
|
|
1829
1844
|
migrate)
|