shipwright-cli 3.0.0 → 3.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +3 -3
- package/completions/_shipwright +247 -93
- package/completions/shipwright.bash +69 -15
- package/completions/shipwright.fish +309 -41
- package/config/decision-tiers.json +55 -0
- package/config/event-schema.json +142 -5
- package/config/policy.json +8 -0
- package/package.json +3 -3
- package/scripts/lib/architecture.sh +2 -1
- package/scripts/lib/bootstrap.sh +0 -0
- package/scripts/lib/config.sh +0 -0
- package/scripts/lib/daemon-adaptive.sh +0 -0
- package/scripts/lib/daemon-dispatch.sh +24 -1
- package/scripts/lib/daemon-failure.sh +0 -0
- package/scripts/lib/daemon-health.sh +0 -0
- package/scripts/lib/daemon-patrol.sh +40 -5
- package/scripts/lib/daemon-poll.sh +17 -0
- package/scripts/lib/daemon-state.sh +10 -0
- package/scripts/lib/daemon-triage.sh +1 -1
- package/scripts/lib/decide-autonomy.sh +295 -0
- package/scripts/lib/decide-scoring.sh +228 -0
- package/scripts/lib/decide-signals.sh +462 -0
- package/scripts/lib/fleet-failover.sh +0 -0
- package/scripts/lib/helpers.sh +16 -17
- package/scripts/lib/pipeline-detection.sh +0 -0
- package/scripts/lib/pipeline-github.sh +0 -0
- package/scripts/lib/pipeline-intelligence.sh +20 -3
- package/scripts/lib/pipeline-quality-checks.sh +3 -2
- package/scripts/lib/pipeline-quality.sh +0 -0
- package/scripts/lib/pipeline-stages.sh +199 -32
- package/scripts/lib/pipeline-state.sh +14 -0
- package/scripts/lib/policy.sh +0 -0
- package/scripts/lib/test-helpers.sh +0 -0
- package/scripts/postinstall.mjs +75 -1
- package/scripts/signals/example-collector.sh +36 -0
- package/scripts/sw +8 -4
- package/scripts/sw-activity.sh +1 -1
- package/scripts/sw-adaptive.sh +1 -1
- package/scripts/sw-adversarial.sh +1 -1
- package/scripts/sw-architecture-enforcer.sh +1 -1
- package/scripts/sw-auth.sh +1 -1
- package/scripts/sw-autonomous.sh +1 -1
- package/scripts/sw-changelog.sh +1 -1
- package/scripts/sw-checkpoint.sh +1 -1
- package/scripts/sw-ci.sh +1 -1
- package/scripts/sw-cleanup.sh +1 -1
- package/scripts/sw-code-review.sh +1 -1
- package/scripts/sw-connect.sh +1 -1
- package/scripts/sw-context.sh +1 -1
- package/scripts/sw-cost.sh +12 -3
- package/scripts/sw-daemon.sh +2 -2
- package/scripts/sw-dashboard.sh +1 -1
- package/scripts/sw-db.sh +41 -34
- package/scripts/sw-decide.sh +685 -0
- package/scripts/sw-decompose.sh +1 -1
- package/scripts/sw-deps.sh +1 -1
- package/scripts/sw-developer-simulation.sh +1 -1
- package/scripts/sw-discovery.sh +27 -1
- package/scripts/sw-doc-fleet.sh +1 -1
- package/scripts/sw-docs-agent.sh +1 -1
- package/scripts/sw-docs.sh +1 -1
- package/scripts/sw-doctor.sh +1 -1
- package/scripts/sw-dora.sh +1 -1
- package/scripts/sw-durable.sh +1 -1
- package/scripts/sw-e2e-orchestrator.sh +1 -1
- package/scripts/sw-eventbus.sh +1 -1
- package/scripts/sw-evidence.sh +1 -1
- package/scripts/sw-feedback.sh +1 -1
- package/scripts/sw-fix.sh +1 -1
- package/scripts/sw-fleet-discover.sh +1 -1
- package/scripts/sw-fleet-viz.sh +1 -1
- package/scripts/sw-fleet.sh +1 -1
- package/scripts/sw-github-app.sh +1 -1
- package/scripts/sw-github-checks.sh +1 -1
- package/scripts/sw-github-deploy.sh +1 -1
- package/scripts/sw-github-graphql.sh +1 -1
- package/scripts/sw-guild.sh +1 -1
- package/scripts/sw-heartbeat.sh +1 -1
- package/scripts/sw-hygiene.sh +1 -1
- package/scripts/sw-incident.sh +1 -1
- package/scripts/sw-init.sh +1 -1
- package/scripts/sw-instrument.sh +1 -1
- package/scripts/sw-intelligence.sh +9 -5
- package/scripts/sw-jira.sh +1 -1
- package/scripts/sw-launchd.sh +1 -1
- package/scripts/sw-linear.sh +1 -1
- package/scripts/sw-logs.sh +1 -1
- package/scripts/sw-loop.sh +267 -17
- package/scripts/sw-memory.sh +22 -5
- package/scripts/sw-mission-control.sh +1 -1
- package/scripts/sw-model-router.sh +1 -1
- package/scripts/sw-otel.sh +5 -3
- package/scripts/sw-oversight.sh +1 -1
- package/scripts/sw-pipeline-composer.sh +1 -1
- package/scripts/sw-pipeline-vitals.sh +1 -1
- package/scripts/sw-pipeline.sh +73 -1
- package/scripts/sw-pm.sh +1 -1
- package/scripts/sw-pr-lifecycle.sh +7 -4
- package/scripts/sw-predictive.sh +1 -1
- package/scripts/sw-prep.sh +1 -1
- package/scripts/sw-ps.sh +1 -1
- package/scripts/sw-public-dashboard.sh +1 -1
- package/scripts/sw-quality.sh +9 -5
- package/scripts/sw-reaper.sh +1 -1
- package/scripts/sw-regression.sh +1 -1
- package/scripts/sw-release-manager.sh +1 -1
- package/scripts/sw-release.sh +1 -1
- package/scripts/sw-remote.sh +1 -1
- package/scripts/sw-replay.sh +1 -1
- package/scripts/sw-retro.sh +1 -1
- package/scripts/sw-review-rerun.sh +1 -1
- package/scripts/sw-scale.sh +66 -10
- package/scripts/sw-security-audit.sh +1 -1
- package/scripts/sw-self-optimize.sh +1 -1
- package/scripts/sw-session.sh +3 -3
- package/scripts/sw-setup.sh +1 -1
- package/scripts/sw-standup.sh +1 -1
- package/scripts/sw-status.sh +1 -1
- package/scripts/sw-strategic.sh +1 -1
- package/scripts/sw-stream.sh +1 -1
- package/scripts/sw-swarm.sh +1 -1
- package/scripts/sw-team-stages.sh +1 -1
- package/scripts/sw-templates.sh +1 -1
- package/scripts/sw-testgen.sh +1 -1
- package/scripts/sw-tmux-pipeline.sh +1 -1
- package/scripts/sw-tmux.sh +1 -1
- package/scripts/sw-trace.sh +1 -1
- package/scripts/sw-tracker.sh +1 -1
- package/scripts/sw-triage.sh +6 -6
- package/scripts/sw-upgrade.sh +1 -1
- package/scripts/sw-ux.sh +1 -1
- package/scripts/sw-webhook.sh +1 -1
- package/scripts/sw-widgets.sh +1 -1
- package/scripts/sw-worktree.sh +1 -1
- package/scripts/update-homebrew-sha.sh +21 -15
package/scripts/sw-autonomous.sh
CHANGED
package/scripts/sw-changelog.sh
CHANGED
package/scripts/sw-checkpoint.sh
CHANGED
|
@@ -8,7 +8,7 @@
|
|
|
8
8
|
set -euo pipefail
|
|
9
9
|
trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
|
|
10
10
|
|
|
11
|
-
VERSION="3.0.0"
|
|
11
|
+
VERSION="3.1.0"
|
|
12
12
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]:-$0}")" && pwd)"
|
|
13
13
|
|
|
14
14
|
# ─── Cross-platform compatibility ──────────────────────────────────────────
|
package/scripts/sw-ci.sh
CHANGED
package/scripts/sw-cleanup.sh
CHANGED
|
@@ -5,7 +5,7 @@
|
|
|
5
5
|
# ║ Default: dry-run (shows what would be cleaned). ║
|
|
6
6
|
# ║ Use --force to actually kill sessions and remove files. ║
|
|
7
7
|
# ╚═══════════════════════════════════════════════════════════════════════════╝
|
|
8
|
-
VERSION="3.0.0"
|
|
8
|
+
VERSION="3.1.0"
|
|
9
9
|
set -euo pipefail
|
|
10
10
|
trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
|
|
11
11
|
|
package/scripts/sw-connect.sh
CHANGED
|
@@ -8,7 +8,7 @@
|
|
|
8
8
|
set -euo pipefail
|
|
9
9
|
trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
|
|
10
10
|
|
|
11
|
-
VERSION="3.0.0"
|
|
11
|
+
VERSION="3.1.0"
|
|
12
12
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
13
13
|
|
|
14
14
|
# ─── Cross-platform compatibility ──────────────────────────────────────────
|
package/scripts/sw-context.sh
CHANGED
package/scripts/sw-cost.sh
CHANGED
|
@@ -6,7 +6,7 @@
|
|
|
6
6
|
set -euo pipefail
|
|
7
7
|
trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
|
|
8
8
|
|
|
9
|
-
VERSION="3.0.0"
|
|
9
|
+
VERSION="3.1.0"
|
|
10
10
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
11
11
|
REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
|
|
12
12
|
|
|
@@ -142,9 +142,9 @@ cost_record() {
|
|
|
142
142
|
local cost_usd
|
|
143
143
|
cost_usd=$(cost_calculate "$input_tokens" "$output_tokens" "$model")
|
|
144
144
|
|
|
145
|
-
# Try SQLite first
|
|
145
|
+
# Try SQLite first (arg order must match db_record_cost signature: tokens, tokens, model, cost, stage, issue)
|
|
146
146
|
if type db_record_cost >/dev/null 2>&1; then
|
|
147
|
-
db_record_cost "$input_tokens" "$output_tokens" "$model" "$
|
|
147
|
+
db_record_cost "$input_tokens" "$output_tokens" "$model" "$cost_usd" "$stage" "$issue" 2>/dev/null || true
|
|
148
148
|
fi
|
|
149
149
|
|
|
150
150
|
# Always write to JSON (dual-write period)
|
|
@@ -264,6 +264,11 @@ cost_remaining_budget() {
|
|
|
264
264
|
budget_usd=$(jq -r '.daily_budget_usd' "$BUDGET_FILE" 2>/dev/null || echo "0")
|
|
265
265
|
|
|
266
266
|
if [[ "$budget_enabled" != "true" || "$budget_usd" == "0" ]]; then
|
|
267
|
+
if [[ -z "${_BUDGET_UNCONFIGURED_WARNED:-}" ]]; then
|
|
268
|
+
info "Budget not configured — unlimited. Use 'shipwright cost budget set <amount>'"
|
|
269
|
+
emit_event "cost.budget_unconfigured" "status=unlimited"
|
|
270
|
+
_BUDGET_UNCONFIGURED_WARNED=1
|
|
271
|
+
fi
|
|
267
272
|
echo "unlimited"
|
|
268
273
|
return 0
|
|
269
274
|
fi
|
|
@@ -894,6 +899,8 @@ show_help() {
|
|
|
894
899
|
}
|
|
895
900
|
|
|
896
901
|
# ─── Command Router ─────────────────────────────────────────────────────────
|
|
902
|
+
# Only run CLI when executed directly (not when sourced by sw-pipeline.sh)
|
|
903
|
+
if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then
|
|
897
904
|
|
|
898
905
|
SUBCOMMAND="${1:-help}"
|
|
899
906
|
shift 2>/dev/null || true
|
|
@@ -943,3 +950,5 @@ case "$SUBCOMMAND" in
|
|
|
943
950
|
exit 1
|
|
944
951
|
;;
|
|
945
952
|
esac
|
|
953
|
+
|
|
954
|
+
fi # end source guard
|
package/scripts/sw-daemon.sh
CHANGED
|
@@ -9,7 +9,7 @@ trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
|
|
|
9
9
|
# Allow spawning Claude CLI from within a Claude Code session (daemon, fleet, etc.)
|
|
10
10
|
unset CLAUDECODE 2>/dev/null || true
|
|
11
11
|
|
|
12
|
-
VERSION="3.0.0"
|
|
12
|
+
VERSION="3.1.0"
|
|
13
13
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
14
14
|
REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
|
|
15
15
|
|
|
@@ -515,7 +515,7 @@ load_config() {
|
|
|
515
515
|
PROGRESS_MONITORING=$(jq -r '.health.progress_based // true' "$config_file")
|
|
516
516
|
PROGRESS_CHECKS_BEFORE_WARN=$(jq -r '.health.stale_checks_before_warn // 20' "$config_file")
|
|
517
517
|
PROGRESS_CHECKS_BEFORE_KILL=$(jq -r '.health.stale_checks_before_kill // 120' "$config_file")
|
|
518
|
-
PROGRESS_HARD_LIMIT_S=$(jq -r '.health.hard_limit_s //
|
|
518
|
+
PROGRESS_HARD_LIMIT_S=$(jq -r '.health.hard_limit_s // 21600' "$config_file") # 21600s = 6h default; 0 = disabled
|
|
519
519
|
NUDGE_ENABLED=$(jq -r '.health.nudge_enabled // true' "$config_file")
|
|
520
520
|
NUDGE_AFTER_CHECKS=$(jq -r '.health.nudge_after_checks // 40' "$config_file")
|
|
521
521
|
|
package/scripts/sw-dashboard.sh
CHANGED
|
@@ -6,7 +6,7 @@
|
|
|
6
6
|
set -euo pipefail
|
|
7
7
|
trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
|
|
8
8
|
|
|
9
|
-
VERSION="3.0.0"
|
|
9
|
+
VERSION="3.1.0"
|
|
10
10
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
11
11
|
|
|
12
12
|
# ─── Cross-platform compatibility ──────────────────────────────────────────
|
package/scripts/sw-db.sh
CHANGED
|
@@ -14,7 +14,7 @@ if [[ -n "${_SW_DB_LOADED:-}" ]] && [[ "${BASH_SOURCE[0]}" != "$0" ]]; then
|
|
|
14
14
|
fi
|
|
15
15
|
_SW_DB_LOADED=1
|
|
16
16
|
|
|
17
|
-
VERSION="3.0.0"
|
|
17
|
+
VERSION="3.1.0"
|
|
18
18
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
19
19
|
REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
|
|
20
20
|
|
|
@@ -93,6 +93,12 @@ db_available() {
|
|
|
93
93
|
check_sqlite3 && [[ -f "$DB_FILE" ]] && _db_feature_enabled
|
|
94
94
|
}
|
|
95
95
|
|
|
96
|
+
# ─── SQL Escaping ──────────────────────────────────────────────────────────
|
|
97
|
+
# Bash 3.2 (macOS default) breaks ${var//$_SQL_SQ/$_SQL_SQ$_SQL_SQ} — backslashes leak into output.
|
|
98
|
+
# This helper uses a variable to hold the single quote for reliable escaping.
|
|
99
|
+
_SQL_SQ="'"
|
|
100
|
+
_sql_escape() { local _v="$1"; echo "${_v//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"; }
|
|
101
|
+
|
|
96
102
|
# ─── Ensure Database Directory ──────────────────────────────────────────────
|
|
97
103
|
ensure_db_dir() {
|
|
98
104
|
mkdir -p "$DB_DIR"
|
|
@@ -760,7 +766,7 @@ db_query_events_since() {
|
|
|
760
766
|
# db_get_consumer_offset <consumer_id> — returns last_event_id or "0"
|
|
761
767
|
db_get_consumer_offset() {
|
|
762
768
|
local consumer_id="$1"
|
|
763
|
-
consumer_id="${consumer_id
|
|
769
|
+
consumer_id="${consumer_id//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
764
770
|
_db_query "SELECT last_event_id FROM event_consumers WHERE consumer_id = '${consumer_id}';" 2>/dev/null || echo "0"
|
|
765
771
|
}
|
|
766
772
|
|
|
@@ -768,7 +774,7 @@ db_get_consumer_offset() {
|
|
|
768
774
|
db_set_consumer_offset() {
|
|
769
775
|
local consumer_id="$1"
|
|
770
776
|
local last_event_id="$2"
|
|
771
|
-
consumer_id="${consumer_id
|
|
777
|
+
consumer_id="${consumer_id//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
772
778
|
_db_exec "INSERT OR REPLACE INTO event_consumers (consumer_id, last_event_id, last_consumed_at) VALUES ('${consumer_id}', ${last_event_id}, '$(now_iso)');"
|
|
773
779
|
}
|
|
774
780
|
|
|
@@ -776,9 +782,9 @@ db_set_consumer_offset() {
|
|
|
776
782
|
db_save_checkpoint() {
|
|
777
783
|
local workflow_id="$1"
|
|
778
784
|
local data="$2"
|
|
779
|
-
workflow_id="${workflow_id
|
|
785
|
+
workflow_id="${workflow_id//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
780
786
|
data="${data//$'\n'/ }"
|
|
781
|
-
data="${data
|
|
787
|
+
data="${data//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
782
788
|
if ! db_available; then return 1; fi
|
|
783
789
|
_db_exec "INSERT OR REPLACE INTO durable_checkpoints (workflow_id, checkpoint_data, created_at) VALUES ('${workflow_id}', '${data}', '$(now_iso)');"
|
|
784
790
|
}
|
|
@@ -786,7 +792,7 @@ db_save_checkpoint() {
|
|
|
786
792
|
# db_load_checkpoint <workflow_id> — returns checkpoint_data or empty
|
|
787
793
|
db_load_checkpoint() {
|
|
788
794
|
local workflow_id="$1"
|
|
789
|
-
workflow_id="${workflow_id
|
|
795
|
+
workflow_id="${workflow_id//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
790
796
|
if ! db_available; then return 1; fi
|
|
791
797
|
_db_query "SELECT checkpoint_data FROM durable_checkpoints WHERE workflow_id = '${workflow_id}';" 2>/dev/null || echo ""
|
|
792
798
|
}
|
|
@@ -842,8 +848,8 @@ db_save_job() {
|
|
|
842
848
|
if ! db_available; then return 1; fi
|
|
843
849
|
|
|
844
850
|
# Escape single quotes in title/goal
|
|
845
|
-
title="${title
|
|
846
|
-
goal="${goal
|
|
851
|
+
title="${title//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
852
|
+
goal="${goal//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
847
853
|
|
|
848
854
|
_db_exec "INSERT OR REPLACE INTO daemon_state (job_id, issue_number, title, goal, pid, worktree, branch, status, template, started_at, updated_at) VALUES ('${job_id}', ${issue_num}, '${title}', '${goal}', ${pid}, '${worktree}', '${branch}', 'active', '${template}', '${ts}', '${ts}');"
|
|
849
855
|
}
|
|
@@ -859,7 +865,7 @@ db_complete_job() {
|
|
|
859
865
|
|
|
860
866
|
if ! db_available; then return 1; fi
|
|
861
867
|
|
|
862
|
-
error_msg="${error_msg
|
|
868
|
+
error_msg="${error_msg//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
863
869
|
|
|
864
870
|
_db_exec "UPDATE daemon_state SET status = 'completed', result = '${result}', duration = '${duration}', error_message = '${error_msg}', completed_at = '${ts}', updated_at = '${ts}' WHERE job_id = '${job_id}' AND status = 'active';"
|
|
865
871
|
}
|
|
@@ -873,7 +879,7 @@ db_fail_job() {
|
|
|
873
879
|
|
|
874
880
|
if ! db_available; then return 1; fi
|
|
875
881
|
|
|
876
|
-
error_msg="${error_msg
|
|
882
|
+
error_msg="${error_msg//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
877
883
|
|
|
878
884
|
_db_exec "UPDATE daemon_state SET status = 'failed', result = 'failure', error_message = '${error_msg}', completed_at = '${ts}', updated_at = '${ts}' WHERE job_id = '${job_id}' AND status = 'active';"
|
|
879
885
|
}
|
|
@@ -917,7 +923,7 @@ db_remove_active_job() {
|
|
|
917
923
|
db_enqueue_issue() {
|
|
918
924
|
local issue_key="$1"
|
|
919
925
|
if ! db_available; then return 1; fi
|
|
920
|
-
issue_key="${issue_key
|
|
926
|
+
issue_key="${issue_key//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
921
927
|
_db_exec "INSERT OR REPLACE INTO daemon_queue (issue_key, added_at) VALUES ('${issue_key}', '$(now_iso)');"
|
|
922
928
|
}
|
|
923
929
|
|
|
@@ -927,7 +933,7 @@ db_dequeue_next() {
|
|
|
927
933
|
local next escaped
|
|
928
934
|
next=$(_db_query "SELECT issue_key FROM daemon_queue ORDER BY added_at ASC LIMIT 1;" || echo "")
|
|
929
935
|
if [[ -n "$next" ]]; then
|
|
930
|
-
escaped="${next
|
|
936
|
+
escaped="${next//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
931
937
|
_db_exec "DELETE FROM daemon_queue WHERE issue_key = '${escaped}';" 2>/dev/null || true
|
|
932
938
|
echo "$next"
|
|
933
939
|
fi
|
|
@@ -937,7 +943,7 @@ db_dequeue_next() {
|
|
|
937
943
|
db_is_issue_queued() {
|
|
938
944
|
local issue_key="$1"
|
|
939
945
|
if ! db_available; then return 1; fi
|
|
940
|
-
issue_key="${issue_key
|
|
946
|
+
issue_key="${issue_key//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
941
947
|
local count
|
|
942
948
|
count=$(_db_query "SELECT COUNT(*) FROM daemon_queue WHERE issue_key = '${issue_key}';")
|
|
943
949
|
[[ "${count:-0}" -gt 0 ]]
|
|
@@ -947,7 +953,7 @@ db_is_issue_queued() {
|
|
|
947
953
|
db_remove_from_queue() {
|
|
948
954
|
local issue_key="$1"
|
|
949
955
|
if ! db_available; then return 1; fi
|
|
950
|
-
issue_key="${issue_key
|
|
956
|
+
issue_key="${issue_key//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
951
957
|
_db_exec "DELETE FROM daemon_queue WHERE issue_key = '${issue_key}';"
|
|
952
958
|
}
|
|
953
959
|
|
|
@@ -974,9 +980,9 @@ db_record_outcome() {
|
|
|
974
980
|
|
|
975
981
|
if ! db_available; then return 1; fi
|
|
976
982
|
|
|
977
|
-
job_id="${job_id
|
|
978
|
-
issue="${issue
|
|
979
|
-
template="${template
|
|
983
|
+
job_id="${job_id//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
984
|
+
issue="${issue//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
985
|
+
template="${template//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
980
986
|
|
|
981
987
|
_db_exec "INSERT OR REPLACE INTO pipeline_outcomes
|
|
982
988
|
(job_id, issue_number, template, success, duration_secs, retry_count, cost_usd, complexity, created_at)
|
|
@@ -1087,7 +1093,7 @@ db_record_heartbeat() {
|
|
|
1087
1093
|
|
|
1088
1094
|
if ! db_available; then return 1; fi
|
|
1089
1095
|
|
|
1090
|
-
activity="${activity
|
|
1096
|
+
activity="${activity//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1091
1097
|
|
|
1092
1098
|
_db_exec "INSERT OR REPLACE INTO heartbeats (job_id, pid, issue, stage, iteration, last_activity, memory_mb, updated_at) VALUES ('${job_id}', ${pid}, ${issue}, '${stage}', ${iteration}, '${activity}', ${memory_mb}, '${ts}');"
|
|
1093
1099
|
}
|
|
@@ -1135,9 +1141,9 @@ db_record_failure() {
|
|
|
1135
1141
|
if ! db_available; then return 1; fi
|
|
1136
1142
|
|
|
1137
1143
|
# Escape quotes
|
|
1138
|
-
error_sig="${error_sig
|
|
1139
|
-
root_cause="${root_cause
|
|
1140
|
-
fix_desc="${fix_desc
|
|
1144
|
+
error_sig="${error_sig//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1145
|
+
root_cause="${root_cause//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1146
|
+
fix_desc="${fix_desc//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1141
1147
|
|
|
1142
1148
|
# Upsert: increment occurrences if same signature exists
|
|
1143
1149
|
_db_exec "INSERT INTO memory_failures (repo_hash, failure_class, error_signature, root_cause, fix_description, file_path, stage, occurrences, last_seen_at, created_at, synced) VALUES ('${repo_hash}', '${failure_class}', '${error_sig}', '${root_cause}', '${fix_desc}', '${file_path}', '${stage}', 1, '${ts}', '${ts}', 0) ON CONFLICT(id) DO UPDATE SET occurrences = occurrences + 1, last_seen_at = '${ts}';"
|
|
@@ -1161,8 +1167,8 @@ db_query_similar_failures() {
|
|
|
1161
1167
|
db_save_pattern() {
|
|
1162
1168
|
local repo_hash="$1" pattern_type="$2" pattern_key="$3" description="${4:-}" metadata="${5:-}"
|
|
1163
1169
|
if ! db_available; then return 1; fi
|
|
1164
|
-
description="${description
|
|
1165
|
-
metadata="${metadata
|
|
1170
|
+
description="${description//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1171
|
+
metadata="${metadata//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1166
1172
|
_db_exec "INSERT INTO memory_patterns (repo_hash, pattern_type, pattern_key, description, last_seen_at, created_at, metadata)
|
|
1167
1173
|
VALUES ('$repo_hash', '$pattern_type', '$pattern_key', '$description', '$(now_iso)', '$(now_iso)', '$metadata')
|
|
1168
1174
|
ON CONFLICT(repo_hash, pattern_type, pattern_key) DO UPDATE SET
|
|
@@ -1181,9 +1187,9 @@ db_query_patterns() {
|
|
|
1181
1187
|
db_save_decision() {
|
|
1182
1188
|
local repo_hash="$1" decision_type="$2" context="$3" decision="$4" metadata="${5:-}"
|
|
1183
1189
|
if ! db_available; then return 1; fi
|
|
1184
|
-
context="${context
|
|
1185
|
-
decision="${decision
|
|
1186
|
-
metadata="${metadata
|
|
1190
|
+
context="${context//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1191
|
+
decision="${decision//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1192
|
+
metadata="${metadata//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1187
1193
|
_db_exec "INSERT INTO memory_decisions (repo_hash, decision_type, context, decision, created_at, updated_at, metadata)
|
|
1188
1194
|
VALUES ('$repo_hash', '$decision_type', '$context', '$decision', '$(now_iso)', '$(now_iso)', '$metadata');"
|
|
1189
1195
|
}
|
|
@@ -1191,7 +1197,7 @@ db_save_decision() {
|
|
|
1191
1197
|
db_update_decision_outcome() {
|
|
1192
1198
|
local decision_id="$1" outcome="$2" confidence="${3:-}"
|
|
1193
1199
|
if ! db_available; then return 1; fi
|
|
1194
|
-
outcome="${outcome
|
|
1200
|
+
outcome="${outcome//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1195
1201
|
local set_clause="outcome = '$outcome', updated_at = '$(now_iso)'"
|
|
1196
1202
|
[[ -n "$confidence" ]] && set_clause="$set_clause, confidence = $confidence"
|
|
1197
1203
|
_db_exec "UPDATE memory_decisions SET $set_clause WHERE id = $decision_id;"
|
|
@@ -1209,7 +1215,7 @@ db_query_decisions() {
|
|
|
1209
1215
|
db_save_embedding() {
|
|
1210
1216
|
local content_hash="$1" source_type="$2" content_text="$3" repo_hash="${4:-}"
|
|
1211
1217
|
if ! db_available; then return 1; fi
|
|
1212
|
-
content_text="${content_text
|
|
1218
|
+
content_text="${content_text//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1213
1219
|
_db_exec "INSERT OR IGNORE INTO memory_embeddings (content_hash, source_type, content_text, repo_hash, created_at)
|
|
1214
1220
|
VALUES ('$content_hash', '$source_type', '$content_text', '$repo_hash', '$(now_iso)');"
|
|
1215
1221
|
}
|
|
@@ -1230,8 +1236,8 @@ db_save_reasoning_trace() {
|
|
|
1230
1236
|
escaped_input=$(echo "$input_context" | sed "s/'/''/g")
|
|
1231
1237
|
escaped_reasoning=$(echo "$reasoning" | sed "s/'/''/g")
|
|
1232
1238
|
escaped_output=$(echo "$output_decision" | sed "s/'/''/g")
|
|
1233
|
-
job_id="${job_id
|
|
1234
|
-
step_name="${step_name
|
|
1239
|
+
job_id="${job_id//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1240
|
+
step_name="${step_name//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1235
1241
|
if ! db_available; then return 1; fi
|
|
1236
1242
|
_db_exec "INSERT INTO reasoning_traces (job_id, step_name, input_context, reasoning, output_decision, confidence, created_at)
|
|
1237
1243
|
VALUES ('$job_id', '$step_name', '$escaped_input', '$escaped_reasoning', '$escaped_output', $confidence, '$(now_iso)');"
|
|
@@ -1239,7 +1245,7 @@ db_save_reasoning_trace() {
|
|
|
1239
1245
|
|
|
1240
1246
|
db_query_reasoning_traces() {
|
|
1241
1247
|
local job_id="$1"
|
|
1242
|
-
job_id="${job_id
|
|
1248
|
+
job_id="${job_id//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1243
1249
|
if ! db_available; then echo "[]"; return 0; fi
|
|
1244
1250
|
_db_query -json "SELECT * FROM reasoning_traces WHERE job_id = '$job_id' ORDER BY id ASC;" || echo "[]"
|
|
1245
1251
|
}
|
|
@@ -1261,7 +1267,8 @@ add_pipeline_run() {
|
|
|
1261
1267
|
|
|
1262
1268
|
local ts
|
|
1263
1269
|
ts="$(now_iso)"
|
|
1264
|
-
goal="${goal
|
|
1270
|
+
goal="${goal//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1271
|
+
branch="${branch//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1265
1272
|
|
|
1266
1273
|
_db_exec "INSERT OR IGNORE INTO pipeline_runs (job_id, issue_number, goal, branch, status, template, started_at, created_at) VALUES ('${job_id}', ${issue_number}, '${goal}', '${branch}', 'pending', '${template}', '${ts}', '${ts}');" || return 1
|
|
1267
1274
|
}
|
|
@@ -1292,7 +1299,7 @@ record_stage() {
|
|
|
1292
1299
|
|
|
1293
1300
|
local ts
|
|
1294
1301
|
ts="$(now_iso)"
|
|
1295
|
-
error_msg="${error_msg
|
|
1302
|
+
error_msg="${error_msg//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1296
1303
|
|
|
1297
1304
|
_db_exec "INSERT INTO pipeline_stages (job_id, stage_name, status, started_at, completed_at, duration_secs, error_message, created_at) VALUES ('${job_id}', '${stage_name}', '${status}', '${ts}', '${ts}', ${duration_secs}, '${error_msg}', '${ts}');" || return 1
|
|
1298
1305
|
}
|
|
@@ -1530,7 +1537,7 @@ migrate_json_data() {
|
|
|
1530
1537
|
hb_mem=$(jq -r '.memory_mb // 0' "$hb_file" 2>/dev/null || echo "0")
|
|
1531
1538
|
hb_updated=$(jq -r '.updated_at // ""' "$hb_file" 2>/dev/null || echo "$(now_iso)")
|
|
1532
1539
|
|
|
1533
|
-
hb_activity="${hb_activity
|
|
1540
|
+
hb_activity="${hb_activity//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
|
|
1534
1541
|
_db_exec "INSERT OR REPLACE INTO heartbeats (job_id, pid, issue, stage, iteration, last_activity, memory_mb, updated_at) VALUES ('${hb_job}', ${hb_pid}, ${hb_issue}, '${hb_stage}', ${hb_iter}, '${hb_activity}', ${hb_mem}, '${hb_updated}');" 2>/dev/null && hb_count=$((hb_count + 1))
|
|
1535
1542
|
done
|
|
1536
1543
|
success "Heartbeats: ${hb_count} imported"
|