shipwright-cli 1.10.0 → 2.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +221 -55
- package/completions/_shipwright +264 -32
- package/completions/shipwright.bash +118 -26
- package/completions/shipwright.fish +80 -2
- package/dashboard/server.ts +208 -0
- package/docs/strategy/01-market-research.md +619 -0
- package/docs/strategy/02-mission-and-brand.md +587 -0
- package/docs/strategy/03-gtm-and-roadmap.md +759 -0
- package/docs/strategy/QUICK-START.txt +289 -0
- package/docs/strategy/README.md +172 -0
- package/docs/tmux-research/TMUX-ARCHITECTURE.md +567 -0
- package/docs/tmux-research/TMUX-AUDIT.md +925 -0
- package/docs/tmux-research/TMUX-BEST-PRACTICES-2025-2026.md +829 -0
- package/docs/tmux-research/TMUX-QUICK-REFERENCE.md +543 -0
- package/docs/tmux-research/TMUX-RESEARCH-INDEX.md +438 -0
- package/package.json +4 -2
- package/scripts/lib/helpers.sh +7 -0
- package/scripts/sw +323 -2
- package/scripts/sw-activity.sh +500 -0
- package/scripts/sw-adaptive.sh +925 -0
- package/scripts/sw-adversarial.sh +1 -1
- package/scripts/sw-architecture-enforcer.sh +1 -1
- package/scripts/sw-auth.sh +613 -0
- package/scripts/sw-autonomous.sh +754 -0
- package/scripts/sw-changelog.sh +704 -0
- package/scripts/sw-checkpoint.sh +1 -1
- package/scripts/sw-ci.sh +602 -0
- package/scripts/sw-cleanup.sh +1 -1
- package/scripts/sw-code-review.sh +698 -0
- package/scripts/sw-connect.sh +1 -1
- package/scripts/sw-context.sh +605 -0
- package/scripts/sw-cost.sh +44 -3
- package/scripts/sw-daemon.sh +568 -138
- package/scripts/sw-dashboard.sh +1 -1
- package/scripts/sw-db.sh +1380 -0
- package/scripts/sw-decompose.sh +539 -0
- package/scripts/sw-deps.sh +551 -0
- package/scripts/sw-developer-simulation.sh +1 -1
- package/scripts/sw-discovery.sh +412 -0
- package/scripts/sw-docs-agent.sh +539 -0
- package/scripts/sw-docs.sh +1 -1
- package/scripts/sw-doctor.sh +107 -1
- package/scripts/sw-dora.sh +615 -0
- package/scripts/sw-durable.sh +710 -0
- package/scripts/sw-e2e-orchestrator.sh +535 -0
- package/scripts/sw-eventbus.sh +393 -0
- package/scripts/sw-feedback.sh +479 -0
- package/scripts/sw-fix.sh +1 -1
- package/scripts/sw-fleet-discover.sh +567 -0
- package/scripts/sw-fleet-viz.sh +404 -0
- package/scripts/sw-fleet.sh +8 -1
- package/scripts/sw-github-app.sh +596 -0
- package/scripts/sw-github-checks.sh +4 -4
- package/scripts/sw-github-deploy.sh +1 -1
- package/scripts/sw-github-graphql.sh +1 -1
- package/scripts/sw-guild.sh +569 -0
- package/scripts/sw-heartbeat.sh +1 -1
- package/scripts/sw-hygiene.sh +559 -0
- package/scripts/sw-incident.sh +656 -0
- package/scripts/sw-init.sh +237 -24
- package/scripts/sw-instrument.sh +699 -0
- package/scripts/sw-intelligence.sh +1 -1
- package/scripts/sw-jira.sh +1 -1
- package/scripts/sw-launchd.sh +363 -28
- package/scripts/sw-linear.sh +1 -1
- package/scripts/sw-logs.sh +1 -1
- package/scripts/sw-loop.sh +267 -21
- package/scripts/sw-memory.sh +18 -1
- package/scripts/sw-mission-control.sh +487 -0
- package/scripts/sw-model-router.sh +545 -0
- package/scripts/sw-otel.sh +596 -0
- package/scripts/sw-oversight.sh +764 -0
- package/scripts/sw-pipeline-composer.sh +1 -1
- package/scripts/sw-pipeline-vitals.sh +1 -1
- package/scripts/sw-pipeline.sh +947 -35
- package/scripts/sw-pm.sh +758 -0
- package/scripts/sw-pr-lifecycle.sh +522 -0
- package/scripts/sw-predictive.sh +8 -1
- package/scripts/sw-prep.sh +1 -1
- package/scripts/sw-ps.sh +1 -1
- package/scripts/sw-public-dashboard.sh +798 -0
- package/scripts/sw-quality.sh +595 -0
- package/scripts/sw-reaper.sh +1 -1
- package/scripts/sw-recruit.sh +2248 -0
- package/scripts/sw-regression.sh +642 -0
- package/scripts/sw-release-manager.sh +736 -0
- package/scripts/sw-release.sh +706 -0
- package/scripts/sw-remote.sh +1 -1
- package/scripts/sw-replay.sh +520 -0
- package/scripts/sw-retro.sh +691 -0
- package/scripts/sw-scale.sh +444 -0
- package/scripts/sw-security-audit.sh +505 -0
- package/scripts/sw-self-optimize.sh +1 -1
- package/scripts/sw-session.sh +1 -1
- package/scripts/sw-setup.sh +263 -127
- package/scripts/sw-standup.sh +712 -0
- package/scripts/sw-status.sh +44 -2
- package/scripts/sw-strategic.sh +806 -0
- package/scripts/sw-stream.sh +450 -0
- package/scripts/sw-swarm.sh +620 -0
- package/scripts/sw-team-stages.sh +511 -0
- package/scripts/sw-templates.sh +4 -4
- package/scripts/sw-testgen.sh +566 -0
- package/scripts/sw-tmux-pipeline.sh +554 -0
- package/scripts/sw-tmux-role-color.sh +58 -0
- package/scripts/sw-tmux-status.sh +128 -0
- package/scripts/sw-tmux.sh +1 -1
- package/scripts/sw-trace.sh +485 -0
- package/scripts/sw-tracker-github.sh +188 -0
- package/scripts/sw-tracker-jira.sh +172 -0
- package/scripts/sw-tracker-linear.sh +251 -0
- package/scripts/sw-tracker.sh +117 -2
- package/scripts/sw-triage.sh +627 -0
- package/scripts/sw-upgrade.sh +1 -1
- package/scripts/sw-ux.sh +677 -0
- package/scripts/sw-webhook.sh +627 -0
- package/scripts/sw-widgets.sh +530 -0
- package/scripts/sw-worktree.sh +1 -1
- package/templates/pipelines/autonomous.json +2 -2
- package/tmux/shipwright-overlay.conf +35 -17
- package/tmux/tmux.conf +23 -21
package/scripts/sw-db.sh
ADDED
|
@@ -0,0 +1,1380 @@
|
|
|
1
|
+
#!/usr/bin/env bash
# ╔═══════════════════════════════════════════════════════════════════════════╗
# ║ shipwright db — SQLite Persistence Layer ║
# ║ Unified state store: events, runs, daemon state, costs, heartbeats ║
# ║ Backward compatible: falls back to JSON if SQLite unavailable ║
# ║ Cross-device sync via HTTP (Turso/sqld/any REST endpoint) ║
# ╚═══════════════════════════════════════════════════════════════════════════╝
set -euo pipefail
# Report the file/line of any command that fails under errexit.
trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR

# ─── Double-source guard ─────────────────────────────────────────
# When this file is being *sourced* (BASH_SOURCE[0] != $0) and was already
# loaded, bail out early. `return` is only legal in a sourced context, hence
# the `2>/dev/null || true` belt-and-suspenders.
if [[ -n "${_SW_DB_LOADED:-}" ]] && [[ "${BASH_SOURCE[0]}" != "$0" ]]; then
  return 0 2>/dev/null || true
fi
_SW_DB_LOADED=1

VERSION="2.1.0"
# Directory containing this script, resolved even when sourced.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Package root (one level up from scripts/).
REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
|
|
20
|
+
|
|
21
|
+
# ─── Colors (matches Seth's tmux theme) ─────────────────────────────────────
# Truecolor ANSI escapes stored as literal backslash sequences; the output
# helpers below rely on `echo -e` to interpret them.
CYAN='\033[38;2;0;212;255m'    # #00d4ff — primary accent
PURPLE='\033[38;2;124;58;237m' # #7c3aed — secondary
BLUE='\033[38;2;0;102;255m'    # #0066ff — tertiary
GREEN='\033[38;2;74;222;128m'  # success
YELLOW='\033[38;2;250;204;21m' # warning
RED='\033[38;2;248;113;113m'   # error
DIM='\033[2m'
BOLD='\033[1m'
RESET='\033[0m'

# ─── Cross-platform compatibility ──────────────────────────────────────────
# Optional: pulls in macOS/Linux shims when present; silently skipped otherwise.
# shellcheck source=lib/compat.sh
[[ -f "$SCRIPT_DIR/lib/compat.sh" ]] && source "$SCRIPT_DIR/lib/compat.sh"

# ─── Output Helpers ─────────────────────────────────────────────────────────
# All take a message as "$*"; error() writes to stderr, the rest to stdout.
info() { echo -e "${CYAN}${BOLD}▸${RESET} $*"; }
success() { echo -e "${GREEN}${BOLD}✓${RESET} $*"; }
warn() { echo -e "${YELLOW}${BOLD}⚠${RESET} $*"; }
error() { echo -e "${RED}${BOLD}✗${RESET} $*" >&2; }

# Timestamps: now_iso() is UTC ISO-8601; now_epoch() is seconds since epoch.
now_iso() { date -u +"%Y-%m-%dT%H:%M:%SZ"; }
now_epoch() { date +%s; }

# ─── Database Configuration ──────────────────────────────────────────────────
DB_DIR="${HOME}/.shipwright"
DB_FILE="${DB_DIR}/shipwright.db"
# Bump together with migrate_schema() when the schema changes.
SCHEMA_VERSION=2

# JSON fallback paths
# Legacy flat-file stores; still dual-written for backward compatibility.
EVENTS_FILE="${DB_DIR}/events.jsonl"
DAEMON_STATE_FILE="${DB_DIR}/daemon-state.json"
DEVELOPER_REGISTRY_FILE="${DB_DIR}/developer-registry.json"
COST_FILE_JSON="${DB_DIR}/costs.json"
BUDGET_FILE_JSON="${DB_DIR}/budget.json"
HEARTBEAT_DIR="${DB_DIR}/heartbeats"

# Sync config
SYNC_CONFIG_FILE="${DB_DIR}/sync-config.json"
|
|
60
|
+
|
|
61
|
+
# ─── Feature Flag ─────────────────────────────────────────────────────────────
|
|
62
|
+
# Check if DB is enabled in daemon config (default: true)
|
|
63
|
+
_db_feature_enabled() {
|
|
64
|
+
local config_file=".claude/daemon-config.json"
|
|
65
|
+
if [[ -f "$config_file" ]]; then
|
|
66
|
+
local enabled
|
|
67
|
+
enabled=$(jq -r '.db.enabled // true' "$config_file" 2>/dev/null || echo "true")
|
|
68
|
+
[[ "$enabled" == "true" ]]
|
|
69
|
+
return $?
|
|
70
|
+
fi
|
|
71
|
+
return 0
|
|
72
|
+
}
|
|
73
|
+
|
|
74
|
+
# ─── Check Prerequisites ─────────────────────────────────────────────────────
_SQLITE3_CHECKED=""
_SQLITE3_AVAILABLE=""

# check_sqlite3 — true when the sqlite3 CLI is on PATH.
# The lookup is memoized in the two globals above so repeated calls do not
# re-run `command -v`.
check_sqlite3() {
  if [[ -z "$_SQLITE3_CHECKED" ]]; then
    _SQLITE3_CHECKED=1
    _SQLITE3_AVAILABLE=""
    if command -v sqlite3 &>/dev/null; then
      _SQLITE3_AVAILABLE=1
    fi
  fi
  [[ -n "$_SQLITE3_AVAILABLE" ]]
}
|
|
90
|
+
|
|
91
|
+
# Check if DB is ready (sqlite3 available + file exists + feature enabled)
db_available() {
  check_sqlite3 || return 1
  [[ -f "$DB_FILE" ]] || return 1
  _db_feature_enabled
}
|
|
95
|
+
|
|
96
|
+
# ─── Ensure Database Directory ──────────────────────────────────────────────
# ensure_db_dir — create $DB_DIR (and parents) when missing; idempotent.
ensure_db_dir() {
  mkdir -p -- "$DB_DIR"
}
|
|
100
|
+
|
|
101
|
+
# ─── SQL Execution Helper ──────────────────────────────────────────────────
# Runs SQL with proper error handling. Silent on success.
# Stderr is discarded, so callers only see the exit status (and any stdout).
_db_exec() {
  sqlite3 "$DB_FILE" "$@" 2>/dev/null
}

# Runs SQL and returns output. Returns 1 on failure.
# Same as _db_exec but with an explicit non-zero return so callers can
# `|| echo fallback` safely under set -e.
_db_query() {
  sqlite3 "$DB_FILE" "$@" 2>/dev/null || return 1
}
|
|
111
|
+
|
|
112
|
+
# ─── Initialize Database Schema ──────────────────────────────────────────────
# init_schema — create every table and index used by the persistence layer.
# Idempotent: all DDL uses IF NOT EXISTS, so repeated calls are safe.
# Degrades to a warning + no-op when the sqlite3 CLI is unavailable.
init_schema() {
  ensure_db_dir

  if ! check_sqlite3; then
    warn "Skipping SQLite initialization — sqlite3 not available"
    return 0
  fi

  # Enable WAL mode for crash safety + concurrent readers
  sqlite3 "$DB_FILE" "PRAGMA journal_mode=WAL;" >/dev/null 2>&1 || true

  # Quoted heredoc delimiter: the SQL below is passed to sqlite3 verbatim,
  # with no shell expansion.
  sqlite3 "$DB_FILE" <<'SCHEMA'
-- Schema version tracking
CREATE TABLE IF NOT EXISTS _schema (
  version INTEGER PRIMARY KEY,
  created_at TEXT NOT NULL,
  applied_at TEXT NOT NULL
);

-- Events log (replaces events.jsonl)
-- The UNIQUE constraint lets writers use INSERT OR IGNORE to dedupe
-- same-second re-emits of the same event.
CREATE TABLE IF NOT EXISTS events (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  ts TEXT NOT NULL,
  ts_epoch INTEGER NOT NULL,
  type TEXT NOT NULL,
  job_id TEXT,
  stage TEXT,
  status TEXT,
  repo TEXT,
  branch TEXT,
  error TEXT,
  duration_secs INTEGER,
  metadata TEXT,
  created_at TEXT NOT NULL,
  synced INTEGER DEFAULT 0,
  UNIQUE(ts_epoch, type, job_id)
);

-- Pipeline runs tracking
CREATE TABLE IF NOT EXISTS pipeline_runs (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  job_id TEXT UNIQUE NOT NULL,
  issue_number INTEGER,
  goal TEXT,
  branch TEXT,
  status TEXT NOT NULL,
  template TEXT,
  started_at TEXT NOT NULL,
  completed_at TEXT,
  duration_secs INTEGER,
  stage_name TEXT,
  stage_status TEXT,
  error_message TEXT,
  commit_hash TEXT,
  pr_number INTEGER,
  metadata TEXT,
  created_at TEXT NOT NULL
);

-- Stage history per pipeline run
-- NOTE(review): FOREIGN KEY clauses are declarative only unless the
-- connection sets PRAGMA foreign_keys=ON — confirm if enforcement is expected.
CREATE TABLE IF NOT EXISTS pipeline_stages (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  job_id TEXT NOT NULL,
  stage_name TEXT NOT NULL,
  status TEXT NOT NULL,
  started_at TEXT,
  completed_at TEXT,
  duration_secs INTEGER,
  error_message TEXT,
  metadata TEXT,
  created_at TEXT NOT NULL,
  FOREIGN KEY (job_id) REFERENCES pipeline_runs(job_id)
);

-- Developer registry
CREATE TABLE IF NOT EXISTS developers (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  name TEXT UNIQUE NOT NULL,
  github_login TEXT,
  email TEXT,
  role TEXT,
  avatar_url TEXT,
  bio TEXT,
  expertise TEXT,
  contributed_repos TEXT,
  last_active_at TEXT,
  created_at TEXT NOT NULL,
  updated_at TEXT NOT NULL
);

-- Sessions tracking (teams/agents)
CREATE TABLE IF NOT EXISTS sessions (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  session_id TEXT UNIQUE NOT NULL,
  name TEXT NOT NULL,
  template TEXT,
  status TEXT NOT NULL,
  team_members TEXT,
  started_at TEXT NOT NULL,
  completed_at TEXT,
  duration_secs INTEGER,
  goal TEXT,
  metadata TEXT,
  created_at TEXT NOT NULL
);

-- Metrics (DORA, cost, performance)
CREATE TABLE IF NOT EXISTS metrics (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  job_id TEXT,
  metric_type TEXT NOT NULL,
  metric_name TEXT NOT NULL,
  value REAL NOT NULL,
  period TEXT,
  unit TEXT,
  tags TEXT,
  created_at TEXT NOT NULL,
  FOREIGN KEY (job_id) REFERENCES pipeline_runs(job_id)
);

-- ═══════════════════════════════════════════════════════════════════════
-- Phase 1: New tables for state migration
-- ═══════════════════════════════════════════════════════════════════════

-- Daemon state (replaces daemon-state.json)
-- UNIQUE(job_id, status) means INSERT OR REPLACE keeps at most one row per
-- job per status (one 'active' row, later one 'completed'/'failed' row).
CREATE TABLE IF NOT EXISTS daemon_state (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  job_id TEXT NOT NULL,
  issue_number INTEGER NOT NULL,
  title TEXT,
  goal TEXT,
  pid INTEGER,
  worktree TEXT,
  branch TEXT,
  status TEXT NOT NULL DEFAULT 'active',
  template TEXT,
  started_at TEXT NOT NULL,
  completed_at TEXT,
  result TEXT,
  duration TEXT,
  error_message TEXT,
  retry_count INTEGER DEFAULT 0,
  updated_at TEXT NOT NULL,
  UNIQUE(job_id, status)
);

-- Cost entries (replaces costs.json)
CREATE TABLE IF NOT EXISTS cost_entries (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  input_tokens INTEGER NOT NULL DEFAULT 0,
  output_tokens INTEGER NOT NULL DEFAULT 0,
  model TEXT NOT NULL DEFAULT 'sonnet',
  stage TEXT,
  issue TEXT,
  cost_usd REAL NOT NULL DEFAULT 0,
  ts TEXT NOT NULL,
  ts_epoch INTEGER NOT NULL,
  synced INTEGER DEFAULT 0
);

-- Budgets (replaces budget.json)
-- CHECK (id = 1) enforces a single-row config table.
CREATE TABLE IF NOT EXISTS budgets (
  id INTEGER PRIMARY KEY CHECK (id = 1),
  daily_budget_usd REAL NOT NULL DEFAULT 0,
  enabled INTEGER NOT NULL DEFAULT 0,
  updated_at TEXT NOT NULL
);

-- Heartbeats (replaces heartbeats/*.json)
CREATE TABLE IF NOT EXISTS heartbeats (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  job_id TEXT UNIQUE NOT NULL,
  pid INTEGER,
  issue INTEGER,
  stage TEXT,
  iteration INTEGER DEFAULT 0,
  last_activity TEXT,
  memory_mb INTEGER DEFAULT 0,
  updated_at TEXT NOT NULL
);

-- Memory: failure patterns (replaces memory/*/failures.json)
CREATE TABLE IF NOT EXISTS memory_failures (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  repo_hash TEXT NOT NULL,
  failure_class TEXT NOT NULL,
  error_signature TEXT,
  root_cause TEXT,
  fix_description TEXT,
  file_path TEXT,
  stage TEXT,
  occurrences INTEGER DEFAULT 1,
  last_seen_at TEXT NOT NULL,
  created_at TEXT NOT NULL,
  synced INTEGER DEFAULT 0
);

-- ═══════════════════════════════════════════════════════════════════════
-- Sync tables
-- ═══════════════════════════════════════════════════════════════════════

-- Track unsynced local changes
CREATE TABLE IF NOT EXISTS _sync_log (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  table_name TEXT NOT NULL,
  row_id INTEGER NOT NULL,
  operation TEXT NOT NULL,
  ts_epoch INTEGER NOT NULL,
  synced INTEGER DEFAULT 0
);

-- Replication state
CREATE TABLE IF NOT EXISTS _sync_metadata (
  key TEXT PRIMARY KEY,
  value TEXT NOT NULL,
  updated_at TEXT NOT NULL
);

-- ═══════════════════════════════════════════════════════════════════════
-- Indexes
-- ═══════════════════════════════════════════════════════════════════════

CREATE INDEX IF NOT EXISTS idx_events_type ON events(type);
CREATE INDEX IF NOT EXISTS idx_events_job_id ON events(job_id);
CREATE INDEX IF NOT EXISTS idx_events_ts_epoch ON events(ts_epoch DESC);
CREATE INDEX IF NOT EXISTS idx_events_synced ON events(synced) WHERE synced = 0;
CREATE INDEX IF NOT EXISTS idx_pipeline_runs_job_id ON pipeline_runs(job_id);
CREATE INDEX IF NOT EXISTS idx_pipeline_runs_status ON pipeline_runs(status);
CREATE INDEX IF NOT EXISTS idx_pipeline_runs_created ON pipeline_runs(created_at DESC);
CREATE INDEX IF NOT EXISTS idx_pipeline_stages_job_id ON pipeline_stages(job_id);
CREATE INDEX IF NOT EXISTS idx_developers_name ON developers(name);
CREATE INDEX IF NOT EXISTS idx_sessions_status ON sessions(status);
CREATE INDEX IF NOT EXISTS idx_metrics_job_id ON metrics(job_id);
CREATE INDEX IF NOT EXISTS idx_metrics_type ON metrics(metric_type);
CREATE INDEX IF NOT EXISTS idx_daemon_state_status ON daemon_state(status);
CREATE INDEX IF NOT EXISTS idx_daemon_state_job ON daemon_state(job_id);
CREATE INDEX IF NOT EXISTS idx_cost_entries_epoch ON cost_entries(ts_epoch DESC);
CREATE INDEX IF NOT EXISTS idx_cost_entries_synced ON cost_entries(synced) WHERE synced = 0;
CREATE INDEX IF NOT EXISTS idx_heartbeats_job ON heartbeats(job_id);
CREATE INDEX IF NOT EXISTS idx_memory_failures_repo ON memory_failures(repo_hash);
CREATE INDEX IF NOT EXISTS idx_memory_failures_class ON memory_failures(failure_class);
CREATE INDEX IF NOT EXISTS idx_sync_log_unsynced ON _sync_log(synced) WHERE synced = 0;
SCHEMA
}
|
|
357
|
+
|
|
358
|
+
# ─── Schema Migration ───────────────────────────────────────────────────────
# migrate_schema — bring the on-disk database up to SCHEMA_VERSION.
# Fresh installs: full schema + a device_id row in _sync_metadata.
# Existing v1 databases: upgraded in place via the idempotent init_schema().
# No-op (with warning) when sqlite3 is missing, or when already current.
migrate_schema() {
  if ! check_sqlite3; then
    warn "Skipping migration — sqlite3 not available"
    return 0
  fi

  ensure_db_dir

  # If DB doesn't exist, initialize fresh
  if [[ ! -f "$DB_FILE" ]]; then
    init_schema
    _db_exec "INSERT OR REPLACE INTO _schema (version, created_at, applied_at) VALUES (${SCHEMA_VERSION}, '$(now_iso)', '$(now_iso)');"
    # Initialize device_id for sync
    # (hostname + PID + epoch — a best-effort unique per-device token)
    _db_exec "INSERT OR REPLACE INTO _sync_metadata (key, value, updated_at) VALUES ('device_id', '$(uname -n)-$$-$(now_epoch)', '$(now_iso)');"
    success "Database schema initialized (v${SCHEMA_VERSION})"
    return 0
  fi

  local current_version
  # `|| echo 0` covers a DB with no _schema table (pre-versioning installs).
  current_version=$(_db_query "SELECT COALESCE(MAX(version), 0) FROM _schema;" || echo 0)

  if [[ "$current_version" -ge "$SCHEMA_VERSION" ]]; then
    info "Database already at schema v${current_version}"
    return 0
  fi

  # Migration from v1 → v2: add new tables
  if [[ "$current_version" -lt 2 ]]; then
    info "Migrating schema v${current_version} → v2..."
    init_schema # CREATE IF NOT EXISTS is idempotent
    # Enable WAL if not already
    sqlite3 "$DB_FILE" "PRAGMA journal_mode=WAL;" >/dev/null 2>&1 || true
    _db_exec "INSERT OR REPLACE INTO _schema (version, created_at, applied_at) VALUES (2, '$(now_iso)', '$(now_iso)');"
    # Initialize device_id if missing
    _db_exec "INSERT OR IGNORE INTO _sync_metadata (key, value, updated_at) VALUES ('device_id', '$(uname -n)-$$-$(now_epoch)', '$(now_iso)');"
    success "Migrated to schema v2"
  fi
}
|
|
397
|
+
|
|
398
|
+
# ═══════════════════════════════════════════════════════════════════════════
# Event Functions (dual-write: SQLite + JSONL)
# ═══════════════════════════════════════════════════════════════════════════

# db_add_event <type> [key=value ...]
# Parameterized event insert. Used by emit_event() in helpers.sh.
# Known keys (job_id, stage, status, duration_secs) map to dedicated columns;
# any other key=value pair is folded into the JSON `metadata` column.
# Returns 1 when the DB is unavailable or the insert fails.
db_add_event() {
  local event_type="$1"
  shift

  local ts ts_epoch job_id="" stage="" status="" duration_secs="0" metadata=""
  ts="$(now_iso)"
  ts_epoch="$(now_epoch)"

  # Parse key=value pairs
  local kv key val
  for kv in "$@"; do
    key="${kv%%=*}"
    val="${kv#*=}"
    case "$key" in
      job_id) job_id="$val" ;;
      stage) stage="$val" ;;
      status) status="$val" ;;
      duration_secs) duration_secs="$val" ;;
      *) metadata="${metadata:+${metadata},}\"${key}\":\"${val}\"" ;;
    esac
  done

  [[ -n "$metadata" ]] && metadata="{${metadata}}"

  if ! db_available; then
    return 1
  fi

  # FIX: values used to be interpolated into the SQL raw, so any value
  # containing a single quote broke the statement (and was an injection
  # vector). Coerce duration to a number and double embedded single quotes
  # ('' is the SQL-standard escape) in every string field.
  [[ "$duration_secs" =~ ^[0-9]+$ ]] || duration_secs=0
  local q_type="${event_type//\'/\'\'}"
  local q_job="${job_id//\'/\'\'}"
  local q_stage="${stage//\'/\'\'}"
  local q_status="${status//\'/\'\'}"
  local q_meta="${metadata//\'/\'\'}"

  _db_exec "INSERT OR IGNORE INTO events (ts, ts_epoch, type, job_id, stage, status, duration_secs, metadata, created_at, synced) VALUES ('${ts}', ${ts_epoch}, '${q_type}', '${q_job}', '${q_stage}', '${q_status}', ${duration_secs}, '${q_meta}', '${ts}', 0);" || return 1
}
|
|
434
|
+
|
|
435
|
+
# Legacy positional API (backward compat with existing add_event calls)
# add_event <type> [job_id] [stage] [status] [duration_secs] [metadata_json]
# Dual-write: best-effort insert into SQLite, then always append a JSON line
# to events.jsonl so consumers of the old flat-file format keep working.
add_event() {
  local event_type="$1"
  local job_id="${2:-}"
  local stage="${3:-}"
  local status="${4:-}"
  local duration_secs="${5:-0}"
  local metadata="${6:-}"

  local ts ts_epoch
  ts="$(now_iso)"
  ts_epoch="$(now_epoch)"

  # Try SQLite first (best-effort; a failure never blocks the JSONL write)
  if db_available; then
    # FIX: values were interpolated into the SQL raw; a single quote in any
    # field broke the statement. Coerce duration to a number and double
    # embedded single quotes ('' is the SQL-standard escape).
    local dur="$duration_secs"
    [[ "$dur" =~ ^[0-9]+$ ]] || dur=0
    local q_type="${event_type//\'/\'\'}"
    local q_job="${job_id//\'/\'\'}"
    local q_stage="${stage//\'/\'\'}"
    local q_status="${status//\'/\'\'}"
    local q_meta="${metadata//\'/\'\'}"
    _db_exec "INSERT OR IGNORE INTO events (ts, ts_epoch, type, job_id, stage, status, duration_secs, metadata, created_at, synced) VALUES ('${ts}', ${ts_epoch}, '${q_type}', '${q_job}', '${q_stage}', '${q_status}', ${dur}, '${q_meta}', '${ts}', 0);" || true
  fi

  # Always write to JSONL for backward compat (dual-write period)
  # NOTE(review): JSONL values are not JSON-escaped (pre-existing behavior,
  # kept byte-compatible here); callers are expected to pass quote-free
  # strings — confirm before treating this file as strictly valid JSON lines.
  mkdir -p "$DB_DIR"
  local json_record
  json_record="{\"ts\":\"${ts}\",\"ts_epoch\":${ts_epoch},\"type\":\"${event_type}\""
  [[ -n "$job_id" ]] && json_record="${json_record},\"job_id\":\"${job_id}\""
  [[ -n "$stage" ]] && json_record="${json_record},\"stage\":\"${stage}\""
  [[ -n "$status" ]] && json_record="${json_record},\"status\":\"${status}\""
  [[ "$duration_secs" -gt 0 ]] 2>/dev/null && json_record="${json_record},\"duration_secs\":${duration_secs}"
  [[ -n "$metadata" ]] && json_record="${json_record},\"metadata\":${metadata}"
  json_record="${json_record}}"
  echo "$json_record" >> "$EVENTS_FILE"
}
|
|
465
|
+
|
|
466
|
+
# ═══════════════════════════════════════════════════════════════════════════
# Daemon State Functions (replaces daemon-state.json operations)
# ═══════════════════════════════════════════════════════════════════════════

# db_save_job <job_id> <issue_number> <title> <pid> <worktree> [branch] [template] [goal]
# Register a job as 'active' in daemon_state (INSERT OR REPLACE keyed on
# the job_id+status UNIQUE constraint). Returns 1 if the DB is unavailable.
db_save_job() {
  local job_id="$1"
  local issue_num="$2"
  local title="${3:-}"
  local pid="${4:-0}"
  local worktree="${5:-}"
  local branch="${6:-}"
  local template="${7:-autonomous}"
  local goal="${8:-}"
  local ts
  ts="$(now_iso)"

  if ! db_available; then return 1; fi

  # FIX: numeric columns are interpolated bare — force them to integers so
  # garbage input cannot produce a malformed (or injectable) statement.
  [[ "$issue_num" =~ ^[0-9]+$ ]] || issue_num=0
  [[ "$pid" =~ ^[0-9]+$ ]] || pid=0

  # FIX: the original escaped only title/goal; a single quote in job_id,
  # worktree, branch, or template (e.g. branch "fix-o'brien") broke the SQL.
  # Double embedded single quotes in every string field.
  job_id="${job_id//\'/\'\'}"
  title="${title//\'/\'\'}"
  goal="${goal//\'/\'\'}"
  worktree="${worktree//\'/\'\'}"
  branch="${branch//\'/\'\'}"
  template="${template//\'/\'\'}"

  _db_exec "INSERT OR REPLACE INTO daemon_state (job_id, issue_number, title, goal, pid, worktree, branch, status, template, started_at, updated_at) VALUES ('${job_id}', ${issue_num}, '${title}', '${goal}', ${pid}, '${worktree}', '${branch}', 'active', '${template}', '${ts}', '${ts}');"
}
|
|
491
|
+
|
|
492
|
+
# db_complete_job <job_id> <result> [duration] [error_message]
# Flip a job's 'active' daemon_state row to 'completed', recording the
# outcome, wall-clock duration string, and any error text.
db_complete_job() {
  local job="$1"
  local outcome="$2"
  local took="${3:-}"
  local err="${4:-}"
  local stamp
  stamp="$(now_iso)"

  db_available || return 1

  # Double embedded single quotes so the error text can't break the SQL.
  err="${err//\'/\'\'}"

  _db_exec "UPDATE daemon_state SET status = 'completed', result = '${outcome}', duration = '${took}', error_message = '${err}', completed_at = '${stamp}', updated_at = '${stamp}' WHERE job_id = '${job}' AND status = 'active';"
}
|
|
507
|
+
|
|
508
|
+
# db_fail_job <job_id> [error_message]
# Flip a job's 'active' daemon_state row to 'failed' with result 'failure'.
db_fail_job() {
  local job="$1"
  local err="${2:-}"
  local stamp
  stamp="$(now_iso)"

  db_available || return 1

  # Double embedded single quotes so the error text can't break the SQL.
  err="${err//\'/\'\'}"

  _db_exec "UPDATE daemon_state SET status = 'failed', result = 'failure', error_message = '${err}', completed_at = '${stamp}', updated_at = '${stamp}' WHERE job_id = '${job}' AND status = 'active';"
}
|
|
521
|
+
|
|
522
|
+
# db_list_active_jobs — outputs JSON array of active daemon jobs
# Emits "[]" when the DB is unavailable or the query fails.
db_list_active_jobs() {
  db_available || { echo "[]"; return 0; }
  local sql
  sql="SELECT json_group_array(json_object('job_id', job_id, 'issue', issue_number, 'title', title, 'pid', pid, 'worktree', worktree, 'branch', branch, 'started_at', started_at, 'template', template, 'goal', goal)) FROM daemon_state WHERE status = 'active';"
  _db_query "$sql" || echo "[]"
}
|
|
527
|
+
|
|
528
|
+
# db_list_completed_jobs [limit] — outputs JSON array
# Most recent completed/failed jobs, newest first. Emits "[]" on any failure.
db_list_completed_jobs() {
  local limit="${1:-20}"
  # FIX: limit is interpolated into the SQL — force it to a positive integer
  # so a malformed argument can't break (or inject into) the statement.
  [[ "$limit" =~ ^[0-9]+$ ]] || limit=20
  if ! db_available; then echo "[]"; return 0; fi
  _db_query "SELECT json_group_array(json_object('job_id', job_id, 'issue', issue_number, 'title', title, 'result', result, 'duration', duration, 'completed_at', completed_at)) FROM (SELECT * FROM daemon_state WHERE status IN ('completed', 'failed') ORDER BY completed_at DESC LIMIT ${limit});" || echo "[]"
}
|
|
534
|
+
|
|
535
|
+
# db_active_job_count — returns integer
# Prints the number of 'active' daemon_state rows; "0" on any failure.
db_active_job_count() {
  db_available || { echo "0"; return 0; }
  _db_query "SELECT COUNT(*) FROM daemon_state WHERE status = 'active';" || echo "0"
}
|
|
540
|
+
|
|
541
|
+
# db_is_issue_active <issue_number> — returns 0 if active, 1 if not
# Also returns 1 when the DB is unavailable or the argument is not numeric.
db_is_issue_active() {
  local issue_num="$1"
  if ! db_available; then return 1; fi
  # FIX: the argument is interpolated into SQL — reject non-numeric input
  # (treat as "not active") instead of emitting a malformed statement.
  [[ "$issue_num" =~ ^[0-9]+$ ]] || return 1
  local count
  # FIX: guard the assignment — an unguarded failing $(_db_query ...) would
  # abort a plainly-calling script under `set -e`.
  count=$(_db_query "SELECT COUNT(*) FROM daemon_state WHERE issue_number = ${issue_num} AND status = 'active';") || count=0
  [[ "${count:-0}" -gt 0 ]]
}
|
|
549
|
+
|
|
550
|
+
# db_remove_active_job <job_id> — delete from active (for cleanup)
db_remove_active_job() {
  local target="$1"
  db_available || return 1
  _db_exec "DELETE FROM daemon_state WHERE job_id = '${target}' AND status = 'active';"
}
|
|
556
|
+
|
|
557
|
+
# db_daemon_summary — outputs JSON summary for status dashboard
# Emits "{}" when the DB is unavailable or the query fails.
db_daemon_summary() {
  db_available || { echo "{}"; return 0; }
  local sql
  sql="SELECT json_object(
  'active_count', (SELECT COUNT(*) FROM daemon_state WHERE status = 'active'),
  'completed_count', (SELECT COUNT(*) FROM daemon_state WHERE status IN ('completed', 'failed')),
  'success_count', (SELECT COUNT(*) FROM daemon_state WHERE result = 'success'),
  'failure_count', (SELECT COUNT(*) FROM daemon_state WHERE result = 'failure')
);"
  _db_query "$sql" || echo "{}"
}
|
|
567
|
+
|
|
568
|
+
# ═══════════════════════════════════════════════════════════════════════════
# Cost Functions (replaces costs.json)
# ═══════════════════════════════════════════════════════════════════════════

# db_record_cost <input_tokens> <output_tokens> <model> <cost_usd> <stage> [issue]
# Append one row to cost_entries. Returns 1 if the DB is unavailable.
db_record_cost() {
  local input_tokens="${1:-0}"
  local output_tokens="${2:-0}"
  local model="${3:-sonnet}"
  local cost_usd="${4:-0}"
  local stage="${5:-unknown}"
  local issue="${6:-}"
  local ts ts_epoch
  ts="$(now_iso)"
  ts_epoch="$(now_epoch)"

  if ! db_available; then return 1; fi

  # FIX: numeric fields are interpolated bare — validate them so garbage
  # input cannot produce a malformed (or injectable) statement.
  [[ "$input_tokens" =~ ^[0-9]+$ ]] || input_tokens=0
  [[ "$output_tokens" =~ ^[0-9]+$ ]] || output_tokens=0
  [[ "$cost_usd" =~ ^[0-9]*\.?[0-9]+$ ]] || cost_usd=0
  # FIX: string fields were interpolated raw; double embedded single quotes
  # ('' is the SQL-standard escape).
  model="${model//\'/\'\'}"
  stage="${stage//\'/\'\'}"
  issue="${issue//\'/\'\'}"

  _db_exec "INSERT INTO cost_entries (input_tokens, output_tokens, model, stage, issue, cost_usd, ts, ts_epoch, synced) VALUES (${input_tokens}, ${output_tokens}, '${model}', '${stage}', '${issue}', ${cost_usd}, '${ts}', ${ts_epoch}, 0);"
}
|
|
588
|
+
|
|
589
|
+
# db_cost_today — returns total cost for today as a number
# "Today" = since midnight UTC. The midnight timestamp is converted to an
# epoch with BSD/macOS date (-jf) first, then GNU date (-d); if both fail
# the cutoff falls back to 0, i.e. an all-time total.
db_cost_today() {
  if ! db_available; then echo "0"; return 0; fi
  local today_start
  today_start=$(date -u +"%Y-%m-%dT00:00:00Z")
  local today_epoch
  # BSD form first, GNU form second; `|| echo "0"` keeps set -e callers safe.
  today_epoch=$(date -u -jf "%Y-%m-%dT%H:%M:%SZ" "$today_start" +%s 2>/dev/null || date -u -d "$today_start" +%s 2>/dev/null || echo "0")
  _db_query "SELECT COALESCE(ROUND(SUM(cost_usd), 4), 0) FROM cost_entries WHERE ts_epoch >= ${today_epoch};" || echo "0"
}
|
|
598
|
+
|
|
599
|
+
# db_cost_by_period <days> — returns JSON breakdown
# Aggregates cost_entries from the last <days> days (default 7) into a single
# JSON object (total/count/avg/max/token sums); prints {} when the DB is down.
db_cost_by_period() {
  local window_days="${1:-7}"
  if ! db_available; then
    echo "{}"
    return 0
  fi

  local since_epoch
  since_epoch=$(( $(now_epoch) - (window_days * 86400) ))

  _db_query "SELECT json_object(
'total', COALESCE(ROUND(SUM(cost_usd), 4), 0),
'count', COUNT(*),
'avg', COALESCE(ROUND(AVG(cost_usd), 4), 0),
'max', COALESCE(ROUND(MAX(cost_usd), 4), 0),
'input_tokens', COALESCE(SUM(input_tokens), 0),
'output_tokens', COALESCE(SUM(output_tokens), 0)
) FROM cost_entries WHERE ts_epoch >= ${since_epoch};" || echo "{}"
}
|
|
614
|
+
|
|
615
|
+
# db_cost_by_stage <days> — returns JSON array grouped by stage
# One object per stage with summed cost and row count, ordered by cost
# descending; prints [] when the database is unavailable.
db_cost_by_stage() {
  local window_days="${1:-7}"
  if ! db_available; then
    echo "[]"
    return 0
  fi

  local since_epoch
  since_epoch=$(( $(now_epoch) - (window_days * 86400) ))

  _db_query "SELECT json_group_array(json_object('stage', stage, 'cost', ROUND(total_cost, 4), 'count', cnt)) FROM (SELECT stage, SUM(cost_usd) as total_cost, COUNT(*) as cnt FROM cost_entries WHERE ts_epoch >= ${since_epoch} GROUP BY stage ORDER BY total_cost DESC);" || echo "[]"
}
|
|
623
|
+
|
|
624
|
+
# db_remaining_budget — returns remaining budget or "unlimited"
# Prints "unlimited" when the DB is down, no budget row exists, budgets are
# disabled, or the budget is literally "0"; otherwise prints budget - spent
# today, formatted with two decimals.
db_remaining_budget() {
  if ! db_available; then
    echo "unlimited"
    return 0
  fi

  local row
  row=$(_db_query "SELECT daily_budget_usd, enabled FROM budgets WHERE id = 1;" || echo "")
  if [[ -z "$row" ]]; then
    echo "unlimited"
    return 0
  fi

  # Split the "amount|flag" row with parameter expansion instead of cut.
  local budget_usd="${row%%|*}"
  local enabled="${row##*|}"
  if [[ "${enabled:-0}" -ne 1 ]] || [[ "${budget_usd:-0}" == "0" ]]; then
    echo "unlimited"
    return 0
  fi

  local spent
  spent=$(db_cost_today)
  awk -v budget="$budget_usd" -v spent="$spent" 'BEGIN { printf "%.2f", budget - spent }'
}
|
|
644
|
+
|
|
645
|
+
# db_set_budget <amount_usd>
# Upserts the singleton budget row (id = 1) and enables it.
# Returns 1 when the database is unavailable.
db_set_budget() {
  local amount="$1"
  db_available || return 1
  _db_exec "INSERT OR REPLACE INTO budgets (id, daily_budget_usd, enabled, updated_at) VALUES (1, ${amount}, 1, '$(now_iso)');"
}
|
|
651
|
+
|
|
652
|
+
# db_get_budget — returns "amount|enabled" or empty
# Reads the singleton budget row; prints nothing when the DB is unavailable
# or no row exists.
db_get_budget() {
  if ! db_available; then
    echo ""
    return 0
  fi
  _db_query "SELECT daily_budget_usd || '|' || enabled FROM budgets WHERE id = 1;" || echo ""
}
|
|
657
|
+
|
|
658
|
+
# ═══════════════════════════════════════════════════════════════════════════
|
|
659
|
+
# Heartbeat Functions (replaces heartbeats/*.json)
|
|
660
|
+
# ═══════════════════════════════════════════════════════════════════════════
|
|
661
|
+
|
|
662
|
+
# db_record_heartbeat <job_id> <pid> <issue> <stage> <iteration> [activity] [memory_mb]
# Upserts the liveness row for a job (one row per job_id).
# Returns 1 when the database is unavailable.
db_record_heartbeat() {
  local job_id="$1"
  local pid="${2:-0}"
  local issue="${3:-0}"
  local stage="${4:-}"
  local iteration="${5:-0}"
  local activity="${6:-}"
  local memory_mb="${7:-0}"
  local ts
  ts="$(now_iso)"

  if ! db_available; then return 1; fi

  # Escape single quotes in every free-text field interpolated into SQL —
  # the original escaped only activity, leaving stage able to break the statement.
  activity="${activity//\'/\'\'}"
  stage="${stage//\'/\'\'}"

  _db_exec "INSERT OR REPLACE INTO heartbeats (job_id, pid, issue, stage, iteration, last_activity, memory_mb, updated_at) VALUES ('${job_id}', ${pid}, ${issue}, '${stage}', ${iteration}, '${activity}', ${memory_mb}, '${ts}');"
}
|
|
680
|
+
|
|
681
|
+
# db_stale_heartbeats [threshold_secs] — returns JSON array of stale heartbeats
# A heartbeat is stale when its updated_at timestamp is older than the
# threshold (default 120s). Prints [] when the database is unavailable.
db_stale_heartbeats() {
  local threshold="${1:-120}"
  if ! db_available; then
    echo "[]"
    return 0
  fi

  local limit_epoch limit_ts
  limit_epoch=$(( $(now_epoch) - threshold ))
  # Convert cutoff epoch to ISO: BSD date (-r), then GNU date (-d @…),
  # then a far-past sentinel so nothing is reported stale on failure.
  limit_ts=$(date -u -r "$limit_epoch" +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null || date -u -d "@${limit_epoch}" +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null || echo "2000-01-01T00:00:00Z")

  _db_query "SELECT json_group_array(json_object('job_id', job_id, 'pid', pid, 'stage', stage, 'updated_at', updated_at)) FROM heartbeats WHERE updated_at < '${limit_ts}';" || echo "[]"
}
|
|
691
|
+
|
|
692
|
+
# db_clear_heartbeat <job_id>
# Removes the heartbeat row for a finished job. Returns 1 when the database
# is unavailable.
db_clear_heartbeat() {
  local job_id="$1"
  db_available || return 1
  _db_exec "DELETE FROM heartbeats WHERE job_id = '${job_id}';"
}
|
|
698
|
+
|
|
699
|
+
# db_list_heartbeats — returns JSON array
# All heartbeat rows with their full columns; prints [] when the database
# is unavailable.
db_list_heartbeats() {
  if ! db_available; then
    echo "[]"
    return 0
  fi
  _db_query "SELECT json_group_array(json_object('job_id', job_id, 'pid', pid, 'issue', issue, 'stage', stage, 'iteration', iteration, 'last_activity', last_activity, 'memory_mb', memory_mb, 'updated_at', updated_at)) FROM heartbeats;" || echo "[]"
}
|
|
704
|
+
|
|
705
|
+
# ═══════════════════════════════════════════════════════════════════════════
|
|
706
|
+
# Memory Failure Functions (replaces memory/*/failures.json)
|
|
707
|
+
# ═══════════════════════════════════════════════════════════════════════════
|
|
708
|
+
|
|
709
|
+
# db_record_failure <repo_hash> <failure_class> <error_sig> [root_cause] [fix_desc] [file_path] [stage]
# Records one failure pattern for cross-run memory. Returns 1 when the
# database is unavailable.
db_record_failure() {
  local repo_hash="$1"
  local failure_class="$2"
  local error_sig="${3:-}"
  local root_cause="${4:-}"
  local fix_desc="${5:-}"
  local file_path="${6:-}"
  local stage="${7:-}"
  local ts
  ts="$(now_iso)"

  if ! db_available; then return 1; fi

  # Escape quotes in every free-text field interpolated into SQL —
  # the original left file_path and stage unescaped.
  error_sig="${error_sig//\'/\'\'}"
  root_cause="${root_cause//\'/\'\'}"
  fix_desc="${fix_desc//\'/\'\'}"
  file_path="${file_path//\'/\'\'}"
  stage="${stage//\'/\'\'}"

  # Upsert: increment occurrences if same signature exists.
  # NOTE(review): ON CONFLICT(id) can never fire for a plain INSERT that
  # omits id — the dedupe likely intends a UNIQUE index on
  # (repo_hash, error_signature); confirm against the schema in migrate_schema.
  _db_exec "INSERT INTO memory_failures (repo_hash, failure_class, error_signature, root_cause, fix_description, file_path, stage, occurrences, last_seen_at, created_at, synced) VALUES ('${repo_hash}', '${failure_class}', '${error_sig}', '${root_cause}', '${fix_desc}', '${file_path}', '${stage}', 1, '${ts}', '${ts}', 0) ON CONFLICT(id) DO UPDATE SET occurrences = occurrences + 1, last_seen_at = '${ts}';"
}
|
|
731
|
+
|
|
732
|
+
# db_query_similar_failures <repo_hash> [failure_class] [limit]
# Returns a JSON array of recorded failure patterns for a repo, most frequent
# and most recent first; prints [] when the database is unavailable.
db_query_similar_failures() {
  local repo_hash="$1"
  local failure_class="${2:-}"
  local limit="${3:-10}"

  if ! db_available; then
    echo "[]"
    return 0
  fi

  local filter="WHERE repo_hash = '${repo_hash}'"
  if [[ -n "$failure_class" ]]; then
    filter+=" AND failure_class = '${failure_class}'"
  fi

  _db_query "SELECT json_group_array(json_object('failure_class', failure_class, 'error_signature', error_signature, 'root_cause', root_cause, 'fix_description', fix_description, 'file_path', file_path, 'occurrences', occurrences, 'last_seen_at', last_seen_at)) FROM (SELECT * FROM memory_failures ${filter} ORDER BY occurrences DESC, last_seen_at DESC LIMIT ${limit});" || echo "[]"
}
|
|
745
|
+
|
|
746
|
+
# ═══════════════════════════════════════════════════════════════════════════
|
|
747
|
+
# Pipeline Run Functions (enhanced from existing)
|
|
748
|
+
# ═══════════════════════════════════════════════════════════════════════════
|
|
749
|
+
|
|
750
|
+
# add_pipeline_run <job_id> [issue_number] [goal] [branch] [template]
# Inserts a new pipeline run in 'pending' state (INSERT OR IGNORE, so a
# duplicate job_id is a no-op). Returns 1 when sqlite3 is unavailable.
add_pipeline_run() {
  local job_id="$1"
  local issue_number="${2:-0}"
  local goal="${3:-}"
  local branch="${4:-}"
  local template="${5:-standard}"

  if ! check_sqlite3; then
    return 1
  fi

  local ts
  ts="$(now_iso)"
  # Escape single quotes in free-text fields — the original escaped goal
  # but left branch able to break the statement.
  goal="${goal//\'/\'\'}"
  branch="${branch//\'/\'\'}"

  _db_exec "INSERT OR IGNORE INTO pipeline_runs (job_id, issue_number, goal, branch, status, template, started_at, created_at) VALUES ('${job_id}', ${issue_number}, '${goal}', '${branch}', 'pending', '${template}', '${ts}', '${ts}');" || return 1
}
|
|
767
|
+
|
|
768
|
+
# update_pipeline_status <job_id> <status> [stage_name] [stage_status] [duration_secs]
# Updates a run's status/stage fields; completed_at is stamped only when the
# new status is terminal (completed/failed). Returns 1 when sqlite3 is missing.
update_pipeline_status() {
  local job_id="$1"
  local status="$2"
  local stage_name="${3:-}"
  local stage_status="${4:-}"
  local duration_secs="${5:-0}"

  check_sqlite3 || return 1

  local ts
  ts="$(now_iso)"

  _db_exec "UPDATE pipeline_runs SET status = '${status}', stage_name = '${stage_name}', stage_status = '${stage_status}', duration_secs = ${duration_secs}, completed_at = CASE WHEN '${status}' IN ('completed', 'failed') THEN '${ts}' ELSE completed_at END WHERE job_id = '${job_id}';" || return 1
}
|
|
782
|
+
|
|
783
|
+
# record_stage <job_id> <stage_name> <status> [duration_secs] [error_msg]
# Appends one per-stage record for a run. Returns 1 when sqlite3 is missing.
record_stage() {
  local job_id="$1"
  local stage_name="$2"
  local status="$3"
  local duration_secs="${4:-0}"
  local error_msg="${5:-}"

  check_sqlite3 || return 1

  local ts
  ts="$(now_iso)"
  # Escape single quotes so free-text errors cannot break the statement.
  error_msg="${error_msg//\'/\'\'}"

  _db_exec "INSERT INTO pipeline_stages (job_id, stage_name, status, started_at, completed_at, duration_secs, error_message, created_at) VALUES ('${job_id}', '${stage_name}', '${status}', '${ts}', '${ts}', ${duration_secs}, '${error_msg}', '${ts}');" || return 1
}
|
|
798
|
+
|
|
799
|
+
# query_runs [status] [limit] — print recent pipeline runs as a table.
# Optional status filters rows; limit defaults to 50. Requires sqlite3.
query_runs() {
  local wanted_status="${1:-}"
  local max_rows="${2:-50}"

  if ! check_sqlite3; then
    warn "Cannot query — sqlite3 not available"
    return 1
  fi

  local sql="SELECT job_id, goal, status, template, started_at, duration_secs FROM pipeline_runs"
  if [[ -n "$wanted_status" ]]; then
    sql="${sql} WHERE status = '${wanted_status}'"
  fi
  sql="${sql} ORDER BY created_at DESC LIMIT ${max_rows};"

  sqlite3 -header -column "$DB_FILE" "$sql"
}
|
|
814
|
+
|
|
815
|
+
# ═══════════════════════════════════════════════════════════════════════════
|
|
816
|
+
# Sync Functions (HTTP-based, vendor-neutral)
|
|
817
|
+
# ═══════════════════════════════════════════════════════════════════════════
|
|
818
|
+
|
|
819
|
+
# Load sync configuration
# Reads SYNC_URL / SYNC_TOKEN from $SYNC_CONFIG_FILE (JSON with .url/.token).
# Returns 1 when the file is missing or no URL is configured.
_sync_load_config() {
  [[ -f "$SYNC_CONFIG_FILE" ]] || return 1
  SYNC_URL=$(jq -r '.url // empty' "$SYNC_CONFIG_FILE" 2>/dev/null || true)
  SYNC_TOKEN=$(jq -r '.token // empty' "$SYNC_CONFIG_FILE" 2>/dev/null || true)
  # Success only when a URL was actually found.
  [[ -n "$SYNC_URL" ]]
}
|
|
828
|
+
|
|
829
|
+
# db_sync_push — push unsynced rows to remote endpoint
# Collects up to 500 unsynced events and 500 unsynced cost entries, POSTs
# them as one JSON payload, and marks rows synced on HTTP 200/201.
# Returns 1 when the DB is down, sync is unconfigured, or the push fails.
db_sync_push() {
  if ! db_available; then return 1; fi
  if ! _sync_load_config; then
    warn "Sync not configured. Set up ${SYNC_CONFIG_FILE}"
    return 1
  fi

  local device_id
  device_id=$(_db_query "SELECT value FROM _sync_metadata WHERE key = 'device_id';" || echo "unknown")

  # Collect unsynced events
  local unsynced_events
  unsynced_events=$(_db_query "SELECT json_group_array(json_object('ts', ts, 'ts_epoch', ts_epoch, 'type', type, 'job_id', job_id, 'stage', stage, 'status', status, 'metadata', metadata)) FROM events WHERE synced = 0 LIMIT 500;" || echo "[]")

  # Collect unsynced cost entries
  local unsynced_costs
  unsynced_costs=$(_db_query "SELECT json_group_array(json_object('input_tokens', input_tokens, 'output_tokens', output_tokens, 'model', model, 'stage', stage, 'cost_usd', cost_usd, 'ts', ts, 'ts_epoch', ts_epoch)) FROM cost_entries WHERE synced = 0 LIMIT 500;" || echo "[]")

  # Build payload
  local payload
  payload=$(jq -n \
    --arg device "$device_id" \
    --argjson events "$unsynced_events" \
    --argjson costs "$unsynced_costs" \
    '{device_id: $device, events: $events, costs: $costs}')

  # Build curl arguments as an array. The previous scalar
  # auth_header="-H 'Authorization: ...'" was expanded unquoted, so curl
  # received the literal single quotes and a word-split header — the
  # Authorization header never reached the server intact.
  local curl_args=(-s -w "%{http_code}" -o /dev/null
    -X POST "${SYNC_URL}/api/sync/push"
    -H "Content-Type: application/json")
  [[ -n "${SYNC_TOKEN:-}" ]] && curl_args+=(-H "Authorization: Bearer ${SYNC_TOKEN}")

  local response
  response=$(curl "${curl_args[@]}" -d "$payload" 2>/dev/null || echo "000")

  if [[ "$response" == "200" || "$response" == "201" ]]; then
    # NOTE(review): this marks ALL unsynced rows as synced even though only
    # the first 500 of each were pushed; rows beyond the LIMIT silently skip
    # sync. Confirm volumes stay under 500 per push, or push in batches.
    _db_exec "UPDATE events SET synced = 1 WHERE synced = 0;"
    _db_exec "UPDATE cost_entries SET synced = 1 WHERE synced = 0;"
    success "Pushed unsynced data to ${SYNC_URL}"
    return 0
  else
    warn "Sync push failed (HTTP ${response})"
    return 1
  fi
}
|
|
878
|
+
|
|
879
|
+
# db_sync_pull — pull new rows from remote endpoint
# Fetches events newer than the last recorded pull, inserts them (marked
# synced so they are not re-pushed), and updates the pull watermark.
# Returns 1 when the DB is down, sync is unconfigured, or the response is
# not valid JSON.
db_sync_pull() {
  if ! db_available; then return 1; fi
  if ! _sync_load_config; then
    warn "Sync not configured. Set up ${SYNC_CONFIG_FILE}"
    return 1
  fi

  local last_sync
  last_sync=$(_db_query "SELECT value FROM _sync_metadata WHERE key = 'last_pull_epoch';" || echo "0")

  # Build curl arguments as an array. The previous scalar auth_header with
  # embedded single quotes was expanded unquoted and word-split, so the
  # Authorization header was never sent correctly.
  local curl_args=(-s "${SYNC_URL}/api/sync/pull?since=${last_sync}"
    -H "Accept: application/json")
  [[ -n "${SYNC_TOKEN:-}" ]] && curl_args+=(-H "Authorization: Bearer ${SYNC_TOKEN}")

  local response_body
  response_body=$(curl "${curl_args[@]}" 2>/dev/null || echo "{}")

  if ! echo "$response_body" | jq empty 2>/dev/null; then
    warn "Sync pull returned invalid JSON"
    return 1
  fi

  # Import events
  local event_count=0
  while IFS= read -r evt; do
    [[ -z "$evt" || "$evt" == "null" ]] && continue
    local e_ts e_epoch e_type e_job
    e_ts=$(echo "$evt" | jq -r '.ts // ""')
    e_epoch=$(echo "$evt" | jq -r '.ts_epoch // 0')
    e_type=$(echo "$evt" | jq -r '.type // ""')
    e_job=$(echo "$evt" | jq -r '.job_id // ""')
    _db_exec "INSERT OR IGNORE INTO events (ts, ts_epoch, type, job_id, created_at, synced) VALUES ('${e_ts}', ${e_epoch}, '${e_type}', '${e_job}', '${e_ts}', 1);" 2>/dev/null && event_count=$((event_count + 1))
  done < <(echo "$response_body" | jq -c '.events[]' 2>/dev/null)

  # Update last pull timestamp
  _db_exec "INSERT OR REPLACE INTO _sync_metadata (key, value, updated_at) VALUES ('last_pull_epoch', '$(now_epoch)', '$(now_iso)');"

  success "Pulled ${event_count} new events from ${SYNC_URL}"
}
|
|
921
|
+
|
|
922
|
+
# ═══════════════════════════════════════════════════════════════════════════
|
|
923
|
+
# JSON Migration (import existing state files into SQLite)
|
|
924
|
+
# ═══════════════════════════════════════════════════════════════════════════
|
|
925
|
+
|
|
926
|
+
# migrate_json_data — one-shot import of legacy JSON state files into SQLite.
# Imports events.jsonl, daemon-state.json, costs.json, budget.json, and
# heartbeats/*.json, then prints verification counts. Returns 1 when sqlite3
# is not available.
migrate_json_data() {
  if ! check_sqlite3; then
    error "sqlite3 required for migration"
    return 1
  fi

  ensure_db_dir
  migrate_schema

  local total_imported=0

  # 1. Import events.jsonl
  if [[ -f "$EVENTS_FILE" ]]; then
    info "Importing events from ${EVENTS_FILE}..."
    local evt_count=0
    local evt_skipped=0
    while IFS= read -r line; do
      [[ -z "$line" ]] && continue
      # Validate the line up front. The previous `$(jq … || continue)` form
      # ran `continue` inside a command-substitution subshell, where it
      # cannot skip this loop iteration — malformed lines fell through with
      # empty fields instead of being skipped.
      if ! echo "$line" | jq empty 2>/dev/null; then
        evt_skipped=$((evt_skipped + 1))
        continue
      fi
      local e_ts e_epoch e_type e_job e_stage e_status
      e_ts=$(echo "$line" | jq -r '.ts // ""' 2>/dev/null || true)
      e_epoch=$(echo "$line" | jq -r '.ts_epoch // 0' 2>/dev/null || echo "0")
      e_type=$(echo "$line" | jq -r '.type // ""' 2>/dev/null || true)
      e_job=$(echo "$line" | jq -r '.job_id // ""' 2>/dev/null || true)
      e_stage=$(echo "$line" | jq -r '.stage // ""' 2>/dev/null || true)
      e_status=$(echo "$line" | jq -r '.status // ""' 2>/dev/null || true)

      if _db_exec "INSERT OR IGNORE INTO events (ts, ts_epoch, type, job_id, stage, status, created_at, synced) VALUES ('${e_ts}', ${e_epoch}, '${e_type}', '${e_job}', '${e_stage}', '${e_status}', '${e_ts}', 0);" 2>/dev/null; then
        evt_count=$((evt_count + 1))
      else
        evt_skipped=$((evt_skipped + 1))
      fi
    done < "$EVENTS_FILE"
    success "Events: ${evt_count} imported, ${evt_skipped} skipped (duplicates)"
    total_imported=$((total_imported + evt_count))
  fi

  # 2. Import daemon-state.json
  if [[ -f "$DAEMON_STATE_FILE" ]]; then
    info "Importing daemon state from ${DAEMON_STATE_FILE}..."
    local job_count=0

    # Import completed jobs
    while IFS= read -r job; do
      [[ -z "$job" || "$job" == "null" ]] && continue
      local j_issue j_result j_dur j_at
      j_issue=$(echo "$job" | jq -r '.issue // 0')
      j_result=$(echo "$job" | jq -r '.result // ""')
      j_dur=$(echo "$job" | jq -r '.duration // ""')
      j_at=$(echo "$job" | jq -r '.completed_at // ""')
      # Synthesize a deterministic job_id from issue + timestamp digits so
      # re-running the migration stays idempotent (INSERT OR IGNORE).
      local j_id="migrated-${j_issue}-$(echo "$j_at" | tr -dc '0-9' | tail -c 10)"
      _db_exec "INSERT OR IGNORE INTO daemon_state (job_id, issue_number, status, result, duration, completed_at, started_at, updated_at) VALUES ('${j_id}', ${j_issue}, 'completed', '${j_result}', '${j_dur}', '${j_at}', '${j_at}', '$(now_iso)');" 2>/dev/null && job_count=$((job_count + 1))
    done < <(jq -c '.completed[]' "$DAEMON_STATE_FILE" 2>/dev/null)

    success "Daemon state: ${job_count} completed jobs imported"
    total_imported=$((total_imported + job_count))
  fi

  # 3. Import costs.json
  if [[ -f "$COST_FILE_JSON" ]]; then
    info "Importing costs from ${COST_FILE_JSON}..."
    local cost_count=0
    while IFS= read -r entry; do
      [[ -z "$entry" || "$entry" == "null" ]] && continue
      local c_input c_output c_model c_stage c_issue c_cost c_ts c_epoch
      c_input=$(echo "$entry" | jq -r '.input_tokens // 0')
      c_output=$(echo "$entry" | jq -r '.output_tokens // 0')
      c_model=$(echo "$entry" | jq -r '.model // "sonnet"')
      c_stage=$(echo "$entry" | jq -r '.stage // "unknown"')
      c_issue=$(echo "$entry" | jq -r '.issue // ""')
      c_cost=$(echo "$entry" | jq -r '.cost_usd // 0')
      c_ts=$(echo "$entry" | jq -r '.ts // ""')
      c_epoch=$(echo "$entry" | jq -r '.ts_epoch // 0')
      _db_exec "INSERT INTO cost_entries (input_tokens, output_tokens, model, stage, issue, cost_usd, ts, ts_epoch, synced) VALUES (${c_input}, ${c_output}, '${c_model}', '${c_stage}', '${c_issue}', ${c_cost}, '${c_ts}', ${c_epoch}, 0);" 2>/dev/null && cost_count=$((cost_count + 1))
    done < <(jq -c '.entries[]' "$COST_FILE_JSON" 2>/dev/null)

    success "Costs: ${cost_count} entries imported"
    total_imported=$((total_imported + cost_count))
  fi

  # 4. Import budget.json
  if [[ -f "$BUDGET_FILE_JSON" ]]; then
    info "Importing budget from ${BUDGET_FILE_JSON}..."
    local b_amount b_enabled
    b_amount=$(jq -r '.daily_budget_usd // 0' "$BUDGET_FILE_JSON" 2>/dev/null || echo "0")
    b_enabled=$(jq -r '.enabled // false' "$BUDGET_FILE_JSON" 2>/dev/null || echo "false")
    local b_flag=0
    [[ "$b_enabled" == "true" ]] && b_flag=1
    _db_exec "INSERT OR REPLACE INTO budgets (id, daily_budget_usd, enabled, updated_at) VALUES (1, ${b_amount}, ${b_flag}, '$(now_iso)');" && success "Budget: imported (\$${b_amount}, enabled=${b_enabled})"
  fi

  # 5. Import heartbeats/*.json
  if [[ -d "$HEARTBEAT_DIR" ]]; then
    info "Importing heartbeats..."
    local hb_count=0
    for hb_file in "${HEARTBEAT_DIR}"/*.json; do
      [[ -f "$hb_file" ]] || continue
      local hb_job hb_pid hb_issue hb_stage hb_iter hb_activity hb_mem hb_updated
      hb_job="$(basename "$hb_file" .json)"
      hb_pid=$(jq -r '.pid // 0' "$hb_file" 2>/dev/null || echo "0")
      hb_issue=$(jq -r '.issue // 0' "$hb_file" 2>/dev/null || echo "0")
      hb_stage=$(jq -r '.stage // ""' "$hb_file" 2>/dev/null || echo "")
      hb_iter=$(jq -r '.iteration // 0' "$hb_file" 2>/dev/null || echo "0")
      hb_activity=$(jq -r '.last_activity // ""' "$hb_file" 2>/dev/null || echo "")
      hb_mem=$(jq -r '.memory_mb // 0' "$hb_file" 2>/dev/null || echo "0")
      hb_updated=$(jq -r '.updated_at // ""' "$hb_file" 2>/dev/null || echo "$(now_iso)")

      hb_activity="${hb_activity//\'/\'\'}"
      _db_exec "INSERT OR REPLACE INTO heartbeats (job_id, pid, issue, stage, iteration, last_activity, memory_mb, updated_at) VALUES ('${hb_job}', ${hb_pid}, ${hb_issue}, '${hb_stage}', ${hb_iter}, '${hb_activity}', ${hb_mem}, '${hb_updated}');" 2>/dev/null && hb_count=$((hb_count + 1))
    done
    success "Heartbeats: ${hb_count} imported"
    total_imported=$((total_imported + hb_count))
  fi

  echo ""
  success "Migration complete: ${total_imported} total records imported"

  # Verify counts
  echo ""
  info "Verification:"
  local db_events db_costs db_hb
  db_events=$(_db_query "SELECT COUNT(*) FROM events;" || echo "0")
  db_costs=$(_db_query "SELECT COUNT(*) FROM cost_entries;" || echo "0")
  db_hb=$(_db_query "SELECT COUNT(*) FROM heartbeats;" || echo "0")
  echo " Events in DB: ${db_events}"
  echo " Cost entries: ${db_costs}"
  echo " Heartbeats: ${db_hb}"
}
|
|
1053
|
+
|
|
1054
|
+
# ═══════════════════════════════════════════════════════════════════════════
|
|
1055
|
+
# Export / Status / Cleanup
|
|
1056
|
+
# ═══════════════════════════════════════════════════════════════════════════
|
|
1057
|
+
|
|
1058
|
+
# export_db [output_file] — dump recent DB contents to a JSON backup file.
# Exports up to 1000 events, 500 pipeline runs, and 1000 cost entries; writes
# atomically via a temp file. Returns 1 when sqlite3 is missing or the write
# fails.
export_db() {
  local output_file="${1:-${DB_DIR}/shipwright-backup.json}"

  if ! check_sqlite3; then
    warn "Cannot export — sqlite3 not available"
    return 1
  fi

  info "Exporting database to ${output_file}..."

  local events_json runs_json costs_json
  events_json=$(_db_query "SELECT json_group_array(json_object('ts', ts, 'type', type, 'job_id', job_id, 'stage', stage, 'status', status)) FROM (SELECT * FROM events ORDER BY ts_epoch DESC LIMIT 1000);" || echo "[]")
  runs_json=$(_db_query "SELECT json_group_array(json_object('job_id', job_id, 'goal', goal, 'status', status, 'template', template, 'started_at', started_at)) FROM (SELECT * FROM pipeline_runs ORDER BY created_at DESC LIMIT 500);" || echo "[]")
  costs_json=$(_db_query "SELECT json_group_array(json_object('model', model, 'stage', stage, 'cost_usd', cost_usd, 'ts', ts)) FROM (SELECT * FROM cost_entries ORDER BY ts_epoch DESC LIMIT 1000);" || echo "[]")

  # Write to a temp file first, then rename into place (atomic on same fs).
  local scratch
  scratch=$(mktemp "${output_file}.tmp.XXXXXX")
  if jq -n \
    --arg exported_at "$(now_iso)" \
    --argjson events "$events_json" \
    --argjson pipeline_runs "$runs_json" \
    --argjson cost_entries "$costs_json" \
    '{exported_at: $exported_at, events: $events, pipeline_runs: $pipeline_runs, cost_entries: $cost_entries}' \
    > "$scratch" && mv "$scratch" "$output_file"; then
    success "Database exported to ${output_file}"
  else
    rm -f "$scratch"
    return 1
  fi
}
|
|
1085
|
+
|
|
1086
|
+
# import_db <input_file> — placeholder for JSON backup import.
# Validates the file and sqlite3 availability, then reports that the full
# import path is not implemented yet (use `shipwright db migrate` instead).
import_db() {
  local input_file="$1"

  if [[ ! -f "$input_file" ]]; then
    error "File not found: ${input_file}"
    return 1
  fi

  if ! check_sqlite3; then
    warn "Cannot import — sqlite3 not available"
    return 1
  fi

  info "Importing data from ${input_file}..."
  warn "Full JSON import not yet implemented — use 'shipwright db migrate' to import from state files"
}
|
|
1102
|
+
|
|
1103
|
+
# show_status — print a human-readable overview of the SQLite database:
# journal mode, schema version, file size, per-table row counts, sync state,
# and the five most recent pipeline runs. Falls back to the legacy JSON files
# when sqlite3 is not installed.
show_status() {
  # Without sqlite3 we can only summarize the legacy JSON state files.
  if ! check_sqlite3; then
    warn "sqlite3 not available"
    echo ""
    echo "Fallback: Reading from JSON files..."
    [[ -f "$EVENTS_FILE" ]] && echo " Events: $(wc -l < "$EVENTS_FILE") records"
    [[ -f "$DAEMON_STATE_FILE" ]] && echo " Pipeline state: $(jq '.active_jobs | length' "$DAEMON_STATE_FILE" 2>/dev/null || echo '?')"
    return 0
  fi

  if [[ ! -f "$DB_FILE" ]]; then
    warn "Database not initialized. Run: shipwright db init"
    return 1
  fi

  echo ""
  echo -e "${BOLD}SQLite Database Status${RESET}"
  echo -e "${DIM}Database: ${DB_FILE}${RESET}"
  echo ""

  # WAL mode check
  local journal_mode
  journal_mode=$(_db_query "PRAGMA journal_mode;" || echo "unknown")
  echo -e "${DIM}Journal mode: ${journal_mode}${RESET}"

  # Schema version
  local schema_v
  schema_v=$(_db_query "SELECT COALESCE(MAX(version), 0) FROM _schema;" || echo "0")
  echo -e "${DIM}Schema version: ${schema_v}${RESET}"

  # DB file size (human-readable, via ls -lh column 5)
  local db_size
  if [[ -f "$DB_FILE" ]]; then
    db_size=$(ls -lh "$DB_FILE" 2>/dev/null | awk '{print $5}')
    echo -e "${DIM}File size: ${db_size}${RESET}"
  fi
  echo ""

  # Per-table row counts; each query falls back to 0 on error.
  local event_count pipeline_count stage_count daemon_count cost_count hb_count failure_count
  event_count=$(_db_query "SELECT COUNT(*) FROM events;" || echo "0")
  pipeline_count=$(_db_query "SELECT COUNT(*) FROM pipeline_runs;" || echo "0")
  stage_count=$(_db_query "SELECT COUNT(*) FROM pipeline_stages;" || echo "0")
  daemon_count=$(_db_query "SELECT COUNT(*) FROM daemon_state;" || echo "0")
  cost_count=$(_db_query "SELECT COUNT(*) FROM cost_entries;" || echo "0")
  hb_count=$(_db_query "SELECT COUNT(*) FROM heartbeats;" || echo "0")
  failure_count=$(_db_query "SELECT COUNT(*) FROM memory_failures;" || echo "0")

  echo -e "${CYAN}Events${RESET} ${event_count} records"
  echo -e "${CYAN}Pipeline Runs${RESET} ${pipeline_count} records"
  echo -e "${CYAN}Pipeline Stages${RESET} ${stage_count} records"
  echo -e "${CYAN}Daemon Jobs${RESET} ${daemon_count} records"
  echo -e "${CYAN}Cost Entries${RESET} ${cost_count} records"
  echo -e "${CYAN}Heartbeats${RESET} ${hb_count} records"
  echo -e "${CYAN}Failure Patterns${RESET} ${failure_count} records"

  # Sync status
  # NOTE: last_push/last_pull are read here but not printed below —
  # presumably intended for a future line; confirm before removing.
  local device_id last_push last_pull
  device_id=$(_db_query "SELECT value FROM _sync_metadata WHERE key = 'device_id';" || echo "not set")
  last_push=$(_db_query "SELECT value FROM _sync_metadata WHERE key = 'last_push_epoch';" || echo "never")
  last_pull=$(_db_query "SELECT value FROM _sync_metadata WHERE key = 'last_pull_epoch';" || echo "never")
  local unsynced_events unsynced_costs
  unsynced_events=$(_db_query "SELECT COUNT(*) FROM events WHERE synced = 0;" || echo "0")
  unsynced_costs=$(_db_query "SELECT COUNT(*) FROM cost_entries WHERE synced = 0;" || echo "0")

  echo ""
  echo -e "${BOLD}Sync${RESET}"
  echo -e " Device: ${DIM}${device_id}${RESET}"
  echo -e " Unsynced events: ${unsynced_events}"
  echo -e " Unsynced costs: ${unsynced_costs}"
  if [[ -f "$SYNC_CONFIG_FILE" ]]; then
    local sync_url
    sync_url=$(jq -r '.url // "not configured"' "$SYNC_CONFIG_FILE" 2>/dev/null || echo "not configured")
    echo -e " Remote: ${DIM}${sync_url}${RESET}"
  else
    echo -e " Remote: ${DIM}not configured${RESET}"
  fi

  echo ""
  echo -e "${BOLD}Recent Runs${RESET}"
  sqlite3 -header -column "$DB_FILE" "SELECT job_id, goal, status, template, datetime(started_at) as started FROM pipeline_runs ORDER BY created_at DESC LIMIT 5;" 2>/dev/null || echo " (none)"
}
|
|
1184
|
+
|
|
1185
|
+
# cleanup_old_data [days] — delete records older than <days> (default 30) and
# VACUUM. Active daemon jobs are never deleted. Returns 1 when sqlite3 is
# missing.
cleanup_old_data() {
  local days="${1:-30}"

  if ! check_sqlite3; then
    warn "Cannot cleanup — sqlite3 not available"
    return 1
  fi

  local cutoff_epoch
  cutoff_epoch=$(( $(now_epoch) - (days * 86400) ))
  local cutoff_date
  cutoff_date=$(date -u -r "$cutoff_epoch" +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null || \
    date -u -d "@${cutoff_epoch}" +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null || \
    date -u +"%Y-%m-%dT%H:%M:%SZ")

  info "Cleaning records older than ${days} days (before ${cutoff_date})..."

  # Run each DELETE and its changes() readback in ONE sqlite3 invocation.
  # The previous code called "SELECT changes();" in a separate process,
  # and changes() is per-connection, so every reported count was 0.
  local d_events d_costs d_daemon d_stages
  d_events=$(_db_query "DELETE FROM events WHERE ts < '${cutoff_date}'; SELECT changes();" || echo "0")
  d_costs=$(_db_query "DELETE FROM cost_entries WHERE ts < '${cutoff_date}'; SELECT changes();" || echo "0")
  d_daemon=$(_db_query "DELETE FROM daemon_state WHERE updated_at < '${cutoff_date}' AND status != 'active'; SELECT changes();" || echo "0")
  d_stages=$(_db_query "DELETE FROM pipeline_stages WHERE created_at < '${cutoff_date}'; SELECT changes();" || echo "0")

  success "Deleted: ${d_events} events, ${d_costs} costs, ${d_daemon} daemon jobs, ${d_stages} stages"

  # VACUUM to reclaim space
  _db_exec "VACUUM;" 2>/dev/null || true
}
|
|
1217
|
+
|
|
1218
|
+
# ═══════════════════════════════════════════════════════════════════════════
|
|
1219
|
+
# Health Check (used by sw-doctor.sh)
|
|
1220
|
+
# ═══════════════════════════════════════════════════════════════════════════
|
|
1221
|
+
|
|
1222
|
+
db_health_check() {
|
|
1223
|
+
local pass=0 fail=0
|
|
1224
|
+
|
|
1225
|
+
# sqlite3 binary
|
|
1226
|
+
if check_sqlite3; then
|
|
1227
|
+
echo -e " ${GREEN}${BOLD}✓${RESET} sqlite3 available"
|
|
1228
|
+
pass=$((pass + 1))
|
|
1229
|
+
else
|
|
1230
|
+
echo -e " ${RED}${BOLD}✗${RESET} sqlite3 not installed"
|
|
1231
|
+
fail=$((fail + 1))
|
|
1232
|
+
echo " ${pass} passed, ${fail} failed"
|
|
1233
|
+
return $fail
|
|
1234
|
+
fi
|
|
1235
|
+
|
|
1236
|
+
# DB file exists
|
|
1237
|
+
if [[ -f "$DB_FILE" ]]; then
|
|
1238
|
+
echo -e " ${GREEN}${BOLD}✓${RESET} Database file exists: ${DB_FILE}"
|
|
1239
|
+
pass=$((pass + 1))
|
|
1240
|
+
else
|
|
1241
|
+
echo -e " ${YELLOW}${BOLD}⚠${RESET} Database not initialized — run: shipwright db init"
|
|
1242
|
+
fail=$((fail + 1))
|
|
1243
|
+
echo " ${pass} passed, ${fail} failed"
|
|
1244
|
+
return $fail
|
|
1245
|
+
fi
|
|
1246
|
+
|
|
1247
|
+
# Schema version
|
|
1248
|
+
local sv
|
|
1249
|
+
sv=$(_db_query "SELECT COALESCE(MAX(version), 0) FROM _schema;" || echo "0")
|
|
1250
|
+
if [[ "$sv" -ge "$SCHEMA_VERSION" ]]; then
|
|
1251
|
+
echo -e " ${GREEN}${BOLD}✓${RESET} Schema version: v${sv}"
|
|
1252
|
+
pass=$((pass + 1))
|
|
1253
|
+
else
|
|
1254
|
+
echo -e " ${YELLOW}${BOLD}⚠${RESET} Schema version: v${sv} (expected v${SCHEMA_VERSION}) — run: shipwright db migrate"
|
|
1255
|
+
fail=$((fail + 1))
|
|
1256
|
+
fi
|
|
1257
|
+
|
|
1258
|
+
# WAL mode
|
|
1259
|
+
local jm
|
|
1260
|
+
jm=$(_db_query "PRAGMA journal_mode;" || echo "unknown")
|
|
1261
|
+
if [[ "$jm" == "wal" ]]; then
|
|
1262
|
+
echo -e " ${GREEN}${BOLD}✓${RESET} WAL mode enabled"
|
|
1263
|
+
pass=$((pass + 1))
|
|
1264
|
+
else
|
|
1265
|
+
echo -e " ${YELLOW}${BOLD}⚠${RESET} Journal mode: ${jm} (WAL recommended) — run: shipwright db init"
|
|
1266
|
+
fail=$((fail + 1))
|
|
1267
|
+
fi
|
|
1268
|
+
|
|
1269
|
+
# Integrity check
|
|
1270
|
+
local integrity
|
|
1271
|
+
integrity=$(_db_query "PRAGMA integrity_check;" || echo "error")
|
|
1272
|
+
if [[ "$integrity" == "ok" ]]; then
|
|
1273
|
+
echo -e " ${GREEN}${BOLD}✓${RESET} Integrity check passed"
|
|
1274
|
+
pass=$((pass + 1))
|
|
1275
|
+
else
|
|
1276
|
+
echo -e " ${RED}${BOLD}✗${RESET} Integrity check failed: ${integrity}"
|
|
1277
|
+
fail=$((fail + 1))
|
|
1278
|
+
fi
|
|
1279
|
+
|
|
1280
|
+
echo " ${pass} passed, ${fail} failed"
|
|
1281
|
+
return $fail
|
|
1282
|
+
}
|
|
1283
|
+
|
|
1284
|
+
# ─── Show Help ──────────────────────────────────────────────────────────────
|
|
1285
|
+
# Print usage, command list, and worked examples for `shipwright db`.
# Uses a single expanding here-doc instead of an echo chain; color variables
# (${CYAN}, ${BOLD}, …) are interpolated exactly as before.
show_help() {
  cat <<EOF
${CYAN}${BOLD}shipwright db${RESET} — SQLite Persistence Layer

${BOLD}USAGE${RESET}
 shipwright db <command> [options]

${BOLD}COMMANDS${RESET}
 ${CYAN}init${RESET} Initialize database schema (creates DB, enables WAL)
 ${CYAN}migrate${RESET} Apply schema migrations + import JSON state files
 ${CYAN}status${RESET} Show database stats, sync status, recent runs
 ${CYAN}query${RESET} [status] Query pipeline runs by status
 ${CYAN}export${RESET} [file] Export database to JSON backup
 ${CYAN}import${RESET} <file> Import data from JSON backup
 ${CYAN}cleanup${RESET} [days] Delete records older than N days (default 30)
 ${CYAN}health${RESET} Run database health checks
 ${CYAN}sync push${RESET} Push unsynced data to remote
 ${CYAN}sync pull${RESET} Pull new data from remote
 ${CYAN}help${RESET} Show this help

${DIM}Examples:${RESET}
 shipwright db init
 shipwright db migrate # Import events.jsonl, costs.json, etc.
 shipwright db status
 shipwright db query failed
 shipwright db health
 shipwright db sync push
 shipwright db cleanup 60
EOF
}
|
|
1313
|
+
|
|
1314
|
+
# ─── Main Router ────────────────────────────────────────────────────────────
|
|
1315
|
+
# ─── Main Router ────────────────────────────────────────────────────────────
# Dispatch the first CLI argument to the matching sub-command handler.
# Unknown commands print an error plus the help text and exit 1.
main() {
  local command="${1:-help}"
  shift 2>/dev/null || true

  case "$command" in
    init)
      ensure_db_dir
      init_schema
      # Record the schema version and a stable per-device identifier
      # (best-effort: failures here must not abort initialization).
      _db_exec "INSERT OR REPLACE INTO _schema (version, created_at, applied_at) VALUES (${SCHEMA_VERSION}, '$(now_iso)', '$(now_iso)');" 2>/dev/null || true
      _db_exec "INSERT OR IGNORE INTO _sync_metadata (key, value, updated_at) VALUES ('device_id', '$(uname -n)-$$-$(now_epoch)', '$(now_iso)');" 2>/dev/null || true
      success "Database initialized at ${DB_FILE} (WAL mode, schema v${SCHEMA_VERSION})"
      ;;
    migrate) migrate_json_data ;;
    status) show_status ;;
    query) query_runs "${1:-}" ;;
    export) export_db "${1:-${DB_DIR}/shipwright-backup.json}" ;;
    import)
      if [[ -z "${1:-}" ]]; then
        error "Please provide a file to import"
        exit 1
      fi
      import_db "$1"
      ;;
    cleanup) cleanup_old_data "${1:-30}" ;;
    health) db_health_check ;;
    sync)
      local action="${1:-help}"
      shift 2>/dev/null || true
      case "$action" in
        push) db_sync_push ;;
        pull) db_sync_pull ;;
        *) echo "Usage: shipwright db sync {push|pull}"; exit 1 ;;
      esac
      ;;
    help|--help|-h) show_help ;;
    *)
      error "Unknown command: ${command}"
      echo ""
      show_help
      exit 1
      ;;
  esac
}
|
|
1377
|
+
|
|
1378
|
+
# Run the CLI only when this script is executed directly; when it is sourced
# (e.g. by tests or other sw-* scripts reusing its functions), do nothing.
if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then
main "$@"
fi
|