shipwright-cli 2.0.0 → 2.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112)
  1. package/README.md +160 -72
  2. package/completions/_shipwright +59 -7
  3. package/completions/shipwright.bash +24 -4
  4. package/completions/shipwright.fish +80 -2
  5. package/dashboard/server.ts +208 -0
  6. package/docs/tmux-research/TMUX-ARCHITECTURE.md +567 -0
  7. package/docs/tmux-research/TMUX-AUDIT.md +925 -0
  8. package/docs/tmux-research/TMUX-BEST-PRACTICES-2025-2026.md +829 -0
  9. package/docs/tmux-research/TMUX-QUICK-REFERENCE.md +543 -0
  10. package/docs/tmux-research/TMUX-RESEARCH-INDEX.md +438 -0
  11. package/package.json +2 -2
  12. package/scripts/lib/helpers.sh +7 -0
  13. package/scripts/sw +116 -2
  14. package/scripts/sw-activity.sh +1 -1
  15. package/scripts/sw-adaptive.sh +1 -1
  16. package/scripts/sw-adversarial.sh +1 -1
  17. package/scripts/sw-architecture-enforcer.sh +1 -1
  18. package/scripts/sw-auth.sh +1 -1
  19. package/scripts/sw-autonomous.sh +128 -38
  20. package/scripts/sw-changelog.sh +1 -1
  21. package/scripts/sw-checkpoint.sh +1 -1
  22. package/scripts/sw-ci.sh +1 -1
  23. package/scripts/sw-cleanup.sh +1 -1
  24. package/scripts/sw-code-review.sh +62 -1
  25. package/scripts/sw-connect.sh +1 -1
  26. package/scripts/sw-context.sh +1 -1
  27. package/scripts/sw-cost.sh +44 -3
  28. package/scripts/sw-daemon.sh +155 -27
  29. package/scripts/sw-dashboard.sh +1 -1
  30. package/scripts/sw-db.sh +958 -118
  31. package/scripts/sw-decompose.sh +1 -1
  32. package/scripts/sw-deps.sh +1 -1
  33. package/scripts/sw-developer-simulation.sh +1 -1
  34. package/scripts/sw-discovery.sh +1 -1
  35. package/scripts/sw-docs-agent.sh +1 -1
  36. package/scripts/sw-docs.sh +1 -1
  37. package/scripts/sw-doctor.sh +49 -1
  38. package/scripts/sw-dora.sh +1 -1
  39. package/scripts/sw-durable.sh +1 -1
  40. package/scripts/sw-e2e-orchestrator.sh +1 -1
  41. package/scripts/sw-eventbus.sh +1 -1
  42. package/scripts/sw-feedback.sh +23 -15
  43. package/scripts/sw-fix.sh +1 -1
  44. package/scripts/sw-fleet-discover.sh +1 -1
  45. package/scripts/sw-fleet-viz.sh +1 -1
  46. package/scripts/sw-fleet.sh +1 -1
  47. package/scripts/sw-github-app.sh +1 -1
  48. package/scripts/sw-github-checks.sh +4 -4
  49. package/scripts/sw-github-deploy.sh +1 -1
  50. package/scripts/sw-github-graphql.sh +1 -1
  51. package/scripts/sw-guild.sh +1 -1
  52. package/scripts/sw-heartbeat.sh +1 -1
  53. package/scripts/sw-hygiene.sh +1 -1
  54. package/scripts/sw-incident.sh +45 -6
  55. package/scripts/sw-init.sh +150 -24
  56. package/scripts/sw-instrument.sh +1 -1
  57. package/scripts/sw-intelligence.sh +1 -1
  58. package/scripts/sw-jira.sh +1 -1
  59. package/scripts/sw-launchd.sh +1 -1
  60. package/scripts/sw-linear.sh +1 -1
  61. package/scripts/sw-logs.sh +1 -1
  62. package/scripts/sw-loop.sh +204 -19
  63. package/scripts/sw-memory.sh +18 -1
  64. package/scripts/sw-mission-control.sh +1 -1
  65. package/scripts/sw-model-router.sh +1 -1
  66. package/scripts/sw-otel.sh +1 -1
  67. package/scripts/sw-oversight.sh +76 -1
  68. package/scripts/sw-pipeline-composer.sh +1 -1
  69. package/scripts/sw-pipeline-vitals.sh +1 -1
  70. package/scripts/sw-pipeline.sh +302 -18
  71. package/scripts/sw-pm.sh +70 -5
  72. package/scripts/sw-pr-lifecycle.sh +1 -1
  73. package/scripts/sw-predictive.sh +8 -1
  74. package/scripts/sw-prep.sh +1 -1
  75. package/scripts/sw-ps.sh +1 -1
  76. package/scripts/sw-public-dashboard.sh +1 -1
  77. package/scripts/sw-quality.sh +1 -1
  78. package/scripts/sw-reaper.sh +1 -1
  79. package/scripts/sw-recruit.sh +1853 -178
  80. package/scripts/sw-regression.sh +1 -1
  81. package/scripts/sw-release-manager.sh +1 -1
  82. package/scripts/sw-release.sh +1 -1
  83. package/scripts/sw-remote.sh +1 -1
  84. package/scripts/sw-replay.sh +1 -1
  85. package/scripts/sw-retro.sh +1 -1
  86. package/scripts/sw-scale.sh +1 -1
  87. package/scripts/sw-security-audit.sh +1 -1
  88. package/scripts/sw-self-optimize.sh +1 -1
  89. package/scripts/sw-session.sh +1 -1
  90. package/scripts/sw-setup.sh +263 -127
  91. package/scripts/sw-standup.sh +1 -1
  92. package/scripts/sw-status.sh +44 -2
  93. package/scripts/sw-strategic.sh +189 -41
  94. package/scripts/sw-stream.sh +1 -1
  95. package/scripts/sw-swarm.sh +42 -5
  96. package/scripts/sw-team-stages.sh +1 -1
  97. package/scripts/sw-templates.sh +4 -4
  98. package/scripts/sw-testgen.sh +66 -15
  99. package/scripts/sw-tmux-pipeline.sh +1 -1
  100. package/scripts/sw-tmux-role-color.sh +58 -0
  101. package/scripts/sw-tmux-status.sh +128 -0
  102. package/scripts/sw-tmux.sh +1 -1
  103. package/scripts/sw-trace.sh +1 -1
  104. package/scripts/sw-tracker.sh +1 -1
  105. package/scripts/sw-triage.sh +61 -37
  106. package/scripts/sw-upgrade.sh +1 -1
  107. package/scripts/sw-ux.sh +30 -2
  108. package/scripts/sw-webhook.sh +1 -1
  109. package/scripts/sw-widgets.sh +1 -1
  110. package/scripts/sw-worktree.sh +1 -1
  111. package/tmux/shipwright-overlay.conf +35 -17
  112. package/tmux/tmux.conf +26 -21
package/scripts/sw-db.sh CHANGED
@@ -1,13 +1,20 @@
1
1
  #!/usr/bin/env bash
2
2
  # ╔═══════════════════════════════════════════════════════════════════════════╗
3
3
  # ║ shipwright db — SQLite Persistence Layer ║
4
- # ║ Store events, runs, developers, sessions, and metrics in SQLite
5
- # ║ Backward compatible: reads JSON if SQLite unavailable
4
+ # ║ Unified state store: events, runs, daemon state, costs, heartbeats
5
+ # ║ Backward compatible: falls back to JSON if SQLite unavailable
6
+ # ║ Cross-device sync via HTTP (Turso/sqld/any REST endpoint) ║
6
7
  # ╚═══════════════════════════════════════════════════════════════════════════╝
7
8
  set -euo pipefail
8
9
  trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
9
10
 
10
- VERSION="2.0.0"
11
+ # ─── Double-source guard ─────────────────────────────────────────
12
+ if [[ -n "${_SW_DB_LOADED:-}" ]] && [[ "${BASH_SOURCE[0]}" != "$0" ]]; then
13
+ return 0 2>/dev/null || true
14
+ fi
15
+ _SW_DB_LOADED=1
16
+
17
+ VERSION="2.1.1"
11
18
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
12
19
  REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
13
20
 
@@ -38,20 +45,52 @@ now_epoch() { date +%s; }
38
45
  # ─── Database Configuration ──────────────────────────────────────────────────
39
46
  DB_DIR="${HOME}/.shipwright"
40
47
  DB_FILE="${DB_DIR}/shipwright.db"
41
- SCHEMA_VERSION=1
48
+ SCHEMA_VERSION=2
42
49
 
43
50
  # JSON fallback paths
44
51
  EVENTS_FILE="${DB_DIR}/events.jsonl"
45
52
  DAEMON_STATE_FILE="${DB_DIR}/daemon-state.json"
46
53
  DEVELOPER_REGISTRY_FILE="${DB_DIR}/developer-registry.json"
54
+ COST_FILE_JSON="${DB_DIR}/costs.json"
55
+ BUDGET_FILE_JSON="${DB_DIR}/budget.json"
56
+ HEARTBEAT_DIR="${DB_DIR}/heartbeats"
57
+
58
+ # Sync config
59
+ SYNC_CONFIG_FILE="${DB_DIR}/sync-config.json"
60
+
61
+ # ─── Feature Flag ─────────────────────────────────────────────────────────────
62
+ # Check if DB is enabled in daemon config (default: true)
63
+ _db_feature_enabled() {
64
+ local config_file=".claude/daemon-config.json"
65
+ if [[ -f "$config_file" ]]; then
66
+ local enabled
67
+ enabled=$(jq -r '.db.enabled // true' "$config_file" 2>/dev/null || echo "true")
68
+ [[ "$enabled" == "true" ]]
69
+ return $?
70
+ fi
71
+ return 0
72
+ }
47
73
 
48
74
  # ─── Check Prerequisites ─────────────────────────────────────────────────────
75
+ _SQLITE3_CHECKED=""
76
+ _SQLITE3_AVAILABLE=""
77
+
49
78
  check_sqlite3() {
50
- if ! command -v sqlite3 &>/dev/null; then
51
- warn "sqlite3 not found. Install with: brew install sqlite (macOS) or apt install sqlite3 (Ubuntu)"
52
- return 1
79
+ # Cache the result to avoid repeated command lookups
80
+ if [[ -z "$_SQLITE3_CHECKED" ]]; then
81
+ _SQLITE3_CHECKED=1
82
+ if command -v sqlite3 &>/dev/null; then
83
+ _SQLITE3_AVAILABLE=1
84
+ else
85
+ _SQLITE3_AVAILABLE=""
86
+ fi
53
87
  fi
54
- return 0
88
+ [[ -n "$_SQLITE3_AVAILABLE" ]]
89
+ }
90
+
91
+ # Check if DB is ready (sqlite3 available + file exists + feature enabled)
92
+ db_available() {
93
+ check_sqlite3 && [[ -f "$DB_FILE" ]] && _db_feature_enabled
55
94
  }
56
95
 
57
96
  # ─── Ensure Database Directory ──────────────────────────────────────────────
@@ -59,6 +98,17 @@ ensure_db_dir() {
59
98
  mkdir -p "$DB_DIR"
60
99
  }
61
100
 
101
+ # ─── SQL Execution Helper ──────────────────────────────────────────────────
102
+ # Runs SQL with proper error handling. Silent on success.
103
+ _db_exec() {
104
+ sqlite3 "$DB_FILE" "$@" 2>/dev/null
105
+ }
106
+
107
+ # Runs SQL and returns output. Returns 1 on failure.
108
+ _db_query() {
109
+ sqlite3 "$DB_FILE" "$@" 2>/dev/null || return 1
110
+ }
111
+
62
112
  # ─── Initialize Database Schema ──────────────────────────────────────────────
63
113
  init_schema() {
64
114
  ensure_db_dir
@@ -68,7 +118,10 @@ init_schema() {
68
118
  return 0
69
119
  fi
70
120
 
71
- sqlite3 "$DB_FILE" <<'EOF'
121
+ # Enable WAL mode for crash safety + concurrent readers
122
+ sqlite3 "$DB_FILE" "PRAGMA journal_mode=WAL;" >/dev/null 2>&1 || true
123
+
124
+ sqlite3 "$DB_FILE" <<'SCHEMA'
72
125
  -- Schema version tracking
73
126
  CREATE TABLE IF NOT EXISTS _schema (
74
127
  version INTEGER PRIMARY KEY,
@@ -91,6 +144,7 @@ CREATE TABLE IF NOT EXISTS events (
91
144
  duration_secs INTEGER,
92
145
  metadata TEXT,
93
146
  created_at TEXT NOT NULL,
147
+ synced INTEGER DEFAULT 0,
94
148
  UNIQUE(ts_epoch, type, job_id)
95
149
  );
96
150
 
@@ -176,10 +230,112 @@ CREATE TABLE IF NOT EXISTS metrics (
176
230
  FOREIGN KEY (job_id) REFERENCES pipeline_runs(job_id)
177
231
  );
178
232
 
179
- -- Create indexes for common queries
233
+ -- ═══════════════════════════════════════════════════════════════════════
234
+ -- Phase 1: New tables for state migration
235
+ -- ═══════════════════════════════════════════════════════════════════════
236
+
237
+ -- Daemon state (replaces daemon-state.json)
238
+ CREATE TABLE IF NOT EXISTS daemon_state (
239
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
240
+ job_id TEXT NOT NULL,
241
+ issue_number INTEGER NOT NULL,
242
+ title TEXT,
243
+ goal TEXT,
244
+ pid INTEGER,
245
+ worktree TEXT,
246
+ branch TEXT,
247
+ status TEXT NOT NULL DEFAULT 'active',
248
+ template TEXT,
249
+ started_at TEXT NOT NULL,
250
+ completed_at TEXT,
251
+ result TEXT,
252
+ duration TEXT,
253
+ error_message TEXT,
254
+ retry_count INTEGER DEFAULT 0,
255
+ updated_at TEXT NOT NULL,
256
+ UNIQUE(job_id, status)
257
+ );
258
+
259
+ -- Cost entries (replaces costs.json)
260
+ CREATE TABLE IF NOT EXISTS cost_entries (
261
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
262
+ input_tokens INTEGER NOT NULL DEFAULT 0,
263
+ output_tokens INTEGER NOT NULL DEFAULT 0,
264
+ model TEXT NOT NULL DEFAULT 'sonnet',
265
+ stage TEXT,
266
+ issue TEXT,
267
+ cost_usd REAL NOT NULL DEFAULT 0,
268
+ ts TEXT NOT NULL,
269
+ ts_epoch INTEGER NOT NULL,
270
+ synced INTEGER DEFAULT 0
271
+ );
272
+
273
+ -- Budgets (replaces budget.json)
274
+ CREATE TABLE IF NOT EXISTS budgets (
275
+ id INTEGER PRIMARY KEY CHECK (id = 1),
276
+ daily_budget_usd REAL NOT NULL DEFAULT 0,
277
+ enabled INTEGER NOT NULL DEFAULT 0,
278
+ updated_at TEXT NOT NULL
279
+ );
280
+
281
+ -- Heartbeats (replaces heartbeats/*.json)
282
+ CREATE TABLE IF NOT EXISTS heartbeats (
283
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
284
+ job_id TEXT UNIQUE NOT NULL,
285
+ pid INTEGER,
286
+ issue INTEGER,
287
+ stage TEXT,
288
+ iteration INTEGER DEFAULT 0,
289
+ last_activity TEXT,
290
+ memory_mb INTEGER DEFAULT 0,
291
+ updated_at TEXT NOT NULL
292
+ );
293
+
294
+ -- Memory: failure patterns (replaces memory/*/failures.json)
295
+ CREATE TABLE IF NOT EXISTS memory_failures (
296
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
297
+ repo_hash TEXT NOT NULL,
298
+ failure_class TEXT NOT NULL,
299
+ error_signature TEXT,
300
+ root_cause TEXT,
301
+ fix_description TEXT,
302
+ file_path TEXT,
303
+ stage TEXT,
304
+ occurrences INTEGER DEFAULT 1,
305
+ last_seen_at TEXT NOT NULL,
306
+ created_at TEXT NOT NULL,
307
+ synced INTEGER DEFAULT 0
308
+ );
309
+
310
+ -- ═══════════════════════════════════════════════════════════════════════
311
+ -- Sync tables
312
+ -- ═══════════════════════════════════════════════════════════════════════
313
+
314
+ -- Track unsynced local changes
315
+ CREATE TABLE IF NOT EXISTS _sync_log (
316
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
317
+ table_name TEXT NOT NULL,
318
+ row_id INTEGER NOT NULL,
319
+ operation TEXT NOT NULL,
320
+ ts_epoch INTEGER NOT NULL,
321
+ synced INTEGER DEFAULT 0
322
+ );
323
+
324
+ -- Replication state
325
+ CREATE TABLE IF NOT EXISTS _sync_metadata (
326
+ key TEXT PRIMARY KEY,
327
+ value TEXT NOT NULL,
328
+ updated_at TEXT NOT NULL
329
+ );
330
+
331
+ -- ═══════════════════════════════════════════════════════════════════════
332
+ -- Indexes
333
+ -- ═══════════════════════════════════════════════════════════════════════
334
+
180
335
  CREATE INDEX IF NOT EXISTS idx_events_type ON events(type);
181
336
  CREATE INDEX IF NOT EXISTS idx_events_job_id ON events(job_id);
182
337
  CREATE INDEX IF NOT EXISTS idx_events_ts_epoch ON events(ts_epoch DESC);
338
+ CREATE INDEX IF NOT EXISTS idx_events_synced ON events(synced) WHERE synced = 0;
183
339
  CREATE INDEX IF NOT EXISTS idx_pipeline_runs_job_id ON pipeline_runs(job_id);
184
340
  CREATE INDEX IF NOT EXISTS idx_pipeline_runs_status ON pipeline_runs(status);
185
341
  CREATE INDEX IF NOT EXISTS idx_pipeline_runs_created ON pipeline_runs(created_at DESC);
@@ -188,30 +344,95 @@ CREATE INDEX IF NOT EXISTS idx_developers_name ON developers(name);
188
344
  CREATE INDEX IF NOT EXISTS idx_sessions_status ON sessions(status);
189
345
  CREATE INDEX IF NOT EXISTS idx_metrics_job_id ON metrics(job_id);
190
346
  CREATE INDEX IF NOT EXISTS idx_metrics_type ON metrics(metric_type);
191
- EOF
347
+ CREATE INDEX IF NOT EXISTS idx_daemon_state_status ON daemon_state(status);
348
+ CREATE INDEX IF NOT EXISTS idx_daemon_state_job ON daemon_state(job_id);
349
+ CREATE INDEX IF NOT EXISTS idx_cost_entries_epoch ON cost_entries(ts_epoch DESC);
350
+ CREATE INDEX IF NOT EXISTS idx_cost_entries_synced ON cost_entries(synced) WHERE synced = 0;
351
+ CREATE INDEX IF NOT EXISTS idx_heartbeats_job ON heartbeats(job_id);
352
+ CREATE INDEX IF NOT EXISTS idx_memory_failures_repo ON memory_failures(repo_hash);
353
+ CREATE INDEX IF NOT EXISTS idx_memory_failures_class ON memory_failures(failure_class);
354
+ CREATE INDEX IF NOT EXISTS idx_sync_log_unsynced ON _sync_log(synced) WHERE synced = 0;
355
+ SCHEMA
192
356
  }
193
357
 
194
- # ─── Migrate Database ────────────────────────────────────────────────────────
358
+ # ─── Schema Migration ───────────────────────────────────────────────────────
195
359
  migrate_schema() {
196
360
  if ! check_sqlite3; then
197
361
  warn "Skipping migration — sqlite3 not available"
198
362
  return 0
199
363
  fi
200
364
 
201
- local current_version
202
- current_version=$(sqlite3 "$DB_FILE" "SELECT COALESCE(MAX(version), 0) FROM _schema;" 2>/dev/null || echo 0)
365
+ ensure_db_dir
203
366
 
204
- if [[ "$current_version" -eq 0 ]]; then
205
- # First run: initialize schema version
367
+ # If DB doesn't exist, initialize fresh
368
+ if [[ ! -f "$DB_FILE" ]]; then
206
369
  init_schema
207
- sqlite3 "$DB_FILE" "INSERT INTO _schema (version, created_at, applied_at) VALUES (${SCHEMA_VERSION}, '$(now_iso)', '$(now_iso)');"
370
+ _db_exec "INSERT OR REPLACE INTO _schema (version, created_at, applied_at) VALUES (${SCHEMA_VERSION}, '$(now_iso)', '$(now_iso)');"
371
+ # Initialize device_id for sync
372
+ _db_exec "INSERT OR REPLACE INTO _sync_metadata (key, value, updated_at) VALUES ('device_id', '$(uname -n)-$$-$(now_epoch)', '$(now_iso)');"
208
373
  success "Database schema initialized (v${SCHEMA_VERSION})"
209
- else
374
+ return 0
375
+ fi
376
+
377
+ local current_version
378
+ current_version=$(_db_query "SELECT COALESCE(MAX(version), 0) FROM _schema;" || echo 0)
379
+
380
+ if [[ "$current_version" -ge "$SCHEMA_VERSION" ]]; then
210
381
  info "Database already at schema v${current_version}"
382
+ return 0
211
383
  fi
384
+
385
+ # Migration from v1 → v2: add new tables
386
+ if [[ "$current_version" -lt 2 ]]; then
387
+ info "Migrating schema v${current_version} → v2..."
388
+ init_schema # CREATE IF NOT EXISTS is idempotent
389
+ # Enable WAL if not already
390
+ sqlite3 "$DB_FILE" "PRAGMA journal_mode=WAL;" >/dev/null 2>&1 || true
391
+ _db_exec "INSERT OR REPLACE INTO _schema (version, created_at, applied_at) VALUES (2, '$(now_iso)', '$(now_iso)');"
392
+ # Initialize device_id if missing
393
+ _db_exec "INSERT OR IGNORE INTO _sync_metadata (key, value, updated_at) VALUES ('device_id', '$(uname -n)-$$-$(now_epoch)', '$(now_iso)');"
394
+ success "Migrated to schema v2"
395
+ fi
396
+ }
397
+
398
+ # ═══════════════════════════════════════════════════════════════════════════
399
+ # Event Functions (dual-write: SQLite + JSONL)
400
+ # ═══════════════════════════════════════════════════════════════════════════
401
+
402
+ # db_add_event <type> [key=value ...]
403
+ # Parameterized event insert. Used by emit_event() in helpers.sh.
404
+ db_add_event() {
405
+ local event_type="$1"
406
+ shift
407
+
408
+ local ts ts_epoch job_id="" stage="" status="" duration_secs="0" metadata=""
409
+ ts="$(now_iso)"
410
+ ts_epoch="$(now_epoch)"
411
+
412
+ # Parse key=value pairs
413
+ local kv key val
414
+ for kv in "$@"; do
415
+ key="${kv%%=*}"
416
+ val="${kv#*=}"
417
+ case "$key" in
418
+ job_id) job_id="$val" ;;
419
+ stage) stage="$val" ;;
420
+ status) status="$val" ;;
421
+ duration_secs) duration_secs="$val" ;;
422
+ *) metadata="${metadata:+${metadata},}\"${key}\":\"${val}\"" ;;
423
+ esac
424
+ done
425
+
426
+ [[ -n "$metadata" ]] && metadata="{${metadata}}"
427
+
428
+ if ! db_available; then
429
+ return 1
430
+ fi
431
+
432
+ _db_exec "INSERT OR IGNORE INTO events (ts, ts_epoch, type, job_id, stage, status, duration_secs, metadata, created_at, synced) VALUES ('${ts}', ${ts_epoch}, '${event_type}', '${job_id}', '${stage}', '${status}', ${duration_secs}, '${metadata}', '${ts}', 0);" || return 1
212
433
  }
213
434
 
214
- # ─── Add Event (SQLite + JSONL for backward compat) ──────────────────────────
435
+ # Legacy positional API (backward compat with existing add_event calls)
215
436
  add_event() {
216
437
  local event_type="$1"
217
438
  local job_id="${2:-}"
@@ -220,35 +441,312 @@ add_event() {
220
441
  local duration_secs="${5:-0}"
221
442
  local metadata="${6:-}"
222
443
 
223
- local ts
444
+ local ts ts_epoch
224
445
  ts="$(now_iso)"
225
- local ts_epoch
226
446
  ts_epoch="$(now_epoch)"
227
447
 
228
- # Try SQLite first, fallback to JSONL
229
- if check_sqlite3; then
230
- sqlite3 "$DB_FILE" <<EOF || true
231
- INSERT OR IGNORE INTO events
232
- (ts, ts_epoch, type, job_id, stage, status, duration_secs, metadata, created_at)
233
- VALUES
234
- ('${ts}', ${ts_epoch}, '${event_type}', '${job_id}', '${stage}', '${status}', ${duration_secs}, '${metadata}', '${ts}');
235
- EOF
448
+ # Try SQLite first
449
+ if db_available; then
450
+ _db_exec "INSERT OR IGNORE INTO events (ts, ts_epoch, type, job_id, stage, status, duration_secs, metadata, created_at, synced) VALUES ('${ts}', ${ts_epoch}, '${event_type}', '${job_id}', '${stage}', '${status}', ${duration_secs}, '${metadata}', '${ts}', 0);" || true
236
451
  fi
237
452
 
238
- # Always write to JSONL for backward compat
453
+ # Always write to JSONL for backward compat (dual-write period)
239
454
  mkdir -p "$DB_DIR"
240
455
  local json_record
241
456
  json_record="{\"ts\":\"${ts}\",\"ts_epoch\":${ts_epoch},\"type\":\"${event_type}\""
242
457
  [[ -n "$job_id" ]] && json_record="${json_record},\"job_id\":\"${job_id}\""
243
458
  [[ -n "$stage" ]] && json_record="${json_record},\"stage\":\"${stage}\""
244
459
  [[ -n "$status" ]] && json_record="${json_record},\"status\":\"${status}\""
245
- [[ "$duration_secs" -gt 0 ]] && json_record="${json_record},\"duration_secs\":${duration_secs}"
460
+ [[ "$duration_secs" -gt 0 ]] 2>/dev/null && json_record="${json_record},\"duration_secs\":${duration_secs}"
246
461
  [[ -n "$metadata" ]] && json_record="${json_record},\"metadata\":${metadata}"
247
462
  json_record="${json_record}}"
248
463
  echo "$json_record" >> "$EVENTS_FILE"
249
464
  }
250
465
 
251
- # ─── Add Pipeline Run ────────────────────────────────────────────────────────
466
+ # ═══════════════════════════════════════════════════════════════════════════
467
+ # Daemon State Functions (replaces daemon-state.json operations)
468
+ # ═══════════════════════════════════════════════════════════════════════════
469
+
470
+ # db_save_job <job_id> <issue_number> <title> <pid> <worktree> [branch] [template] [goal]
471
+ db_save_job() {
472
+ local job_id="$1"
473
+ local issue_num="$2"
474
+ local title="${3:-}"
475
+ local pid="${4:-0}"
476
+ local worktree="${5:-}"
477
+ local branch="${6:-}"
478
+ local template="${7:-autonomous}"
479
+ local goal="${8:-}"
480
+ local ts
481
+ ts="$(now_iso)"
482
+
483
+ if ! db_available; then return 1; fi
484
+
485
+ # Escape single quotes in title/goal
486
+ title="${title//\'/\'\'}"
487
+ goal="${goal//\'/\'\'}"
488
+
489
+ _db_exec "INSERT OR REPLACE INTO daemon_state (job_id, issue_number, title, goal, pid, worktree, branch, status, template, started_at, updated_at) VALUES ('${job_id}', ${issue_num}, '${title}', '${goal}', ${pid}, '${worktree}', '${branch}', 'active', '${template}', '${ts}', '${ts}');"
490
+ }
491
+
492
+ # db_complete_job <job_id> <result> [duration] [error_message]
493
+ db_complete_job() {
494
+ local job_id="$1"
495
+ local result="$2"
496
+ local duration="${3:-}"
497
+ local error_msg="${4:-}"
498
+ local ts
499
+ ts="$(now_iso)"
500
+
501
+ if ! db_available; then return 1; fi
502
+
503
+ error_msg="${error_msg//\'/\'\'}"
504
+
505
+ _db_exec "UPDATE daemon_state SET status = 'completed', result = '${result}', duration = '${duration}', error_message = '${error_msg}', completed_at = '${ts}', updated_at = '${ts}' WHERE job_id = '${job_id}' AND status = 'active';"
506
+ }
507
+
508
+ # db_fail_job <job_id> [error_message]
509
+ db_fail_job() {
510
+ local job_id="$1"
511
+ local error_msg="${2:-}"
512
+ local ts
513
+ ts="$(now_iso)"
514
+
515
+ if ! db_available; then return 1; fi
516
+
517
+ error_msg="${error_msg//\'/\'\'}"
518
+
519
+ _db_exec "UPDATE daemon_state SET status = 'failed', result = 'failure', error_message = '${error_msg}', completed_at = '${ts}', updated_at = '${ts}' WHERE job_id = '${job_id}' AND status = 'active';"
520
+ }
521
+
522
+ # db_list_active_jobs — outputs JSON array of active daemon jobs
523
+ db_list_active_jobs() {
524
+ if ! db_available; then echo "[]"; return 0; fi
525
+ _db_query "SELECT json_group_array(json_object('job_id', job_id, 'issue', issue_number, 'title', title, 'pid', pid, 'worktree', worktree, 'branch', branch, 'started_at', started_at, 'template', template, 'goal', goal)) FROM daemon_state WHERE status = 'active';" || echo "[]"
526
+ }
527
+
528
+ # db_list_completed_jobs [limit] — outputs JSON array
529
+ db_list_completed_jobs() {
530
+ local limit="${1:-20}"
531
+ if ! db_available; then echo "[]"; return 0; fi
532
+ _db_query "SELECT json_group_array(json_object('job_id', job_id, 'issue', issue_number, 'title', title, 'result', result, 'duration', duration, 'completed_at', completed_at)) FROM (SELECT * FROM daemon_state WHERE status IN ('completed', 'failed') ORDER BY completed_at DESC LIMIT ${limit});" || echo "[]"
533
+ }
534
+
535
+ # db_active_job_count — returns integer
536
+ db_active_job_count() {
537
+ if ! db_available; then echo "0"; return 0; fi
538
+ _db_query "SELECT COUNT(*) FROM daemon_state WHERE status = 'active';" || echo "0"
539
+ }
540
+
541
+ # db_is_issue_active <issue_number> — returns 0 if active, 1 if not
542
+ db_is_issue_active() {
543
+ local issue_num="$1"
544
+ if ! db_available; then return 1; fi
545
+ local count
546
+ count=$(_db_query "SELECT COUNT(*) FROM daemon_state WHERE issue_number = ${issue_num} AND status = 'active';")
547
+ [[ "${count:-0}" -gt 0 ]]
548
+ }
549
+
550
+ # db_remove_active_job <job_id> — delete from active (for cleanup)
551
+ db_remove_active_job() {
552
+ local job_id="$1"
553
+ if ! db_available; then return 1; fi
554
+ _db_exec "DELETE FROM daemon_state WHERE job_id = '${job_id}' AND status = 'active';"
555
+ }
556
+
557
+ # db_daemon_summary — outputs JSON summary for status dashboard
558
+ db_daemon_summary() {
559
+ if ! db_available; then echo "{}"; return 0; fi
560
+ _db_query "SELECT json_object(
561
+ 'active_count', (SELECT COUNT(*) FROM daemon_state WHERE status = 'active'),
562
+ 'completed_count', (SELECT COUNT(*) FROM daemon_state WHERE status IN ('completed', 'failed')),
563
+ 'success_count', (SELECT COUNT(*) FROM daemon_state WHERE result = 'success'),
564
+ 'failure_count', (SELECT COUNT(*) FROM daemon_state WHERE result = 'failure')
565
+ );" || echo "{}"
566
+ }
567
+
568
+ # ═══════════════════════════════════════════════════════════════════════════
569
+ # Cost Functions (replaces costs.json)
570
+ # ═══════════════════════════════════════════════════════════════════════════
571
+
572
+ # db_record_cost <input_tokens> <output_tokens> <model> <cost_usd> <stage> [issue]
573
+ db_record_cost() {
574
+ local input_tokens="${1:-0}"
575
+ local output_tokens="${2:-0}"
576
+ local model="${3:-sonnet}"
577
+ local cost_usd="${4:-0}"
578
+ local stage="${5:-unknown}"
579
+ local issue="${6:-}"
580
+ local ts ts_epoch
581
+ ts="$(now_iso)"
582
+ ts_epoch="$(now_epoch)"
583
+
584
+ if ! db_available; then return 1; fi
585
+
586
+ _db_exec "INSERT INTO cost_entries (input_tokens, output_tokens, model, stage, issue, cost_usd, ts, ts_epoch, synced) VALUES (${input_tokens}, ${output_tokens}, '${model}', '${stage}', '${issue}', ${cost_usd}, '${ts}', ${ts_epoch}, 0);"
587
+ }
588
+
589
+ # db_cost_today — returns total cost for today as a number
590
+ db_cost_today() {
591
+ if ! db_available; then echo "0"; return 0; fi
592
+ local today_start
593
+ today_start=$(date -u +"%Y-%m-%dT00:00:00Z")
594
+ local today_epoch
595
+ today_epoch=$(date -u -jf "%Y-%m-%dT%H:%M:%SZ" "$today_start" +%s 2>/dev/null || date -u -d "$today_start" +%s 2>/dev/null || echo "0")
596
+ _db_query "SELECT COALESCE(ROUND(SUM(cost_usd), 4), 0) FROM cost_entries WHERE ts_epoch >= ${today_epoch};" || echo "0"
597
+ }
598
+
599
+ # db_cost_by_period <days> — returns JSON breakdown
600
+ db_cost_by_period() {
601
+ local days="${1:-7}"
602
+ if ! db_available; then echo "{}"; return 0; fi
603
+ local cutoff_epoch
604
+ cutoff_epoch=$(( $(now_epoch) - (days * 86400) ))
605
+ _db_query "SELECT json_object(
606
+ 'total', COALESCE(ROUND(SUM(cost_usd), 4), 0),
607
+ 'count', COUNT(*),
608
+ 'avg', COALESCE(ROUND(AVG(cost_usd), 4), 0),
609
+ 'max', COALESCE(ROUND(MAX(cost_usd), 4), 0),
610
+ 'input_tokens', COALESCE(SUM(input_tokens), 0),
611
+ 'output_tokens', COALESCE(SUM(output_tokens), 0)
612
+ ) FROM cost_entries WHERE ts_epoch >= ${cutoff_epoch};" || echo "{}"
613
+ }
614
+
615
+ # db_cost_by_stage <days> — returns JSON array grouped by stage
616
+ db_cost_by_stage() {
617
+ local days="${1:-7}"
618
+ if ! db_available; then echo "[]"; return 0; fi
619
+ local cutoff_epoch
620
+ cutoff_epoch=$(( $(now_epoch) - (days * 86400) ))
621
+ _db_query "SELECT json_group_array(json_object('stage', stage, 'cost', ROUND(total_cost, 4), 'count', cnt)) FROM (SELECT stage, SUM(cost_usd) as total_cost, COUNT(*) as cnt FROM cost_entries WHERE ts_epoch >= ${cutoff_epoch} GROUP BY stage ORDER BY total_cost DESC);" || echo "[]"
622
+ }
623
+
624
+ # db_remaining_budget — returns remaining budget or "unlimited"
625
+ db_remaining_budget() {
626
+ if ! db_available; then echo "unlimited"; return 0; fi
627
+ local row
628
+ row=$(_db_query "SELECT daily_budget_usd, enabled FROM budgets WHERE id = 1;" || echo "")
629
+ if [[ -z "$row" ]]; then
630
+ echo "unlimited"
631
+ return 0
632
+ fi
633
+ local budget_usd enabled
634
+ budget_usd=$(echo "$row" | cut -d'|' -f1)
635
+ enabled=$(echo "$row" | cut -d'|' -f2)
636
+ if [[ "${enabled:-0}" -ne 1 ]] || [[ "${budget_usd:-0}" == "0" ]]; then
637
+ echo "unlimited"
638
+ return 0
639
+ fi
640
+ local today_spent
641
+ today_spent=$(db_cost_today)
642
+ awk -v budget="$budget_usd" -v spent="$today_spent" 'BEGIN { printf "%.2f", budget - spent }'
643
+ }
644
+
645
+ # db_set_budget <amount_usd>
646
+ db_set_budget() {
647
+ local amount="$1"
648
+ if ! db_available; then return 1; fi
649
+ _db_exec "INSERT OR REPLACE INTO budgets (id, daily_budget_usd, enabled, updated_at) VALUES (1, ${amount}, 1, '$(now_iso)');"
650
+ }
651
+
652
+ # db_get_budget — returns "amount|enabled" or empty
653
+ db_get_budget() {
654
+ if ! db_available; then echo ""; return 0; fi
655
+ _db_query "SELECT daily_budget_usd || '|' || enabled FROM budgets WHERE id = 1;" || echo ""
656
+ }
657
+
658
+ # ═══════════════════════════════════════════════════════════════════════════
659
+ # Heartbeat Functions (replaces heartbeats/*.json)
660
+ # ═══════════════════════════════════════════════════════════════════════════
661
+
662
+ # db_record_heartbeat <job_id> <pid> <issue> <stage> <iteration> [activity] [memory_mb]
663
+ db_record_heartbeat() {
664
+ local job_id="$1"
665
+ local pid="${2:-0}"
666
+ local issue="${3:-0}"
667
+ local stage="${4:-}"
668
+ local iteration="${5:-0}"
669
+ local activity="${6:-}"
670
+ local memory_mb="${7:-0}"
671
+ local ts
672
+ ts="$(now_iso)"
673
+
674
+ if ! db_available; then return 1; fi
675
+
676
+ activity="${activity//\'/\'\'}"
677
+
678
+ _db_exec "INSERT OR REPLACE INTO heartbeats (job_id, pid, issue, stage, iteration, last_activity, memory_mb, updated_at) VALUES ('${job_id}', ${pid}, ${issue}, '${stage}', ${iteration}, '${activity}', ${memory_mb}, '${ts}');"
679
+ }
680
+
681
+ # db_stale_heartbeats [threshold_secs] — returns JSON array of stale heartbeats
682
+ db_stale_heartbeats() {
683
+ local threshold="${1:-120}"
684
+ if ! db_available; then echo "[]"; return 0; fi
685
+ local cutoff_epoch
686
+ cutoff_epoch=$(( $(now_epoch) - threshold ))
687
+ local cutoff_ts
688
+ cutoff_ts=$(date -u -r "$cutoff_epoch" +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null || date -u -d "@${cutoff_epoch}" +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null || echo "2000-01-01T00:00:00Z")
689
+ _db_query "SELECT json_group_array(json_object('job_id', job_id, 'pid', pid, 'stage', stage, 'updated_at', updated_at)) FROM heartbeats WHERE updated_at < '${cutoff_ts}';" || echo "[]"
690
+ }
691
+
692
+ # db_clear_heartbeat <job_id>
693
+ db_clear_heartbeat() {
694
+ local job_id="$1"
695
+ if ! db_available; then return 1; fi
696
+ _db_exec "DELETE FROM heartbeats WHERE job_id = '${job_id}';"
697
+ }
698
+
699
+ # db_list_heartbeats — returns JSON array
700
+ db_list_heartbeats() {
701
+ if ! db_available; then echo "[]"; return 0; fi
702
+ _db_query "SELECT json_group_array(json_object('job_id', job_id, 'pid', pid, 'issue', issue, 'stage', stage, 'iteration', iteration, 'last_activity', last_activity, 'memory_mb', memory_mb, 'updated_at', updated_at)) FROM heartbeats;" || echo "[]"
703
+ }
704
+
705
+ # ═══════════════════════════════════════════════════════════════════════════
706
+ # Memory Failure Functions (replaces memory/*/failures.json)
707
+ # ═══════════════════════════════════════════════════════════════════════════
708
+
709
+ # db_record_failure <repo_hash> <failure_class> <error_sig> [root_cause] [fix_desc] [file_path] [stage]
710
+ db_record_failure() {
711
+ local repo_hash="$1"
712
+ local failure_class="$2"
713
+ local error_sig="${3:-}"
714
+ local root_cause="${4:-}"
715
+ local fix_desc="${5:-}"
716
+ local file_path="${6:-}"
717
+ local stage="${7:-}"
718
+ local ts
719
+ ts="$(now_iso)"
720
+
721
+ if ! db_available; then return 1; fi
722
+
723
+ # Escape quotes
724
+ error_sig="${error_sig//\'/\'\'}"
725
+ root_cause="${root_cause//\'/\'\'}"
726
+ fix_desc="${fix_desc//\'/\'\'}"
727
+
728
+ # Upsert: increment occurrences if same signature exists
729
+ _db_exec "INSERT INTO memory_failures (repo_hash, failure_class, error_signature, root_cause, fix_description, file_path, stage, occurrences, last_seen_at, created_at, synced) VALUES ('${repo_hash}', '${failure_class}', '${error_sig}', '${root_cause}', '${fix_desc}', '${file_path}', '${stage}', 1, '${ts}', '${ts}', 0) ON CONFLICT(id) DO UPDATE SET occurrences = occurrences + 1, last_seen_at = '${ts}';"
730
+ }
731
+
732
+ # db_query_similar_failures <repo_hash> [failure_class] [limit]
733
+ db_query_similar_failures() {
734
+ local repo_hash="$1"
735
+ local failure_class="${2:-}"
736
+ local limit="${3:-10}"
737
+
738
+ if ! db_available; then echo "[]"; return 0; fi
739
+
740
+ local where_clause="WHERE repo_hash = '${repo_hash}'"
741
+ [[ -n "$failure_class" ]] && where_clause="${where_clause} AND failure_class = '${failure_class}'"
742
+
743
+ _db_query "SELECT json_group_array(json_object('failure_class', failure_class, 'error_signature', error_signature, 'root_cause', root_cause, 'fix_description', fix_description, 'file_path', file_path, 'occurrences', occurrences, 'last_seen_at', last_seen_at)) FROM (SELECT * FROM memory_failures ${where_clause} ORDER BY occurrences DESC, last_seen_at DESC LIMIT ${limit});" || echo "[]"
744
+ }
745
+
746
+ # ═══════════════════════════════════════════════════════════════════════════
747
+ # Pipeline Run Functions (enhanced from existing)
748
+ # ═══════════════════════════════════════════════════════════════════════════
749
+
252
750
  add_pipeline_run() {
253
751
  local job_id="$1"
254
752
  local issue_number="${2:-0}"
@@ -257,22 +755,16 @@ add_pipeline_run() {
257
755
  local template="${5:-standard}"
258
756
 
259
757
  if ! check_sqlite3; then
260
- warn "Skipping pipeline run insert — sqlite3 not available"
261
758
  return 1
262
759
  fi
263
760
 
264
761
  local ts
265
762
  ts="$(now_iso)"
763
+ goal="${goal//\'/\'\'}"
266
764
 
267
- sqlite3 "$DB_FILE" <<EOF || return 1
268
- INSERT INTO pipeline_runs
269
- (job_id, issue_number, goal, branch, status, template, started_at, created_at)
270
- VALUES
271
- ('${job_id}', ${issue_number}, '${goal}', '${branch}', 'pending', '${template}', '${ts}', '${ts}');
272
- EOF
765
+ _db_exec "INSERT OR IGNORE INTO pipeline_runs (job_id, issue_number, goal, branch, status, template, started_at, created_at) VALUES ('${job_id}', ${issue_number}, '${goal}', '${branch}', 'pending', '${template}', '${ts}', '${ts}');" || return 1
273
766
  }
274
767
 
275
- # ─── Update Pipeline Run Status ──────────────────────────────────────────────
276
768
  update_pipeline_status() {
277
769
  local job_id="$1"
278
770
  local status="$2"
@@ -280,26 +772,14 @@ update_pipeline_status() {
280
772
  local stage_status="${4:-}"
281
773
  local duration_secs="${5:-0}"
282
774
 
283
- if ! check_sqlite3; then
284
- return 1
285
- fi
775
+ if ! check_sqlite3; then return 1; fi
286
776
 
287
777
  local ts
288
778
  ts="$(now_iso)"
289
779
 
290
- sqlite3 "$DB_FILE" <<EOF || return 1
291
- UPDATE pipeline_runs
292
- SET
293
- status = '${status}',
294
- stage_name = '${stage_name}',
295
- stage_status = '${stage_status}',
296
- duration_secs = ${duration_secs},
297
- completed_at = CASE WHEN '${status}' = 'completed' OR '${status}' = 'failed' THEN '${ts}' ELSE completed_at END
298
- WHERE job_id = '${job_id}';
299
- EOF
780
+ _db_exec "UPDATE pipeline_runs SET status = '${status}', stage_name = '${stage_name}', stage_status = '${stage_status}', duration_secs = ${duration_secs}, completed_at = CASE WHEN '${status}' IN ('completed', 'failed') THEN '${ts}' ELSE completed_at END WHERE job_id = '${job_id}';" || return 1
300
781
  }
301
782
 
302
- # ─── Record Pipeline Stage ──────────────────────────────────────────────────
303
783
  record_stage() {
304
784
  local job_id="$1"
305
785
  local stage_name="$2"
@@ -307,22 +787,15 @@ record_stage() {
307
787
  local duration_secs="${4:-0}"
308
788
  local error_msg="${5:-}"
309
789
 
310
- if ! check_sqlite3; then
311
- return 1
312
- fi
790
+ if ! check_sqlite3; then return 1; fi
313
791
 
314
792
  local ts
315
793
  ts="$(now_iso)"
794
+ error_msg="${error_msg//\'/\'\'}"
316
795
 
317
- sqlite3 "$DB_FILE" <<EOF || return 1
318
- INSERT INTO pipeline_stages
319
- (job_id, stage_name, status, started_at, completed_at, duration_secs, error_message, created_at)
320
- VALUES
321
- ('${job_id}', '${stage_name}', '${status}', '${ts}', '${ts}', ${duration_secs}, '${error_msg}', '${ts}');
322
- EOF
796
+ _db_exec "INSERT INTO pipeline_stages (job_id, stage_name, status, started_at, completed_at, duration_secs, error_message, created_at) VALUES ('${job_id}', '${stage_name}', '${status}', '${ts}', '${ts}', ${duration_secs}, '${error_msg}', '${ts}');" || return 1
323
797
  }
324
798
 
325
- # ─── Query Pipeline Runs ─────────────────────────────────────────────────────
326
799
  query_runs() {
327
800
  local status="${1:-}"
328
801
  local limit="${2:-50}"
@@ -339,45 +812,277 @@ query_runs() {
339
812
  sqlite3 -header -column "$DB_FILE" "$query"
340
813
  }
341
814
 
342
- # ─── Export Database to JSON ─────────────────────────────────────────────────
343
- export_db() {
344
- local output_file="${1:-${DB_DIR}/shipwright-backup.json}"
815
+ # ═══════════════════════════════════════════════════════════════════════════
816
+ # Sync Functions (HTTP-based, vendor-neutral)
817
+ # ═══════════════════════════════════════════════════════════════════════════
818
+
819
+ # Load sync configuration
820
+ _sync_load_config() {
821
+ if [[ ! -f "$SYNC_CONFIG_FILE" ]]; then
822
+ return 1
823
+ fi
824
+ SYNC_URL=$(jq -r '.url // empty' "$SYNC_CONFIG_FILE" 2>/dev/null || true)
825
+ SYNC_TOKEN=$(jq -r '.token // empty' "$SYNC_CONFIG_FILE" 2>/dev/null || true)
826
+ [[ -n "$SYNC_URL" ]]
827
+ }
828
+
829
+ # db_sync_push — push unsynced rows to remote endpoint
830
+ db_sync_push() {
831
+ if ! db_available; then return 1; fi
832
+ if ! _sync_load_config; then
833
+ warn "Sync not configured. Set up ${SYNC_CONFIG_FILE}"
834
+ return 1
835
+ fi
836
+
837
+ local device_id
838
+ device_id=$(_db_query "SELECT value FROM _sync_metadata WHERE key = 'device_id';" || echo "unknown")
839
+
840
+ # Collect unsynced events
841
+ local unsynced_events
842
+ unsynced_events=$(_db_query "SELECT json_group_array(json_object('ts', ts, 'ts_epoch', ts_epoch, 'type', type, 'job_id', job_id, 'stage', stage, 'status', status, 'metadata', metadata)) FROM events WHERE synced = 0 LIMIT 500;" || echo "[]")
843
+
844
+ # Collect unsynced cost entries
845
+ local unsynced_costs
846
+ unsynced_costs=$(_db_query "SELECT json_group_array(json_object('input_tokens', input_tokens, 'output_tokens', output_tokens, 'model', model, 'stage', stage, 'cost_usd', cost_usd, 'ts', ts, 'ts_epoch', ts_epoch)) FROM cost_entries WHERE synced = 0 LIMIT 500;" || echo "[]")
847
+
848
+ # Build payload
849
+ local payload
850
+ payload=$(jq -n \
851
+ --arg device "$device_id" \
852
+ --argjson events "$unsynced_events" \
853
+ --argjson costs "$unsynced_costs" \
854
+ '{device_id: $device, events: $events, costs: $costs}')
855
+
856
+ # Push via HTTP
857
+ local response
858
+ local auth_header=""
859
+ [[ -n "${SYNC_TOKEN:-}" ]] && auth_header="-H 'Authorization: Bearer ${SYNC_TOKEN}'"
860
+
861
+ response=$(curl -s -w "%{http_code}" -o /dev/null \
862
+ -X POST "${SYNC_URL}/api/sync/push" \
863
+ -H "Content-Type: application/json" \
864
+ ${auth_header} \
865
+ -d "$payload" 2>/dev/null || echo "000")
866
+
867
+ if [[ "$response" == "200" || "$response" == "201" ]]; then
868
+ # Mark as synced
869
+ _db_exec "UPDATE events SET synced = 1 WHERE synced = 0;"
870
+ _db_exec "UPDATE cost_entries SET synced = 1 WHERE synced = 0;"
871
+ success "Pushed unsynced data to ${SYNC_URL}"
872
+ return 0
873
+ else
874
+ warn "Sync push failed (HTTP ${response})"
875
+ return 1
876
+ fi
877
+ }
878
+
879
+ # db_sync_pull — pull new rows from remote endpoint
880
+ db_sync_pull() {
881
+ if ! db_available; then return 1; fi
882
+ if ! _sync_load_config; then
883
+ warn "Sync not configured. Set up ${SYNC_CONFIG_FILE}"
884
+ return 1
885
+ fi
886
+
887
+ local last_sync
888
+ last_sync=$(_db_query "SELECT value FROM _sync_metadata WHERE key = 'last_pull_epoch';" || echo "0")
345
889
 
890
+ local auth_header=""
891
+ [[ -n "${SYNC_TOKEN:-}" ]] && auth_header="-H 'Authorization: Bearer ${SYNC_TOKEN}'"
892
+
893
+ local response_body
894
+ response_body=$(curl -s \
895
+ "${SYNC_URL}/api/sync/pull?since=${last_sync}" \
896
+ -H "Accept: application/json" \
897
+ ${auth_header} 2>/dev/null || echo "{}")
898
+
899
+ if ! echo "$response_body" | jq empty 2>/dev/null; then
900
+ warn "Sync pull returned invalid JSON"
901
+ return 1
902
+ fi
903
+
904
+ # Import events
905
+ local event_count=0
906
+ while IFS= read -r evt; do
907
+ [[ -z "$evt" || "$evt" == "null" ]] && continue
908
+ local e_ts e_epoch e_type e_job
909
+ e_ts=$(echo "$evt" | jq -r '.ts // ""')
910
+ e_epoch=$(echo "$evt" | jq -r '.ts_epoch // 0')
911
+ e_type=$(echo "$evt" | jq -r '.type // ""')
912
+ e_job=$(echo "$evt" | jq -r '.job_id // ""')
913
+ _db_exec "INSERT OR IGNORE INTO events (ts, ts_epoch, type, job_id, created_at, synced) VALUES ('${e_ts}', ${e_epoch}, '${e_type}', '${e_job}', '${e_ts}', 1);" 2>/dev/null && event_count=$((event_count + 1))
914
+ done < <(echo "$response_body" | jq -c '.events[]' 2>/dev/null)
915
+
916
+ # Update last pull timestamp
917
+ _db_exec "INSERT OR REPLACE INTO _sync_metadata (key, value, updated_at) VALUES ('last_pull_epoch', '$(now_epoch)', '$(now_iso)');"
918
+
919
+ success "Pulled ${event_count} new events from ${SYNC_URL}"
920
+ }
921
+
922
+ # ═══════════════════════════════════════════════════════════════════════════
923
+ # JSON Migration (import existing state files into SQLite)
924
+ # ═══════════════════════════════════════════════════════════════════════════
925
+
926
+ migrate_json_data() {
346
927
  if ! check_sqlite3; then
347
- warn "Cannot export sqlite3 not available"
928
+ error "sqlite3 required for migration"
348
929
  return 1
349
930
  fi
350
931
 
351
- info "Exporting database to ${output_file}..."
932
+ ensure_db_dir
933
+ migrate_schema
934
+
935
+ local total_imported=0
936
+
937
+ # 1. Import events.jsonl
938
+ if [[ -f "$EVENTS_FILE" ]]; then
939
+ info "Importing events from ${EVENTS_FILE}..."
940
+ local evt_count=0
941
+ local evt_skipped=0
942
+ while IFS= read -r line; do
943
+ [[ -z "$line" ]] && continue
944
+ local e_ts e_epoch e_type e_job e_stage e_status
945
+ e_ts=$(echo "$line" | jq -r '.ts // ""' 2>/dev/null || continue)
946
+ e_epoch=$(echo "$line" | jq -r '.ts_epoch // 0' 2>/dev/null || continue)
947
+ e_type=$(echo "$line" | jq -r '.type // ""' 2>/dev/null || continue)
948
+ e_job=$(echo "$line" | jq -r '.job_id // ""' 2>/dev/null || true)
949
+ e_stage=$(echo "$line" | jq -r '.stage // ""' 2>/dev/null || true)
950
+ e_status=$(echo "$line" | jq -r '.status // ""' 2>/dev/null || true)
951
+
952
+ if _db_exec "INSERT OR IGNORE INTO events (ts, ts_epoch, type, job_id, stage, status, created_at, synced) VALUES ('${e_ts}', ${e_epoch}, '${e_type}', '${e_job}', '${e_stage}', '${e_status}', '${e_ts}', 0);" 2>/dev/null; then
953
+ evt_count=$((evt_count + 1))
954
+ else
955
+ evt_skipped=$((evt_skipped + 1))
956
+ fi
957
+ done < "$EVENTS_FILE"
958
+ success "Events: ${evt_count} imported, ${evt_skipped} skipped (duplicates)"
959
+ total_imported=$((total_imported + evt_count))
960
+ fi
352
961
 
353
- local tmp_file
354
- tmp_file=$(mktemp)
962
+ # 2. Import daemon-state.json
963
+ if [[ -f "$DAEMON_STATE_FILE" ]]; then
964
+ info "Importing daemon state from ${DAEMON_STATE_FILE}..."
965
+ local job_count=0
966
+
967
+ # Import completed jobs
968
+ while IFS= read -r job; do
969
+ [[ -z "$job" || "$job" == "null" ]] && continue
970
+ local j_issue j_result j_dur j_at
971
+ j_issue=$(echo "$job" | jq -r '.issue // 0')
972
+ j_result=$(echo "$job" | jq -r '.result // ""')
973
+ j_dur=$(echo "$job" | jq -r '.duration // ""')
974
+ j_at=$(echo "$job" | jq -r '.completed_at // ""')
975
+ local j_id="migrated-${j_issue}-$(echo "$j_at" | tr -dc '0-9' | tail -c 10)"
976
+ _db_exec "INSERT OR IGNORE INTO daemon_state (job_id, issue_number, status, result, duration, completed_at, started_at, updated_at) VALUES ('${j_id}', ${j_issue}, 'completed', '${j_result}', '${j_dur}', '${j_at}', '${j_at}', '$(now_iso)');" 2>/dev/null && job_count=$((job_count + 1))
977
+ done < <(jq -c '.completed[]' "$DAEMON_STATE_FILE" 2>/dev/null)
978
+
979
+ success "Daemon state: ${job_count} completed jobs imported"
980
+ total_imported=$((total_imported + job_count))
981
+ fi
982
+
983
+ # 3. Import costs.json
984
+ if [[ -f "$COST_FILE_JSON" ]]; then
985
+ info "Importing costs from ${COST_FILE_JSON}..."
986
+ local cost_count=0
987
+ while IFS= read -r entry; do
988
+ [[ -z "$entry" || "$entry" == "null" ]] && continue
989
+ local c_input c_output c_model c_stage c_issue c_cost c_ts c_epoch
990
+ c_input=$(echo "$entry" | jq -r '.input_tokens // 0')
991
+ c_output=$(echo "$entry" | jq -r '.output_tokens // 0')
992
+ c_model=$(echo "$entry" | jq -r '.model // "sonnet"')
993
+ c_stage=$(echo "$entry" | jq -r '.stage // "unknown"')
994
+ c_issue=$(echo "$entry" | jq -r '.issue // ""')
995
+ c_cost=$(echo "$entry" | jq -r '.cost_usd // 0')
996
+ c_ts=$(echo "$entry" | jq -r '.ts // ""')
997
+ c_epoch=$(echo "$entry" | jq -r '.ts_epoch // 0')
998
+ _db_exec "INSERT INTO cost_entries (input_tokens, output_tokens, model, stage, issue, cost_usd, ts, ts_epoch, synced) VALUES (${c_input}, ${c_output}, '${c_model}', '${c_stage}', '${c_issue}', ${c_cost}, '${c_ts}', ${c_epoch}, 0);" 2>/dev/null && cost_count=$((cost_count + 1))
999
+ done < <(jq -c '.entries[]' "$COST_FILE_JSON" 2>/dev/null)
1000
+
1001
+ success "Costs: ${cost_count} entries imported"
1002
+ total_imported=$((total_imported + cost_count))
1003
+ fi
1004
+
1005
+ # 4. Import budget.json
1006
+ if [[ -f "$BUDGET_FILE_JSON" ]]; then
1007
+ info "Importing budget from ${BUDGET_FILE_JSON}..."
1008
+ local b_amount b_enabled
1009
+ b_amount=$(jq -r '.daily_budget_usd // 0' "$BUDGET_FILE_JSON" 2>/dev/null || echo "0")
1010
+ b_enabled=$(jq -r '.enabled // false' "$BUDGET_FILE_JSON" 2>/dev/null || echo "false")
1011
+ local b_flag=0
1012
+ [[ "$b_enabled" == "true" ]] && b_flag=1
1013
+ _db_exec "INSERT OR REPLACE INTO budgets (id, daily_budget_usd, enabled, updated_at) VALUES (1, ${b_amount}, ${b_flag}, '$(now_iso)');" && success "Budget: imported (\$${b_amount}, enabled=${b_enabled})"
1014
+ fi
1015
+
1016
+ # 5. Import heartbeats/*.json
1017
+ if [[ -d "$HEARTBEAT_DIR" ]]; then
1018
+ info "Importing heartbeats..."
1019
+ local hb_count=0
1020
+ for hb_file in "${HEARTBEAT_DIR}"/*.json; do
1021
+ [[ -f "$hb_file" ]] || continue
1022
+ local hb_job hb_pid hb_issue hb_stage hb_iter hb_activity hb_mem hb_updated
1023
+ hb_job="$(basename "$hb_file" .json)"
1024
+ hb_pid=$(jq -r '.pid // 0' "$hb_file" 2>/dev/null || echo "0")
1025
+ hb_issue=$(jq -r '.issue // 0' "$hb_file" 2>/dev/null || echo "0")
1026
+ hb_stage=$(jq -r '.stage // ""' "$hb_file" 2>/dev/null || echo "")
1027
+ hb_iter=$(jq -r '.iteration // 0' "$hb_file" 2>/dev/null || echo "0")
1028
+ hb_activity=$(jq -r '.last_activity // ""' "$hb_file" 2>/dev/null || echo "")
1029
+ hb_mem=$(jq -r '.memory_mb // 0' "$hb_file" 2>/dev/null || echo "0")
1030
+ hb_updated=$(jq -r '.updated_at // ""' "$hb_file" 2>/dev/null || echo "$(now_iso)")
1031
+
1032
+ hb_activity="${hb_activity//\'/\'\'}"
1033
+ _db_exec "INSERT OR REPLACE INTO heartbeats (job_id, pid, issue, stage, iteration, last_activity, memory_mb, updated_at) VALUES ('${hb_job}', ${hb_pid}, ${hb_issue}, '${hb_stage}', ${hb_iter}, '${hb_activity}', ${hb_mem}, '${hb_updated}');" 2>/dev/null && hb_count=$((hb_count + 1))
1034
+ done
1035
+ success "Heartbeats: ${hb_count} imported"
1036
+ total_imported=$((total_imported + hb_count))
1037
+ fi
1038
+
1039
+ echo ""
1040
+ success "Migration complete: ${total_imported} total records imported"
355
1041
 
356
- {
357
- echo "{"
358
- echo " \"exported_at\": \"$(now_iso)\","
359
- echo " \"events\": ["
1042
+ # Verify counts
1043
+ echo ""
1044
+ info "Verification:"
1045
+ local db_events db_costs db_hb
1046
+ db_events=$(_db_query "SELECT COUNT(*) FROM events;" || echo "0")
1047
+ db_costs=$(_db_query "SELECT COUNT(*) FROM cost_entries;" || echo "0")
1048
+ db_hb=$(_db_query "SELECT COUNT(*) FROM heartbeats;" || echo "0")
1049
+ echo " Events in DB: ${db_events}"
1050
+ echo " Cost entries: ${db_costs}"
1051
+ echo " Heartbeats: ${db_hb}"
1052
+ }
360
1053
 
361
- sqlite3 -json "$DB_FILE" "SELECT * FROM events ORDER BY ts_epoch DESC LIMIT 1000;" | sed '1s/\[//' | sed '$s/\]//' >> "$tmp_file"
1054
+ # ═══════════════════════════════════════════════════════════════════════════
1055
+ # Export / Status / Cleanup
1056
+ # ═══════════════════════════════════════════════════════════════════════════
362
1057
 
363
- echo " ],"
364
- echo " \"pipeline_runs\": ["
1058
+ export_db() {
1059
+ local output_file="${1:-${DB_DIR}/shipwright-backup.json}"
365
1060
 
366
- sqlite3 -json "$DB_FILE" "SELECT * FROM pipeline_runs ORDER BY created_at DESC LIMIT 500;" | sed '1s/\[//' | sed '$s/\]//' >> "$tmp_file"
1061
+ if ! check_sqlite3; then
1062
+ warn "Cannot export — sqlite3 not available"
1063
+ return 1
1064
+ fi
367
1065
 
368
- echo " ],"
369
- echo " \"developers\": ["
1066
+ info "Exporting database to ${output_file}..."
370
1067
 
371
- sqlite3 -json "$DB_FILE" "SELECT * FROM developers;" | sed '1s/\[//' | sed '$s/\]//' >> "$tmp_file"
1068
+ local events_json runs_json costs_json
1069
+ events_json=$(_db_query "SELECT json_group_array(json_object('ts', ts, 'type', type, 'job_id', job_id, 'stage', stage, 'status', status)) FROM (SELECT * FROM events ORDER BY ts_epoch DESC LIMIT 1000);" || echo "[]")
1070
+ runs_json=$(_db_query "SELECT json_group_array(json_object('job_id', job_id, 'goal', goal, 'status', status, 'template', template, 'started_at', started_at)) FROM (SELECT * FROM pipeline_runs ORDER BY created_at DESC LIMIT 500);" || echo "[]")
1071
+ costs_json=$(_db_query "SELECT json_group_array(json_object('model', model, 'stage', stage, 'cost_usd', cost_usd, 'ts', ts)) FROM (SELECT * FROM cost_entries ORDER BY ts_epoch DESC LIMIT 1000);" || echo "[]")
372
1072
 
373
- echo " ]"
374
- echo "}"
375
- } > "$output_file"
1073
+ local tmp_file
1074
+ tmp_file=$(mktemp "${output_file}.tmp.XXXXXX")
1075
+ jq -n \
1076
+ --arg exported_at "$(now_iso)" \
1077
+ --argjson events "$events_json" \
1078
+ --argjson pipeline_runs "$runs_json" \
1079
+ --argjson cost_entries "$costs_json" \
1080
+ '{exported_at: $exported_at, events: $events, pipeline_runs: $pipeline_runs, cost_entries: $cost_entries}' \
1081
+ > "$tmp_file" && mv "$tmp_file" "$output_file" || { rm -f "$tmp_file"; return 1; }
376
1082
 
377
1083
  success "Database exported to ${output_file}"
378
1084
  }
379
1085
 
380
- # ─── Import Data from JSON ──────────────────────────────────────────────────
381
1086
  import_db() {
382
1087
  local input_file="$1"
383
1088
 
@@ -392,12 +1097,9 @@ import_db() {
392
1097
  fi
393
1098
 
394
1099
  info "Importing data from ${input_file}..."
395
-
396
- # This is a simplified import; a full implementation would parse JSON and insert each record
397
- warn "Full JSON import not yet implemented — copy database file manually or use CLI commands to rebuild"
1100
+ warn "Full JSON import not yet implemented — use 'shipwright db migrate' to import from state files"
398
1101
  }
399
1102
 
400
- # ─── Show Database Status ────────────────────────────────────────────────────
401
1103
  show_status() {
402
1104
  if ! check_sqlite3; then
403
1105
  warn "sqlite3 not available"
@@ -418,27 +1120,68 @@ show_status() {
418
1120
  echo -e "${DIM}Database: ${DB_FILE}${RESET}"
419
1121
  echo ""
420
1122
 
421
- local event_count pipeline_count stage_count developer_count session_count metric_count
422
- event_count=$(sqlite3 "$DB_FILE" "SELECT COUNT(*) FROM events;" 2>/dev/null || echo "0")
423
- pipeline_count=$(sqlite3 "$DB_FILE" "SELECT COUNT(*) FROM pipeline_runs;" 2>/dev/null || echo "0")
424
- stage_count=$(sqlite3 "$DB_FILE" "SELECT COUNT(*) FROM pipeline_stages;" 2>/dev/null || echo "0")
425
- developer_count=$(sqlite3 "$DB_FILE" "SELECT COUNT(*) FROM developers;" 2>/dev/null || echo "0")
426
- session_count=$(sqlite3 "$DB_FILE" "SELECT COUNT(*) FROM sessions;" 2>/dev/null || echo "0")
427
- metric_count=$(sqlite3 "$DB_FILE" "SELECT COUNT(*) FROM metrics;" 2>/dev/null || echo "0")
428
-
429
- echo -e "${CYAN}Events${RESET} ${event_count} records"
430
- echo -e "${CYAN}Pipeline Runs${RESET} ${pipeline_count} records"
431
- echo -e "${CYAN}Pipeline Stages${RESET} ${stage_count} records"
432
- echo -e "${CYAN}Developers${RESET} ${developer_count} records"
433
- echo -e "${CYAN}Sessions${RESET} ${session_count} records"
434
- echo -e "${CYAN}Metrics${RESET} ${metric_count} records"
1123
+ # WAL mode check
1124
+ local journal_mode
1125
+ journal_mode=$(_db_query "PRAGMA journal_mode;" || echo "unknown")
1126
+ echo -e "${DIM}Journal mode: ${journal_mode}${RESET}"
1127
+
1128
+ # Schema version
1129
+ local schema_v
1130
+ schema_v=$(_db_query "SELECT COALESCE(MAX(version), 0) FROM _schema;" || echo "0")
1131
+ echo -e "${DIM}Schema version: ${schema_v}${RESET}"
1132
+
1133
+ # DB file size
1134
+ local db_size
1135
+ if [[ -f "$DB_FILE" ]]; then
1136
+ db_size=$(ls -lh "$DB_FILE" 2>/dev/null | awk '{print $5}')
1137
+ echo -e "${DIM}File size: ${db_size}${RESET}"
1138
+ fi
1139
+ echo ""
1140
+
1141
+ local event_count pipeline_count stage_count daemon_count cost_count hb_count failure_count
1142
+ event_count=$(_db_query "SELECT COUNT(*) FROM events;" || echo "0")
1143
+ pipeline_count=$(_db_query "SELECT COUNT(*) FROM pipeline_runs;" || echo "0")
1144
+ stage_count=$(_db_query "SELECT COUNT(*) FROM pipeline_stages;" || echo "0")
1145
+ daemon_count=$(_db_query "SELECT COUNT(*) FROM daemon_state;" || echo "0")
1146
+ cost_count=$(_db_query "SELECT COUNT(*) FROM cost_entries;" || echo "0")
1147
+ hb_count=$(_db_query "SELECT COUNT(*) FROM heartbeats;" || echo "0")
1148
+ failure_count=$(_db_query "SELECT COUNT(*) FROM memory_failures;" || echo "0")
1149
+
1150
+ echo -e "${CYAN}Events${RESET} ${event_count} records"
1151
+ echo -e "${CYAN}Pipeline Runs${RESET} ${pipeline_count} records"
1152
+ echo -e "${CYAN}Pipeline Stages${RESET} ${stage_count} records"
1153
+ echo -e "${CYAN}Daemon Jobs${RESET} ${daemon_count} records"
1154
+ echo -e "${CYAN}Cost Entries${RESET} ${cost_count} records"
1155
+ echo -e "${CYAN}Heartbeats${RESET} ${hb_count} records"
1156
+ echo -e "${CYAN}Failure Patterns${RESET} ${failure_count} records"
1157
+
1158
+ # Sync status
1159
+ local device_id last_push last_pull
1160
+ device_id=$(_db_query "SELECT value FROM _sync_metadata WHERE key = 'device_id';" || echo "not set")
1161
+ last_push=$(_db_query "SELECT value FROM _sync_metadata WHERE key = 'last_push_epoch';" || echo "never")
1162
+ last_pull=$(_db_query "SELECT value FROM _sync_metadata WHERE key = 'last_pull_epoch';" || echo "never")
1163
+ local unsynced_events unsynced_costs
1164
+ unsynced_events=$(_db_query "SELECT COUNT(*) FROM events WHERE synced = 0;" || echo "0")
1165
+ unsynced_costs=$(_db_query "SELECT COUNT(*) FROM cost_entries WHERE synced = 0;" || echo "0")
1166
+
1167
+ echo ""
1168
+ echo -e "${BOLD}Sync${RESET}"
1169
+ echo -e " Device: ${DIM}${device_id}${RESET}"
1170
+ echo -e " Unsynced events: ${unsynced_events}"
1171
+ echo -e " Unsynced costs: ${unsynced_costs}"
1172
+ if [[ -f "$SYNC_CONFIG_FILE" ]]; then
1173
+ local sync_url
1174
+ sync_url=$(jq -r '.url // "not configured"' "$SYNC_CONFIG_FILE" 2>/dev/null || echo "not configured")
1175
+ echo -e " Remote: ${DIM}${sync_url}${RESET}"
1176
+ else
1177
+ echo -e " Remote: ${DIM}not configured${RESET}"
1178
+ fi
435
1179
 
436
1180
  echo ""
437
1181
  echo -e "${BOLD}Recent Runs${RESET}"
438
- sqlite3 -header -column "$DB_FILE" "SELECT job_id, goal, status, template, datetime(started_at) as started FROM pipeline_runs ORDER BY created_at DESC LIMIT 5;"
1182
+ sqlite3 -header -column "$DB_FILE" "SELECT job_id, goal, status, template, datetime(started_at) as started FROM pipeline_runs ORDER BY created_at DESC LIMIT 5;" 2>/dev/null || echo " (none)"
439
1183
  }
440
1184
 
441
- # ─── Clean Old Records ──────────────────────────────────────────────────────
442
1185
  cleanup_old_data() {
443
1186
  local days="${1:-30}"
444
1187
 
@@ -447,18 +1190,95 @@ cleanup_old_data() {
447
1190
  return 1
448
1191
  fi
449
1192
 
1193
+ local cutoff_epoch
1194
+ cutoff_epoch=$(( $(now_epoch) - (days * 86400) ))
450
1195
  local cutoff_date
451
- cutoff_date=$(date -u -d "-${days} days" +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null || \
452
- date -u -v-${days}d +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null || \
1196
+ cutoff_date=$(date -u -r "$cutoff_epoch" +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null || \
1197
+ date -u -d "@${cutoff_epoch}" +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null || \
453
1198
  date -u +"%Y-%m-%dT%H:%M:%SZ")
454
1199
 
455
1200
  info "Cleaning records older than ${days} days (before ${cutoff_date})..."
456
1201
 
457
- local deleted_events deleted_runs
458
- deleted_events=$(sqlite3 "$DB_FILE" "DELETE FROM events WHERE ts < '${cutoff_date}' RETURNING COUNT(*);" 2>/dev/null || echo "0")
459
- deleted_runs=$(sqlite3 "$DB_FILE" "DELETE FROM pipeline_runs WHERE created_at < '${cutoff_date}' RETURNING COUNT(*);" 2>/dev/null || echo "0")
1202
+ local d_events d_costs d_daemon d_stages
1203
+ _db_exec "DELETE FROM events WHERE ts < '${cutoff_date}';"
1204
+ d_events=$(_db_query "SELECT changes();" || echo "0")
1205
+ _db_exec "DELETE FROM cost_entries WHERE ts < '${cutoff_date}';"
1206
+ d_costs=$(_db_query "SELECT changes();" || echo "0")
1207
+ _db_exec "DELETE FROM daemon_state WHERE updated_at < '${cutoff_date}' AND status != 'active';"
1208
+ d_daemon=$(_db_query "SELECT changes();" || echo "0")
1209
+ _db_exec "DELETE FROM pipeline_stages WHERE created_at < '${cutoff_date}';"
1210
+ d_stages=$(_db_query "SELECT changes();" || echo "0")
1211
+
1212
+ success "Deleted: ${d_events} events, ${d_costs} costs, ${d_daemon} daemon jobs, ${d_stages} stages"
1213
+
1214
+ # VACUUM to reclaim space
1215
+ _db_exec "VACUUM;" 2>/dev/null || true
1216
+ }
1217
+
1218
+ # ═══════════════════════════════════════════════════════════════════════════
1219
+ # Health Check (used by sw-doctor.sh)
1220
+ # ═══════════════════════════════════════════════════════════════════════════
1221
+
1222
+ db_health_check() {
1223
+ local pass=0 fail=0
1224
+
1225
+ # sqlite3 binary
1226
+ if check_sqlite3; then
1227
+ echo -e " ${GREEN}${BOLD}✓${RESET} sqlite3 available"
1228
+ pass=$((pass + 1))
1229
+ else
1230
+ echo -e " ${RED}${BOLD}✗${RESET} sqlite3 not installed"
1231
+ fail=$((fail + 1))
1232
+ echo " ${pass} passed, ${fail} failed"
1233
+ return $fail
1234
+ fi
1235
+
1236
+ # DB file exists
1237
+ if [[ -f "$DB_FILE" ]]; then
1238
+ echo -e " ${GREEN}${BOLD}✓${RESET} Database file exists: ${DB_FILE}"
1239
+ pass=$((pass + 1))
1240
+ else
1241
+ echo -e " ${YELLOW}${BOLD}⚠${RESET} Database not initialized — run: shipwright db init"
1242
+ fail=$((fail + 1))
1243
+ echo " ${pass} passed, ${fail} failed"
1244
+ return $fail
1245
+ fi
1246
+
1247
+ # Schema version
1248
+ local sv
1249
+ sv=$(_db_query "SELECT COALESCE(MAX(version), 0) FROM _schema;" || echo "0")
1250
+ if [[ "$sv" -ge "$SCHEMA_VERSION" ]]; then
1251
+ echo -e " ${GREEN}${BOLD}✓${RESET} Schema version: v${sv}"
1252
+ pass=$((pass + 1))
1253
+ else
1254
+ echo -e " ${YELLOW}${BOLD}⚠${RESET} Schema version: v${sv} (expected v${SCHEMA_VERSION}) — run: shipwright db migrate"
1255
+ fail=$((fail + 1))
1256
+ fi
1257
+
1258
+ # WAL mode
1259
+ local jm
1260
+ jm=$(_db_query "PRAGMA journal_mode;" || echo "unknown")
1261
+ if [[ "$jm" == "wal" ]]; then
1262
+ echo -e " ${GREEN}${BOLD}✓${RESET} WAL mode enabled"
1263
+ pass=$((pass + 1))
1264
+ else
1265
+ echo -e " ${YELLOW}${BOLD}⚠${RESET} Journal mode: ${jm} (WAL recommended) — run: shipwright db init"
1266
+ fail=$((fail + 1))
1267
+ fi
1268
+
1269
+ # Integrity check
1270
+ local integrity
1271
+ integrity=$(_db_query "PRAGMA integrity_check;" || echo "error")
1272
+ if [[ "$integrity" == "ok" ]]; then
1273
+ echo -e " ${GREEN}${BOLD}✓${RESET} Integrity check passed"
1274
+ pass=$((pass + 1))
1275
+ else
1276
+ echo -e " ${RED}${BOLD}✗${RESET} Integrity check failed: ${integrity}"
1277
+ fail=$((fail + 1))
1278
+ fi
460
1279
 
461
- success "Deleted ${deleted_events} old events and ${deleted_runs} old pipeline runs"
1280
+ echo " ${pass} passed, ${fail} failed"
1281
+ return $fail
462
1282
  }
463
1283
 
464
1284
  # ─── Show Help ──────────────────────────────────────────────────────────────
@@ -469,20 +1289,25 @@ show_help() {
469
1289
  echo -e " shipwright db <command> [options]"
470
1290
  echo ""
471
1291
  echo -e "${BOLD}COMMANDS${RESET}"
472
- echo -e " ${CYAN}init${RESET} Initialize database schema"
473
- echo -e " ${CYAN}migrate${RESET} Apply schema migrations"
474
- echo -e " ${CYAN}status${RESET} Show database stats and recent runs"
1292
+ echo -e " ${CYAN}init${RESET} Initialize database schema (creates DB, enables WAL)"
1293
+ echo -e " ${CYAN}migrate${RESET} Apply schema migrations + import JSON state files"
1294
+ echo -e " ${CYAN}status${RESET} Show database stats, sync status, recent runs"
475
1295
  echo -e " ${CYAN}query${RESET} [status] Query pipeline runs by status"
476
1296
  echo -e " ${CYAN}export${RESET} [file] Export database to JSON backup"
477
1297
  echo -e " ${CYAN}import${RESET} <file> Import data from JSON backup"
478
1298
  echo -e " ${CYAN}cleanup${RESET} [days] Delete records older than N days (default 30)"
1299
+ echo -e " ${CYAN}health${RESET} Run database health checks"
1300
+ echo -e " ${CYAN}sync push${RESET} Push unsynced data to remote"
1301
+ echo -e " ${CYAN}sync pull${RESET} Pull new data from remote"
479
1302
  echo -e " ${CYAN}help${RESET} Show this help"
480
1303
  echo ""
481
1304
  echo -e "${DIM}Examples:${RESET}"
482
1305
  echo -e " shipwright db init"
1306
+ echo -e " shipwright db migrate # Import events.jsonl, costs.json, etc."
483
1307
  echo -e " shipwright db status"
484
1308
  echo -e " shipwright db query failed"
485
- echo -e " shipwright db export ~/backups/db-backup.json"
1309
+ echo -e " shipwright db health"
1310
+ echo -e " shipwright db sync push"
486
1311
  echo -e " shipwright db cleanup 60"
487
1312
  }
488
1313
 
@@ -495,10 +1320,13 @@ main() {
495
1320
  init)
496
1321
  ensure_db_dir
497
1322
  init_schema
498
- success "Database initialized at ${DB_FILE}"
1323
+ # Set schema version
1324
+ _db_exec "INSERT OR REPLACE INTO _schema (version, created_at, applied_at) VALUES (${SCHEMA_VERSION}, '$(now_iso)', '$(now_iso)');" 2>/dev/null || true
1325
+ _db_exec "INSERT OR IGNORE INTO _sync_metadata (key, value, updated_at) VALUES ('device_id', '$(uname -n)-$$-$(now_epoch)', '$(now_iso)');" 2>/dev/null || true
1326
+ success "Database initialized at ${DB_FILE} (WAL mode, schema v${SCHEMA_VERSION})"
499
1327
  ;;
500
1328
  migrate)
501
- migrate_schema
1329
+ migrate_json_data
502
1330
  ;;
503
1331
  status)
504
1332
  show_status
@@ -523,6 +1351,18 @@ main() {
523
1351
  local days="${1:-30}"
524
1352
  cleanup_old_data "$days"
525
1353
  ;;
1354
+ health)
1355
+ db_health_check
1356
+ ;;
1357
+ sync)
1358
+ local sync_cmd="${1:-help}"
1359
+ shift 2>/dev/null || true
1360
+ case "$sync_cmd" in
1361
+ push) db_sync_push ;;
1362
+ pull) db_sync_pull ;;
1363
+ *) echo "Usage: shipwright db sync {push|pull}"; exit 1 ;;
1364
+ esac
1365
+ ;;
526
1366
  help|--help|-h)
527
1367
  show_help
528
1368
  ;;