shipwright-cli 2.4.0 → 3.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (169)
  1. package/README.md +16 -11
  2. package/completions/_shipwright +248 -94
  3. package/completions/shipwright.bash +68 -19
  4. package/completions/shipwright.fish +310 -42
  5. package/config/decision-tiers.json +55 -0
  6. package/config/defaults.json +111 -0
  7. package/config/event-schema.json +218 -0
  8. package/config/policy.json +21 -18
  9. package/dashboard/coverage/coverage-summary.json +14 -0
  10. package/dashboard/public/index.html +1 -1
  11. package/dashboard/server.ts +306 -17
  12. package/dashboard/src/components/charts/bar.test.ts +79 -0
  13. package/dashboard/src/components/charts/donut.test.ts +68 -0
  14. package/dashboard/src/components/charts/pipeline-rail.test.ts +117 -0
  15. package/dashboard/src/components/charts/sparkline.test.ts +125 -0
  16. package/dashboard/src/core/api.test.ts +309 -0
  17. package/dashboard/src/core/helpers.test.ts +301 -0
  18. package/dashboard/src/core/router.test.ts +307 -0
  19. package/dashboard/src/core/router.ts +7 -0
  20. package/dashboard/src/core/sse.test.ts +144 -0
  21. package/dashboard/src/views/metrics.test.ts +186 -0
  22. package/dashboard/src/views/overview.test.ts +173 -0
  23. package/dashboard/src/views/pipelines.test.ts +183 -0
  24. package/dashboard/src/views/team.test.ts +253 -0
  25. package/dashboard/vitest.config.ts +14 -5
  26. package/docs/TIPS.md +1 -1
  27. package/docs/patterns/README.md +1 -1
  28. package/package.json +7 -9
  29. package/scripts/adapters/docker-deploy.sh +1 -1
  30. package/scripts/adapters/tmux-adapter.sh +11 -1
  31. package/scripts/adapters/wezterm-adapter.sh +1 -1
  32. package/scripts/check-version-consistency.sh +1 -1
  33. package/scripts/lib/architecture.sh +127 -0
  34. package/scripts/lib/bootstrap.sh +75 -0
  35. package/scripts/lib/compat.sh +89 -6
  36. package/scripts/lib/config.sh +91 -0
  37. package/scripts/lib/daemon-adaptive.sh +3 -3
  38. package/scripts/lib/daemon-dispatch.sh +63 -17
  39. package/scripts/lib/daemon-failure.sh +0 -0
  40. package/scripts/lib/daemon-health.sh +1 -1
  41. package/scripts/lib/daemon-patrol.sh +64 -17
  42. package/scripts/lib/daemon-poll.sh +54 -25
  43. package/scripts/lib/daemon-state.sh +125 -23
  44. package/scripts/lib/daemon-triage.sh +31 -9
  45. package/scripts/lib/decide-autonomy.sh +295 -0
  46. package/scripts/lib/decide-scoring.sh +228 -0
  47. package/scripts/lib/decide-signals.sh +462 -0
  48. package/scripts/lib/fleet-failover.sh +63 -0
  49. package/scripts/lib/helpers.sh +29 -6
  50. package/scripts/lib/pipeline-detection.sh +2 -2
  51. package/scripts/lib/pipeline-github.sh +9 -9
  52. package/scripts/lib/pipeline-intelligence.sh +105 -38
  53. package/scripts/lib/pipeline-quality-checks.sh +17 -16
  54. package/scripts/lib/pipeline-quality.sh +1 -1
  55. package/scripts/lib/pipeline-stages.sh +440 -59
  56. package/scripts/lib/pipeline-state.sh +54 -4
  57. package/scripts/lib/policy.sh +0 -0
  58. package/scripts/lib/test-helpers.sh +247 -0
  59. package/scripts/postinstall.mjs +78 -12
  60. package/scripts/signals/example-collector.sh +36 -0
  61. package/scripts/sw +17 -7
  62. package/scripts/sw-activity.sh +1 -11
  63. package/scripts/sw-adaptive.sh +109 -85
  64. package/scripts/sw-adversarial.sh +4 -14
  65. package/scripts/sw-architecture-enforcer.sh +1 -11
  66. package/scripts/sw-auth.sh +8 -17
  67. package/scripts/sw-autonomous.sh +111 -49
  68. package/scripts/sw-changelog.sh +1 -11
  69. package/scripts/sw-checkpoint.sh +144 -20
  70. package/scripts/sw-ci.sh +2 -12
  71. package/scripts/sw-cleanup.sh +13 -17
  72. package/scripts/sw-code-review.sh +16 -36
  73. package/scripts/sw-connect.sh +5 -12
  74. package/scripts/sw-context.sh +9 -26
  75. package/scripts/sw-cost.sh +17 -18
  76. package/scripts/sw-daemon.sh +76 -71
  77. package/scripts/sw-dashboard.sh +57 -17
  78. package/scripts/sw-db.sh +524 -26
  79. package/scripts/sw-decide.sh +685 -0
  80. package/scripts/sw-decompose.sh +1 -11
  81. package/scripts/sw-deps.sh +15 -25
  82. package/scripts/sw-developer-simulation.sh +1 -11
  83. package/scripts/sw-discovery.sh +138 -30
  84. package/scripts/sw-doc-fleet.sh +7 -17
  85. package/scripts/sw-docs-agent.sh +6 -16
  86. package/scripts/sw-docs.sh +4 -12
  87. package/scripts/sw-doctor.sh +134 -43
  88. package/scripts/sw-dora.sh +11 -19
  89. package/scripts/sw-durable.sh +35 -52
  90. package/scripts/sw-e2e-orchestrator.sh +11 -27
  91. package/scripts/sw-eventbus.sh +115 -115
  92. package/scripts/sw-evidence.sh +114 -30
  93. package/scripts/sw-feedback.sh +3 -13
  94. package/scripts/sw-fix.sh +2 -20
  95. package/scripts/sw-fleet-discover.sh +1 -11
  96. package/scripts/sw-fleet-viz.sh +10 -18
  97. package/scripts/sw-fleet.sh +13 -17
  98. package/scripts/sw-github-app.sh +6 -16
  99. package/scripts/sw-github-checks.sh +1 -11
  100. package/scripts/sw-github-deploy.sh +1 -11
  101. package/scripts/sw-github-graphql.sh +2 -12
  102. package/scripts/sw-guild.sh +1 -11
  103. package/scripts/sw-heartbeat.sh +49 -12
  104. package/scripts/sw-hygiene.sh +45 -43
  105. package/scripts/sw-incident.sh +48 -74
  106. package/scripts/sw-init.sh +35 -37
  107. package/scripts/sw-instrument.sh +1 -11
  108. package/scripts/sw-intelligence.sh +368 -53
  109. package/scripts/sw-jira.sh +5 -14
  110. package/scripts/sw-launchd.sh +2 -12
  111. package/scripts/sw-linear.sh +8 -17
  112. package/scripts/sw-logs.sh +4 -12
  113. package/scripts/sw-loop.sh +905 -104
  114. package/scripts/sw-memory.sh +263 -20
  115. package/scripts/sw-mission-control.sh +2 -12
  116. package/scripts/sw-model-router.sh +73 -34
  117. package/scripts/sw-otel.sh +15 -23
  118. package/scripts/sw-oversight.sh +1 -11
  119. package/scripts/sw-patrol-meta.sh +5 -11
  120. package/scripts/sw-pipeline-composer.sh +7 -17
  121. package/scripts/sw-pipeline-vitals.sh +1 -11
  122. package/scripts/sw-pipeline.sh +550 -122
  123. package/scripts/sw-pm.sh +2 -12
  124. package/scripts/sw-pr-lifecycle.sh +33 -28
  125. package/scripts/sw-predictive.sh +16 -22
  126. package/scripts/sw-prep.sh +6 -16
  127. package/scripts/sw-ps.sh +1 -11
  128. package/scripts/sw-public-dashboard.sh +2 -12
  129. package/scripts/sw-quality.sh +85 -14
  130. package/scripts/sw-reaper.sh +1 -11
  131. package/scripts/sw-recruit.sh +15 -25
  132. package/scripts/sw-regression.sh +11 -21
  133. package/scripts/sw-release-manager.sh +19 -28
  134. package/scripts/sw-release.sh +8 -16
  135. package/scripts/sw-remote.sh +1 -11
  136. package/scripts/sw-replay.sh +48 -44
  137. package/scripts/sw-retro.sh +70 -92
  138. package/scripts/sw-review-rerun.sh +1 -1
  139. package/scripts/sw-scale.sh +174 -41
  140. package/scripts/sw-security-audit.sh +12 -22
  141. package/scripts/sw-self-optimize.sh +239 -23
  142. package/scripts/sw-session.sh +5 -15
  143. package/scripts/sw-setup.sh +8 -18
  144. package/scripts/sw-standup.sh +5 -15
  145. package/scripts/sw-status.sh +32 -23
  146. package/scripts/sw-strategic.sh +129 -13
  147. package/scripts/sw-stream.sh +1 -11
  148. package/scripts/sw-swarm.sh +76 -36
  149. package/scripts/sw-team-stages.sh +10 -20
  150. package/scripts/sw-templates.sh +4 -14
  151. package/scripts/sw-testgen.sh +3 -13
  152. package/scripts/sw-tmux-pipeline.sh +1 -19
  153. package/scripts/sw-tmux-role-color.sh +0 -10
  154. package/scripts/sw-tmux-status.sh +3 -11
  155. package/scripts/sw-tmux.sh +2 -20
  156. package/scripts/sw-trace.sh +1 -19
  157. package/scripts/sw-tracker-github.sh +0 -10
  158. package/scripts/sw-tracker-jira.sh +1 -11
  159. package/scripts/sw-tracker-linear.sh +1 -11
  160. package/scripts/sw-tracker.sh +7 -24
  161. package/scripts/sw-triage.sh +29 -39
  162. package/scripts/sw-upgrade.sh +5 -23
  163. package/scripts/sw-ux.sh +1 -19
  164. package/scripts/sw-webhook.sh +18 -32
  165. package/scripts/sw-widgets.sh +3 -21
  166. package/scripts/sw-worktree.sh +11 -27
  167. package/scripts/update-homebrew-sha.sh +73 -0
  168. package/templates/pipelines/tdd.json +72 -0
  169. package/scripts/sw-pipeline.sh.mock +0 -7
package/scripts/sw-db.sh CHANGED
@@ -14,7 +14,7 @@ if [[ -n "${_SW_DB_LOADED:-}" ]] && [[ "${BASH_SOURCE[0]}" != "$0" ]]; then
14
14
  fi
15
15
  _SW_DB_LOADED=1
16
16
 
17
- VERSION="2.4.0"
17
+ VERSION="3.1.0"
18
18
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
19
19
  REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
20
20
 
@@ -42,20 +42,10 @@ if [[ "$(type -t emit_event 2>/dev/null)" != "function" ]]; then
42
42
  echo "${payload}}" >> "${HOME}/.shipwright/events.jsonl"
43
43
  }
44
44
  fi
45
- CYAN="${CYAN:-\033[38;2;0;212;255m}"
46
- PURPLE="${PURPLE:-\033[38;2;124;58;237m}"
47
- BLUE="${BLUE:-\033[38;2;0;102;255m}"
48
- GREEN="${GREEN:-\033[38;2;74;222;128m}"
49
- YELLOW="${YELLOW:-\033[38;2;250;204;21m}"
50
- RED="${RED:-\033[38;2;248;113;113m}"
51
- DIM="${DIM:-\033[2m}"
52
- BOLD="${BOLD:-\033[1m}"
53
- RESET="${RESET:-\033[0m}"
54
-
55
45
  # ─── Database Configuration ──────────────────────────────────────────────────
56
46
  DB_DIR="${HOME}/.shipwright"
57
47
  DB_FILE="${DB_DIR}/shipwright.db"
58
- SCHEMA_VERSION=2
48
+ SCHEMA_VERSION=6
59
49
 
60
50
  # JSON fallback paths
61
51
  EVENTS_FILE="${DB_DIR}/events.jsonl"
@@ -89,7 +79,7 @@ check_sqlite3() {
89
79
  # Cache the result to avoid repeated command lookups
90
80
  if [[ -z "$_SQLITE3_CHECKED" ]]; then
91
81
  _SQLITE3_CHECKED=1
92
- if command -v sqlite3 &>/dev/null; then
82
+ if command -v sqlite3 >/dev/null 2>&1; then
93
83
  _SQLITE3_AVAILABLE=1
94
84
  else
95
85
  _SQLITE3_AVAILABLE=""
@@ -103,6 +93,12 @@ db_available() {
103
93
  check_sqlite3 && [[ -f "$DB_FILE" ]] && _db_feature_enabled
104
94
  }
105
95
 
96
+ # ─── SQL Escaping ──────────────────────────────────────────────────────────
97
+ # Bash 3.2 (macOS default) breaks ${var//\'/\'\'} — the backslash-escaped quote leaks backslashes into output.
98
+ # This helper uses a variable to hold the single quote for reliable escaping.
99
+ _SQL_SQ="'"
100
+ _sql_escape() { local _v="$1"; echo "${_v//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"; }
101
+
106
102
  # ─── Ensure Database Directory ──────────────────────────────────────────────
107
103
  ensure_db_dir() {
108
104
  mkdir -p "$DB_DIR"
@@ -244,6 +240,12 @@ CREATE TABLE IF NOT EXISTS metrics (
244
240
  -- Phase 1: New tables for state migration
245
241
  -- ═══════════════════════════════════════════════════════════════════════
246
242
 
243
+ -- Daemon queue (issue keys waiting for a slot)
244
+ CREATE TABLE IF NOT EXISTS daemon_queue (
245
+ issue_key TEXT PRIMARY KEY,
246
+ added_at TEXT NOT NULL
247
+ );
248
+
247
249
  -- Daemon state (replaces daemon-state.json)
248
250
  CREATE TABLE IF NOT EXISTS daemon_state (
249
251
  id INTEGER PRIMARY KEY AUTOINCREMENT,
@@ -317,6 +319,50 @@ CREATE TABLE IF NOT EXISTS memory_failures (
317
319
  synced INTEGER DEFAULT 0
318
320
  );
319
321
 
322
+ -- Memory: patterns (replaces memory/*/patterns.json)
323
+ CREATE TABLE IF NOT EXISTS memory_patterns (
324
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
325
+ repo_hash TEXT NOT NULL,
326
+ pattern_type TEXT NOT NULL,
327
+ pattern_key TEXT NOT NULL,
328
+ description TEXT,
329
+ frequency INTEGER DEFAULT 1,
330
+ confidence REAL DEFAULT 0.5,
331
+ last_seen_at TEXT NOT NULL,
332
+ created_at TEXT NOT NULL,
333
+ metadata TEXT,
334
+ synced INTEGER DEFAULT 0,
335
+ UNIQUE(repo_hash, pattern_type, pattern_key)
336
+ );
337
+
338
+ -- Memory: decisions (replaces memory/*/decisions.json)
339
+ CREATE TABLE IF NOT EXISTS memory_decisions (
340
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
341
+ repo_hash TEXT NOT NULL,
342
+ decision_type TEXT NOT NULL,
343
+ context TEXT NOT NULL,
344
+ decision TEXT NOT NULL,
345
+ outcome TEXT,
346
+ confidence REAL DEFAULT 0.5,
347
+ created_at TEXT NOT NULL,
348
+ updated_at TEXT NOT NULL,
349
+ metadata TEXT,
350
+ synced INTEGER DEFAULT 0
351
+ );
352
+
353
+ -- Memory: embeddings for semantic search
354
+ CREATE TABLE IF NOT EXISTS memory_embeddings (
355
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
356
+ content_hash TEXT UNIQUE NOT NULL,
357
+ source_type TEXT NOT NULL,
358
+ source_id INTEGER,
359
+ content_text TEXT NOT NULL,
360
+ embedding BLOB,
361
+ repo_hash TEXT,
362
+ created_at TEXT NOT NULL,
363
+ synced INTEGER DEFAULT 0
364
+ );
365
+
320
366
  -- ═══════════════════════════════════════════════════════════════════════
321
367
  -- Sync tables
322
368
  -- ═══════════════════════════════════════════════════════════════════════
@@ -354,6 +400,7 @@ CREATE INDEX IF NOT EXISTS idx_developers_name ON developers(name);
354
400
  CREATE INDEX IF NOT EXISTS idx_sessions_status ON sessions(status);
355
401
  CREATE INDEX IF NOT EXISTS idx_metrics_job_id ON metrics(job_id);
356
402
  CREATE INDEX IF NOT EXISTS idx_metrics_type ON metrics(metric_type);
403
+ CREATE INDEX IF NOT EXISTS idx_daemon_queue_added ON daemon_queue(added_at);
357
404
  CREATE INDEX IF NOT EXISTS idx_daemon_state_status ON daemon_state(status);
358
405
  CREATE INDEX IF NOT EXISTS idx_daemon_state_job ON daemon_state(job_id);
359
406
  CREATE INDEX IF NOT EXISTS idx_cost_entries_epoch ON cost_entries(ts_epoch DESC);
@@ -361,7 +408,73 @@ CREATE INDEX IF NOT EXISTS idx_cost_entries_synced ON cost_entries(synced) WHERE
361
408
  CREATE INDEX IF NOT EXISTS idx_heartbeats_job ON heartbeats(job_id);
362
409
  CREATE INDEX IF NOT EXISTS idx_memory_failures_repo ON memory_failures(repo_hash);
363
410
  CREATE INDEX IF NOT EXISTS idx_memory_failures_class ON memory_failures(failure_class);
411
+ CREATE INDEX IF NOT EXISTS idx_memory_patterns_repo ON memory_patterns(repo_hash);
412
+ CREATE INDEX IF NOT EXISTS idx_memory_patterns_type ON memory_patterns(pattern_type);
413
+ CREATE INDEX IF NOT EXISTS idx_memory_decisions_repo ON memory_decisions(repo_hash);
414
+ CREATE INDEX IF NOT EXISTS idx_memory_decisions_type ON memory_decisions(decision_type);
415
+ CREATE INDEX IF NOT EXISTS idx_memory_embeddings_hash ON memory_embeddings(content_hash);
416
+ CREATE INDEX IF NOT EXISTS idx_memory_embeddings_source ON memory_embeddings(source_type);
417
+ CREATE INDEX IF NOT EXISTS idx_memory_embeddings_repo ON memory_embeddings(repo_hash);
364
418
  CREATE INDEX IF NOT EXISTS idx_sync_log_unsynced ON _sync_log(synced) WHERE synced = 0;
419
+
420
+ -- Event consumer offset tracking
421
+ CREATE TABLE IF NOT EXISTS event_consumers (
422
+ consumer_id TEXT PRIMARY KEY,
423
+ last_event_id INTEGER NOT NULL DEFAULT 0,
424
+ last_consumed_at TEXT NOT NULL
425
+ );
426
+ CREATE INDEX IF NOT EXISTS idx_event_consumers_id ON event_consumers(consumer_id);
427
+
428
+ -- Outcome-based learning (Thompson sampling, UCB1)
429
+ CREATE TABLE IF NOT EXISTS pipeline_outcomes (
430
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
431
+ job_id TEXT UNIQUE NOT NULL,
432
+ issue_number TEXT,
433
+ template TEXT,
434
+ success INTEGER NOT NULL DEFAULT 0,
435
+ duration_secs INTEGER DEFAULT 0,
436
+ retry_count INTEGER DEFAULT 0,
437
+ cost_usd REAL DEFAULT 0,
438
+ complexity TEXT DEFAULT 'medium',
439
+ created_at TEXT NOT NULL
440
+ );
441
+
442
+ CREATE TABLE IF NOT EXISTS model_outcomes (
443
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
444
+ model TEXT NOT NULL,
445
+ stage TEXT NOT NULL,
446
+ success INTEGER NOT NULL DEFAULT 0,
447
+ duration_secs INTEGER DEFAULT 0,
448
+ cost_usd REAL DEFAULT 0,
449
+ created_at TEXT NOT NULL
450
+ );
451
+
452
+ CREATE INDEX IF NOT EXISTS idx_pipeline_outcomes_template ON pipeline_outcomes(template);
453
+ CREATE INDEX IF NOT EXISTS idx_pipeline_outcomes_complexity ON pipeline_outcomes(complexity);
454
+ CREATE INDEX IF NOT EXISTS idx_model_outcomes_model_stage ON model_outcomes(model, stage);
455
+
456
+ -- Durable workflow checkpoints
457
+ CREATE TABLE IF NOT EXISTS durable_checkpoints (
458
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
459
+ workflow_id TEXT NOT NULL,
460
+ checkpoint_data TEXT NOT NULL,
461
+ created_at TEXT NOT NULL,
462
+ UNIQUE(workflow_id)
463
+ );
464
+
465
+ -- Reasoning traces for multi-step autonomous pipelines
466
+ CREATE TABLE IF NOT EXISTS reasoning_traces (
467
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
468
+ job_id TEXT NOT NULL,
469
+ step_name TEXT NOT NULL,
470
+ input_context TEXT,
471
+ reasoning TEXT,
472
+ output_decision TEXT,
473
+ confidence REAL,
474
+ created_at TEXT NOT NULL,
475
+ FOREIGN KEY (job_id) REFERENCES pipeline_runs(job_id)
476
+ );
477
+ CREATE INDEX IF NOT EXISTS idx_reasoning_traces_job ON reasoning_traces(job_id);
365
478
  SCHEMA
366
479
  }
367
480
 
@@ -403,6 +516,138 @@ migrate_schema() {
403
516
  _db_exec "INSERT OR IGNORE INTO _sync_metadata (key, value, updated_at) VALUES ('device_id', '$(uname -n)-$$-$(now_epoch)', '$(now_iso)');"
404
517
  success "Migrated to schema v2"
405
518
  fi
519
+
520
+ # Migration from v2 → v3: add memory_patterns, memory_decisions, memory_embeddings
521
+ if [[ "$current_version" -lt 3 ]]; then
522
+ info "Migrating schema v${current_version} → v3..."
523
+ sqlite3 "$DB_FILE" "
524
+ CREATE TABLE IF NOT EXISTS memory_patterns (
525
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
526
+ repo_hash TEXT NOT NULL,
527
+ pattern_type TEXT NOT NULL,
528
+ pattern_key TEXT NOT NULL,
529
+ description TEXT,
530
+ frequency INTEGER DEFAULT 1,
531
+ confidence REAL DEFAULT 0.5,
532
+ last_seen_at TEXT NOT NULL,
533
+ created_at TEXT NOT NULL,
534
+ metadata TEXT,
535
+ synced INTEGER DEFAULT 0,
536
+ UNIQUE(repo_hash, pattern_type, pattern_key)
537
+ );
538
+ CREATE TABLE IF NOT EXISTS memory_decisions (
539
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
540
+ repo_hash TEXT NOT NULL,
541
+ decision_type TEXT NOT NULL,
542
+ context TEXT NOT NULL,
543
+ decision TEXT NOT NULL,
544
+ outcome TEXT,
545
+ confidence REAL DEFAULT 0.5,
546
+ created_at TEXT NOT NULL,
547
+ updated_at TEXT NOT NULL,
548
+ metadata TEXT,
549
+ synced INTEGER DEFAULT 0
550
+ );
551
+ CREATE TABLE IF NOT EXISTS memory_embeddings (
552
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
553
+ content_hash TEXT UNIQUE NOT NULL,
554
+ source_type TEXT NOT NULL,
555
+ source_id INTEGER,
556
+ content_text TEXT NOT NULL,
557
+ embedding BLOB,
558
+ repo_hash TEXT,
559
+ created_at TEXT NOT NULL,
560
+ synced INTEGER DEFAULT 0
561
+ );
562
+ CREATE INDEX IF NOT EXISTS idx_memory_patterns_repo ON memory_patterns(repo_hash);
563
+ CREATE INDEX IF NOT EXISTS idx_memory_patterns_type ON memory_patterns(pattern_type);
564
+ CREATE INDEX IF NOT EXISTS idx_memory_decisions_repo ON memory_decisions(repo_hash);
565
+ CREATE INDEX IF NOT EXISTS idx_memory_decisions_type ON memory_decisions(decision_type);
566
+ CREATE INDEX IF NOT EXISTS idx_memory_embeddings_hash ON memory_embeddings(content_hash);
567
+ CREATE INDEX IF NOT EXISTS idx_memory_embeddings_source ON memory_embeddings(source_type);
568
+ CREATE INDEX IF NOT EXISTS idx_memory_embeddings_repo ON memory_embeddings(repo_hash);
569
+ "
570
+ _db_exec "INSERT OR REPLACE INTO _schema (version, created_at, applied_at) VALUES (3, '$(now_iso)', '$(now_iso)');"
571
+ success "Migrated to schema v3"
572
+ fi
573
+
574
+ # Migration from v3 → v4: event_consumers, durable_checkpoints
575
+ if [[ "$current_version" -lt 4 ]]; then
576
+ info "Migrating schema v${current_version} → v4..."
577
+ sqlite3 "$DB_FILE" "
578
+ CREATE TABLE IF NOT EXISTS event_consumers (
579
+ consumer_id TEXT PRIMARY KEY,
580
+ last_event_id INTEGER NOT NULL DEFAULT 0,
581
+ last_consumed_at TEXT NOT NULL
582
+ );
583
+ CREATE INDEX IF NOT EXISTS idx_event_consumers_id ON event_consumers(consumer_id);
584
+ CREATE TABLE IF NOT EXISTS durable_checkpoints (
585
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
586
+ workflow_id TEXT NOT NULL,
587
+ checkpoint_data TEXT NOT NULL,
588
+ created_at TEXT NOT NULL,
589
+ UNIQUE(workflow_id)
590
+ );
591
+ "
592
+ _db_exec "INSERT OR REPLACE INTO _schema (version, created_at, applied_at) VALUES (4, '$(now_iso)', '$(now_iso)');"
593
+ success "Migrated to schema v4"
594
+ fi
595
+
596
+ # Migration from v4 → v5: pipeline_outcomes, model_outcomes for outcome-based learning
597
+ if [[ "$current_version" -lt 5 ]]; then
598
+ info "Migrating schema v${current_version} → v5..."
599
+ sqlite3 "$DB_FILE" "
600
+ CREATE TABLE IF NOT EXISTS pipeline_outcomes (
601
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
602
+ job_id TEXT UNIQUE NOT NULL,
603
+ issue_number TEXT,
604
+ template TEXT,
605
+ success INTEGER NOT NULL DEFAULT 0,
606
+ duration_secs INTEGER DEFAULT 0,
607
+ retry_count INTEGER DEFAULT 0,
608
+ cost_usd REAL DEFAULT 0,
609
+ complexity TEXT DEFAULT 'medium',
610
+ created_at TEXT NOT NULL
611
+ );
612
+
613
+ CREATE TABLE IF NOT EXISTS model_outcomes (
614
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
615
+ model TEXT NOT NULL,
616
+ stage TEXT NOT NULL,
617
+ success INTEGER NOT NULL DEFAULT 0,
618
+ duration_secs INTEGER DEFAULT 0,
619
+ cost_usd REAL DEFAULT 0,
620
+ created_at TEXT NOT NULL
621
+ );
622
+
623
+ CREATE INDEX IF NOT EXISTS idx_pipeline_outcomes_template ON pipeline_outcomes(template);
624
+ CREATE INDEX IF NOT EXISTS idx_pipeline_outcomes_complexity ON pipeline_outcomes(complexity);
625
+ CREATE INDEX IF NOT EXISTS idx_model_outcomes_model_stage ON model_outcomes(model, stage);
626
+ "
627
+ _db_exec "INSERT OR REPLACE INTO _schema (version, created_at, applied_at) VALUES (5, '$(now_iso)', '$(now_iso)');"
628
+ success "Migrated to schema v5"
629
+ fi
630
+
631
+ # Migration from v5 → v6: reasoning_traces for multi-step autonomous reasoning
632
+ if [[ "$current_version" -lt 6 ]]; then
633
+ info "Migrating schema v${current_version} → v6..."
634
+ sqlite3 "$DB_FILE" "
635
+ CREATE TABLE IF NOT EXISTS reasoning_traces (
636
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
637
+ job_id TEXT NOT NULL,
638
+ step_name TEXT NOT NULL,
639
+ input_context TEXT,
640
+ reasoning TEXT,
641
+ output_decision TEXT,
642
+ confidence REAL,
643
+ created_at TEXT NOT NULL,
644
+ FOREIGN KEY (job_id) REFERENCES pipeline_runs(job_id)
645
+ );
646
+ CREATE INDEX IF NOT EXISTS idx_reasoning_traces_job ON reasoning_traces(job_id);
647
+ "
648
+ _db_exec "INSERT OR REPLACE INTO _schema (version, created_at, applied_at) VALUES (6, '$(now_iso)', '$(now_iso)');"
649
+ success "Migrated to schema v6"
650
+ fi
406
651
  }
407
652
 
408
653
  # ═══════════════════════════════════════════════════════════════════════════
@@ -442,6 +687,116 @@ db_add_event() {
442
687
  _db_exec "INSERT OR IGNORE INTO events (ts, ts_epoch, type, job_id, stage, status, duration_secs, metadata, created_at, synced) VALUES ('${ts}', ${ts_epoch}, '${event_type}', '${job_id}', '${stage}', '${status}', ${duration_secs}, '${metadata}', '${ts}', 0);" || return 1
443
688
  }
444
689
 
690
+ # ═══════════════════════════════════════════════════════════════════════════
691
+ # Event Query Functions (dual-read: SQLite preferred, JSONL fallback)
692
+ # ═══════════════════════════════════════════════════════════════════════════
693
+
694
+ # db_query_events [filter] [limit] — Query events, SQLite when available else JSONL
695
+ # Output: JSON array of events. duration_secs is renamed to duration_s (via jq) for backward compat.
696
+ db_query_events() {
697
+ local filter="${1:-}"
698
+ local limit="${2:-5000}"
699
+ local db_file="${DB_FILE:-$HOME/.shipwright/shipwright.db}"
700
+
701
+ if [[ -f "$db_file" ]] && command -v sqlite3 &>/dev/null; then
702
+ local where_clause=""
703
+ [[ -n "$filter" ]] && where_clause="WHERE type = '$filter'"
704
+ local result
705
+ result=$(sqlite3 -json "$db_file" "SELECT ts, ts_epoch, type, job_id, stage, status, duration_secs, metadata FROM events $where_clause ORDER BY ts_epoch DESC LIMIT $limit" 2>/dev/null) || true
706
+ if [[ -n "$result" ]]; then
707
+ echo "$result" | jq -c '
708
+ map(. + {duration_s: (.duration_secs // 0), result: (.result // .status)} + ((.metadata | if type == "string" then (fromjson? // {}) else {} end) // {}))
709
+ | map(del(.duration_secs, .metadata))
710
+ ' 2>/dev/null || echo "$result"
711
+ return 0
712
+ fi
713
+ fi
714
+
715
+ # Fallback to JSONL
716
+ local events_file="${EVENTS_FILE:-$HOME/.shipwright/events.jsonl}"
717
+ [[ ! -f "$events_file" ]] && echo "[]" && return 0
718
+ if [[ -n "$filter" ]]; then
719
+ grep -F "\"type\":\"$filter\"" "$events_file" 2>/dev/null | tail -n "$limit" | jq -s '.' 2>/dev/null || echo "[]"
720
+ else
721
+ tail -n "$limit" "$events_file" | jq -s '.' 2>/dev/null || echo "[]"
722
+ fi
723
+ }
724
+
725
+ # db_query_events_since <since_epoch> [event_type] [to_epoch] — Events in time range
726
+ # Output: JSON array. SQLite when available else JSONL.
727
+ db_query_events_since() {
728
+ local since_epoch="$1"
729
+ local event_type="${2:-}"
730
+ local to_epoch="${3:-}"
731
+ local db_file="${DB_FILE:-$HOME/.shipwright/shipwright.db}"
732
+
733
+ if [[ -f "$db_file" ]] && command -v sqlite3 &>/dev/null; then
734
+ local type_filter=""
735
+ [[ -n "$event_type" ]] && type_filter="AND type = '$event_type'"
736
+ local to_filter=""
737
+ [[ -n "$to_epoch" ]] && to_filter="AND ts_epoch <= $to_epoch"
738
+ local result
739
+ result=$(sqlite3 -json "$db_file" "SELECT ts, ts_epoch, type, job_id, stage, status, duration_secs, metadata FROM events WHERE ts_epoch >= $since_epoch $type_filter $to_filter ORDER BY ts_epoch DESC" 2>/dev/null) || true
740
+ if [[ -n "$result" ]]; then
741
+ echo "$result" | jq -c '
742
+ map(. + {duration_s: (.duration_secs // 0), result: (.result // .status)} + ((.metadata | if type == "string" then (fromjson? // {}) else {} end) // {}))
743
+ | map(del(.duration_secs, .metadata))
744
+ ' 2>/dev/null || echo "$result"
745
+ return 0
746
+ fi
747
+ fi
748
+
749
+ # JSONL fallback (DB not available or query failed)
750
+ local events_file="${EVENTS_FILE:-$HOME/.shipwright/events.jsonl}"
751
+ [[ ! -f "$events_file" ]] && echo "[]" && return 0
752
+ local to=${to_epoch:-9999999999}
753
+ if [[ -n "$event_type" ]]; then
754
+ grep '^{' "$events_file" 2>/dev/null | jq -s --argjson from "$since_epoch" --argjson to "$to" --arg t "$event_type" '
755
+ map(select(. != null and .ts_epoch != null)) |
756
+ map(select(.ts_epoch >= $from and .ts_epoch <= $to and .type == $t))
757
+ ' 2>/dev/null || echo "[]"
758
+ else
759
+ grep '^{' "$events_file" 2>/dev/null | jq -s --argjson from "$since_epoch" --argjson to "$to" '
760
+ map(select(. != null and .ts_epoch != null)) |
761
+ map(select(.ts_epoch >= $from and .ts_epoch <= $to))
762
+ ' 2>/dev/null || echo "[]"
763
+ fi
764
+ }
765
+
766
+ # db_get_consumer_offset <consumer_id> — returns last_event_id or "0"
767
+ db_get_consumer_offset() {
768
+ local consumer_id="$1"
769
+ consumer_id="${consumer_id//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
770
+ _db_query "SELECT last_event_id FROM event_consumers WHERE consumer_id = '${consumer_id}';" 2>/dev/null || echo "0"
771
+ }
772
+
773
+ # db_set_consumer_offset <consumer_id> <last_event_id>
774
+ db_set_consumer_offset() {
775
+ local consumer_id="$1"
776
+ local last_event_id="$2"
777
+ consumer_id="${consumer_id//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
778
+ _db_exec "INSERT OR REPLACE INTO event_consumers (consumer_id, last_event_id, last_consumed_at) VALUES ('${consumer_id}', ${last_event_id}, '$(now_iso)');"
779
+ }
780
+
781
+ # db_save_checkpoint <workflow_id> <data> — durable workflow checkpoint
782
+ db_save_checkpoint() {
783
+ local workflow_id="$1"
784
+ local data="$2"
785
+ workflow_id="${workflow_id//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
786
+ data="${data//$'\n'/ }"
787
+ data="${data//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
788
+ if ! db_available; then return 1; fi
789
+ _db_exec "INSERT OR REPLACE INTO durable_checkpoints (workflow_id, checkpoint_data, created_at) VALUES ('${workflow_id}', '${data}', '$(now_iso)');"
790
+ }
791
+
792
+ # db_load_checkpoint <workflow_id> — returns checkpoint_data or empty
793
+ db_load_checkpoint() {
794
+ local workflow_id="$1"
795
+ workflow_id="${workflow_id//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
796
+ if ! db_available; then return 1; fi
797
+ _db_query "SELECT checkpoint_data FROM durable_checkpoints WHERE workflow_id = '${workflow_id}';" 2>/dev/null || echo ""
798
+ }
799
+
445
800
  # Legacy positional API (backward compat with existing add_event calls)
446
801
  add_event() {
447
802
  local event_type="$1"
@@ -493,8 +848,8 @@ db_save_job() {
493
848
  if ! db_available; then return 1; fi
494
849
 
495
850
  # Escape single quotes in title/goal
496
- title="${title//\'/\'\'}"
497
- goal="${goal//\'/\'\'}"
851
+ title="${title//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
852
+ goal="${goal//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
498
853
 
499
854
  _db_exec "INSERT OR REPLACE INTO daemon_state (job_id, issue_number, title, goal, pid, worktree, branch, status, template, started_at, updated_at) VALUES ('${job_id}', ${issue_num}, '${title}', '${goal}', ${pid}, '${worktree}', '${branch}', 'active', '${template}', '${ts}', '${ts}');"
500
855
  }
@@ -510,7 +865,7 @@ db_complete_job() {
510
865
 
511
866
  if ! db_available; then return 1; fi
512
867
 
513
- error_msg="${error_msg//\'/\'\'}"
868
+ error_msg="${error_msg//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
514
869
 
515
870
  _db_exec "UPDATE daemon_state SET status = 'completed', result = '${result}', duration = '${duration}', error_message = '${error_msg}', completed_at = '${ts}', updated_at = '${ts}' WHERE job_id = '${job_id}' AND status = 'active';"
516
871
  }
@@ -524,7 +879,7 @@ db_fail_job() {
524
879
 
525
880
  if ! db_available; then return 1; fi
526
881
 
527
- error_msg="${error_msg//\'/\'\'}"
882
+ error_msg="${error_msg//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
528
883
 
529
884
  _db_exec "UPDATE daemon_state SET status = 'failed', result = 'failure', error_message = '${error_msg}', completed_at = '${ts}', updated_at = '${ts}' WHERE job_id = '${job_id}' AND status = 'active';"
530
885
  }
@@ -564,6 +919,44 @@ db_remove_active_job() {
564
919
  _db_exec "DELETE FROM daemon_state WHERE job_id = '${job_id}' AND status = 'active';"
565
920
  }
566
921
 
922
+ # db_enqueue_issue <issue_key> — add to daemon queue
923
+ db_enqueue_issue() {
924
+ local issue_key="$1"
925
+ if ! db_available; then return 1; fi
926
+ issue_key="${issue_key//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
927
+ _db_exec "INSERT OR REPLACE INTO daemon_queue (issue_key, added_at) VALUES ('${issue_key}', '$(now_iso)');"
928
+ }
929
+
930
+ # db_dequeue_next — returns first issue_key and removes it, empty if none
931
+ db_dequeue_next() {
932
+ if ! db_available; then echo ""; return 0; fi
933
+ local next escaped
934
+ next=$(_db_query "SELECT issue_key FROM daemon_queue ORDER BY added_at ASC LIMIT 1;" || echo "")
935
+ if [[ -n "$next" ]]; then
936
+ escaped="${next//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
937
+ _db_exec "DELETE FROM daemon_queue WHERE issue_key = '${escaped}';" 2>/dev/null || true
938
+ echo "$next"
939
+ fi
940
+ }
941
+
942
+ # db_is_issue_queued <issue_key> — returns 0 if queued, 1 if not
943
+ db_is_issue_queued() {
944
+ local issue_key="$1"
945
+ if ! db_available; then return 1; fi
946
+ issue_key="${issue_key//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
947
+ local count
948
+ count=$(_db_query "SELECT COUNT(*) FROM daemon_queue WHERE issue_key = '${issue_key}';")
949
+ [[ "${count:-0}" -gt 0 ]]
950
+ }
951
+
952
+ # db_remove_from_queue <issue_key> — remove specific key from queue
953
+ db_remove_from_queue() {
954
+ local issue_key="$1"
955
+ if ! db_available; then return 1; fi
956
+ issue_key="${issue_key//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
957
+ _db_exec "DELETE FROM daemon_queue WHERE issue_key = '${issue_key}';"
958
+ }
959
+
567
960
  # db_daemon_summary — outputs JSON summary for status dashboard
568
961
  db_daemon_summary() {
569
962
  if ! db_available; then echo "{}"; return 0; fi
@@ -579,6 +972,23 @@ db_daemon_summary() {
579
972
  # Cost Functions (replaces costs.json)
580
973
  # ═══════════════════════════════════════════════════════════════════════════
581
974
 
975
+ # Record pipeline outcome for learning (Thompson sampling, optimize_tune_templates)
976
+ # db_record_outcome <job_id> [issue] [template] [success] [duration_secs] [retries] [cost_usd] [complexity]
977
+ db_record_outcome() {
978
+ local job_id="$1" issue="${2:-}" template="${3:-}" success="${4:-1}"
979
+ local duration="${5:-0}" retries="${6:-0}" cost="${7:-0}" complexity="${8:-medium}"
980
+
981
+ if ! db_available; then return 1; fi
982
+
983
+ job_id="${job_id//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
984
+ issue="${issue//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
985
+ template="${template//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
986
+
987
+ _db_exec "INSERT OR REPLACE INTO pipeline_outcomes
988
+ (job_id, issue_number, template, success, duration_secs, retry_count, cost_usd, complexity, created_at)
989
+ VALUES ('$job_id', '$issue', '$template', $success, $duration, $retries, $cost, '$complexity', '$(now_iso)');"
990
+ }
991
+
582
992
  # db_record_cost <input_tokens> <output_tokens> <model> <cost_usd> <stage> [issue]
583
993
  db_record_cost() {
584
994
  local input_tokens="${1:-0}"
@@ -683,7 +1093,7 @@ db_record_heartbeat() {
683
1093
 
684
1094
  if ! db_available; then return 1; fi
685
1095
 
686
- activity="${activity//\'/\'\'}"
1096
+ activity="${activity//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
687
1097
 
688
1098
  _db_exec "INSERT OR REPLACE INTO heartbeats (job_id, pid, issue, stage, iteration, last_activity, memory_mb, updated_at) VALUES ('${job_id}', ${pid}, ${issue}, '${stage}', ${iteration}, '${activity}', ${memory_mb}, '${ts}');"
689
1099
  }
@@ -731,9 +1141,9 @@ db_record_failure() {
731
1141
  if ! db_available; then return 1; fi
732
1142
 
733
1143
  # Escape quotes
734
- error_sig="${error_sig//\'/\'\'}"
735
- root_cause="${root_cause//\'/\'\'}"
736
- fix_desc="${fix_desc//\'/\'\'}"
1144
+ error_sig="${error_sig//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
1145
+ root_cause="${root_cause//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
1146
+ fix_desc="${fix_desc//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
737
1147
 
738
1148
  # Upsert: increment occurrences if same signature exists
739
1149
  _db_exec "INSERT INTO memory_failures (repo_hash, failure_class, error_signature, root_cause, fix_description, file_path, stage, occurrences, last_seen_at, created_at, synced) VALUES ('${repo_hash}', '${failure_class}', '${error_sig}', '${root_cause}', '${fix_desc}', '${file_path}', '${stage}', 1, '${ts}', '${ts}', 0) ON CONFLICT(id) DO UPDATE SET occurrences = occurrences + 1, last_seen_at = '${ts}';"
@@ -753,6 +1163,93 @@ db_query_similar_failures() {
753
1163
  _db_query "SELECT json_group_array(json_object('failure_class', failure_class, 'error_signature', error_signature, 'root_cause', root_cause, 'fix_description', fix_description, 'file_path', file_path, 'occurrences', occurrences, 'last_seen_at', last_seen_at)) FROM (SELECT * FROM memory_failures ${where_clause} ORDER BY occurrences DESC, last_seen_at DESC LIMIT ${limit});" || echo "[]"
754
1164
  }
755
1165
 
1166
+ # Memory patterns
1167
+ db_save_pattern() {
1168
+ local repo_hash="$1" pattern_type="$2" pattern_key="$3" description="${4:-}" metadata="${5:-}"
1169
+ if ! db_available; then return 1; fi
1170
+ description="${description//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
1171
+ metadata="${metadata//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
1172
+ _db_exec "INSERT INTO memory_patterns (repo_hash, pattern_type, pattern_key, description, last_seen_at, created_at, metadata)
1173
+ VALUES ('$repo_hash', '$pattern_type', '$pattern_key', '$description', '$(now_iso)', '$(now_iso)', '$metadata')
1174
+ ON CONFLICT(repo_hash, pattern_type, pattern_key) DO UPDATE SET
1175
+ frequency = frequency + 1, last_seen_at = '$(now_iso)', description = COALESCE(NULLIF('$description',''), description);"
1176
+ }
1177
+
1178
+ db_query_patterns() {
1179
+ local repo_hash="$1" pattern_type="${2:-}" limit="${3:-20}"
1180
+ if ! db_available; then echo "[]"; return 0; fi
1181
+ local where="WHERE repo_hash = '$repo_hash'"
1182
+ [[ -n "$pattern_type" ]] && where="$where AND pattern_type = '$pattern_type'"
1183
+ _db_query -json "SELECT * FROM memory_patterns $where ORDER BY frequency DESC, last_seen_at DESC LIMIT $limit;" || echo "[]"
1184
+ }
1185
+
1186
+ # Memory decisions
1187
+ db_save_decision() {
1188
+ local repo_hash="$1" decision_type="$2" context="$3" decision="$4" metadata="${5:-}"
1189
+ if ! db_available; then return 1; fi
1190
+ context="${context//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
1191
+ decision="${decision//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
1192
+ metadata="${metadata//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
1193
+ _db_exec "INSERT INTO memory_decisions (repo_hash, decision_type, context, decision, created_at, updated_at, metadata)
1194
+ VALUES ('$repo_hash', '$decision_type', '$context', '$decision', '$(now_iso)', '$(now_iso)', '$metadata');"
1195
+ }
1196
+
1197
+ db_update_decision_outcome() {
1198
+ local decision_id="$1" outcome="$2" confidence="${3:-}"
1199
+ if ! db_available; then return 1; fi
1200
+ outcome="${outcome//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
1201
+ local set_clause="outcome = '$outcome', updated_at = '$(now_iso)'"
1202
+ [[ -n "$confidence" ]] && set_clause="$set_clause, confidence = $confidence"
1203
+ _db_exec "UPDATE memory_decisions SET $set_clause WHERE id = $decision_id;"
1204
+ }
1205
+
1206
+ db_query_decisions() {
1207
+ local repo_hash="$1" decision_type="${2:-}" limit="${3:-20}"
1208
+ if ! db_available; then echo "[]"; return 0; fi
1209
+ local where="WHERE repo_hash = '$repo_hash'"
1210
+ [[ -n "$decision_type" ]] && where="$where AND decision_type = '$decision_type'"
1211
+ _db_query -json "SELECT * FROM memory_decisions $where ORDER BY updated_at DESC LIMIT $limit;" || echo "[]"
1212
+ }
1213
+
1214
+ # Memory embeddings
1215
+ db_save_embedding() {
1216
+ local content_hash="$1" source_type="$2" content_text="$3" repo_hash="${4:-}"
1217
+ if ! db_available; then return 1; fi
1218
+ content_text="${content_text//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
1219
+ _db_exec "INSERT OR IGNORE INTO memory_embeddings (content_hash, source_type, content_text, repo_hash, created_at)
1220
+ VALUES ('$content_hash', '$source_type', '$content_text', '$repo_hash', '$(now_iso)');"
1221
+ }
1222
+
1223
+ db_query_embeddings() {
1224
+ local source_type="${1:-}" repo_hash="${2:-}" limit="${3:-50}"
1225
+ if ! db_available; then echo "[]"; return 0; fi
1226
+ local where="WHERE 1=1"
1227
+ [[ -n "$source_type" ]] && where="$where AND source_type = '$source_type'"
1228
+ [[ -n "$repo_hash" ]] && where="$where AND repo_hash = '$repo_hash'"
1229
+ _db_query -json "SELECT id, content_hash, source_type, content_text, repo_hash, created_at FROM memory_embeddings $where ORDER BY created_at DESC LIMIT $limit;" || echo "[]"
1230
+ }
1231
+
1232
+ # Reasoning traces for multi-step autonomous pipelines
1233
+ db_save_reasoning_trace() {
1234
+ local job_id="$1" step_name="$2" input_context="$3" reasoning="$4" output_decision="$5" confidence="${6:-0.5}"
1235
+ local escaped_input escaped_reasoning escaped_output
1236
+ escaped_input=$(echo "$input_context" | sed "s/'/''/g")
1237
+ escaped_reasoning=$(echo "$reasoning" | sed "s/'/''/g")
1238
+ escaped_output=$(echo "$output_decision" | sed "s/'/''/g")
1239
+ job_id="${job_id//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
1240
+ step_name="${step_name//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
1241
+ if ! db_available; then return 1; fi
1242
+ _db_exec "INSERT INTO reasoning_traces (job_id, step_name, input_context, reasoning, output_decision, confidence, created_at)
1243
+ VALUES ('$job_id', '$step_name', '$escaped_input', '$escaped_reasoning', '$escaped_output', $confidence, '$(now_iso)');"
1244
+ }
1245
+
1246
+ db_query_reasoning_traces() {
1247
+ local job_id="$1"
1248
+ job_id="${job_id//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
1249
+ if ! db_available; then echo "[]"; return 0; fi
1250
+ _db_query -json "SELECT * FROM reasoning_traces WHERE job_id = '$job_id' ORDER BY id ASC;" || echo "[]"
1251
+ }
1252
+
756
1253
  # ═══════════════════════════════════════════════════════════════════════════
757
1254
  # Pipeline Run Functions (enhanced from existing)
758
1255
  # ═══════════════════════════════════════════════════════════════════════════
@@ -770,7 +1267,8 @@ add_pipeline_run() {
770
1267
 
771
1268
  local ts
772
1269
  ts="$(now_iso)"
773
- goal="${goal//\'/\'\'}"
1270
+ goal="${goal//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
1271
+ branch="${branch//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
774
1272
 
775
1273
  _db_exec "INSERT OR IGNORE INTO pipeline_runs (job_id, issue_number, goal, branch, status, template, started_at, created_at) VALUES ('${job_id}', ${issue_number}, '${goal}', '${branch}', 'pending', '${template}', '${ts}', '${ts}');" || return 1
776
1274
  }
@@ -801,7 +1299,7 @@ record_stage() {
801
1299
 
802
1300
  local ts
803
1301
  ts="$(now_iso)"
804
- error_msg="${error_msg//\'/\'\'}"
1302
+ error_msg="${error_msg//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
805
1303
 
806
1304
  _db_exec "INSERT INTO pipeline_stages (job_id, stage_name, status, started_at, completed_at, duration_secs, error_message, created_at) VALUES ('${job_id}', '${stage_name}', '${status}', '${ts}', '${ts}', ${duration_secs}, '${error_msg}', '${ts}');" || return 1
807
1305
  }
@@ -868,7 +1366,7 @@ db_sync_push() {
868
1366
  local auth_header=""
869
1367
  [[ -n "${SYNC_TOKEN:-}" ]] && auth_header="-H 'Authorization: Bearer ${SYNC_TOKEN}'"
870
1368
 
871
- response=$(curl -s -w "%{http_code}" -o /dev/null \
1369
+ response=$(curl -s --connect-timeout 10 --max-time 30 -w "%{http_code}" -o /dev/null \
872
1370
  -X POST "${SYNC_URL}/api/sync/push" \
873
1371
  -H "Content-Type: application/json" \
874
1372
  ${auth_header} \
@@ -901,7 +1399,7 @@ db_sync_pull() {
901
1399
  [[ -n "${SYNC_TOKEN:-}" ]] && auth_header="-H 'Authorization: Bearer ${SYNC_TOKEN}'"
902
1400
 
903
1401
  local response_body
904
- response_body=$(curl -s \
1402
+ response_body=$(curl -s --connect-timeout 10 --max-time 30 \
905
1403
  "${SYNC_URL}/api/sync/pull?since=${last_sync}" \
906
1404
  -H "Accept: application/json" \
907
1405
  ${auth_header} 2>/dev/null || echo "{}")
@@ -1039,7 +1537,7 @@ migrate_json_data() {
1039
1537
  hb_mem=$(jq -r '.memory_mb // 0' "$hb_file" 2>/dev/null || echo "0")
1040
1538
  hb_updated=$(jq -r '.updated_at // ""' "$hb_file" 2>/dev/null || echo "$(now_iso)")
1041
1539
 
1042
- hb_activity="${hb_activity//\'/\'\'}"
1540
+ hb_activity="${hb_activity//$_SQL_SQ/$_SQL_SQ$_SQL_SQ}"
1043
1541
  _db_exec "INSERT OR REPLACE INTO heartbeats (job_id, pid, issue, stage, iteration, last_activity, memory_mb, updated_at) VALUES ('${hb_job}', ${hb_pid}, ${hb_issue}, '${hb_stage}', ${hb_iter}, '${hb_activity}', ${hb_mem}, '${hb_updated}');" 2>/dev/null && hb_count=$((hb_count + 1))
1044
1542
  done
1045
1543
  success "Heartbeats: ${hb_count} imported"