shipwright-cli 2.4.0 → 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (161)
  1. package/README.md +16 -11
  2. package/completions/_shipwright +1 -1
  3. package/completions/shipwright.bash +3 -8
  4. package/completions/shipwright.fish +1 -1
  5. package/config/defaults.json +111 -0
  6. package/config/event-schema.json +81 -0
  7. package/config/policy.json +13 -18
  8. package/dashboard/coverage/coverage-summary.json +14 -0
  9. package/dashboard/public/index.html +1 -1
  10. package/dashboard/server.ts +306 -17
  11. package/dashboard/src/components/charts/bar.test.ts +79 -0
  12. package/dashboard/src/components/charts/donut.test.ts +68 -0
  13. package/dashboard/src/components/charts/pipeline-rail.test.ts +117 -0
  14. package/dashboard/src/components/charts/sparkline.test.ts +125 -0
  15. package/dashboard/src/core/api.test.ts +309 -0
  16. package/dashboard/src/core/helpers.test.ts +301 -0
  17. package/dashboard/src/core/router.test.ts +307 -0
  18. package/dashboard/src/core/router.ts +7 -0
  19. package/dashboard/src/core/sse.test.ts +144 -0
  20. package/dashboard/src/views/metrics.test.ts +186 -0
  21. package/dashboard/src/views/overview.test.ts +173 -0
  22. package/dashboard/src/views/pipelines.test.ts +183 -0
  23. package/dashboard/src/views/team.test.ts +253 -0
  24. package/dashboard/vitest.config.ts +14 -5
  25. package/docs/TIPS.md +1 -1
  26. package/docs/patterns/README.md +1 -1
  27. package/package.json +5 -7
  28. package/scripts/adapters/docker-deploy.sh +1 -1
  29. package/scripts/adapters/tmux-adapter.sh +11 -1
  30. package/scripts/adapters/wezterm-adapter.sh +1 -1
  31. package/scripts/check-version-consistency.sh +1 -1
  32. package/scripts/lib/architecture.sh +126 -0
  33. package/scripts/lib/bootstrap.sh +75 -0
  34. package/scripts/lib/compat.sh +89 -6
  35. package/scripts/lib/config.sh +91 -0
  36. package/scripts/lib/daemon-adaptive.sh +3 -3
  37. package/scripts/lib/daemon-dispatch.sh +39 -16
  38. package/scripts/lib/daemon-health.sh +1 -1
  39. package/scripts/lib/daemon-patrol.sh +24 -12
  40. package/scripts/lib/daemon-poll.sh +37 -25
  41. package/scripts/lib/daemon-state.sh +115 -23
  42. package/scripts/lib/daemon-triage.sh +30 -8
  43. package/scripts/lib/fleet-failover.sh +63 -0
  44. package/scripts/lib/helpers.sh +30 -6
  45. package/scripts/lib/pipeline-detection.sh +2 -2
  46. package/scripts/lib/pipeline-github.sh +9 -9
  47. package/scripts/lib/pipeline-intelligence.sh +85 -35
  48. package/scripts/lib/pipeline-quality-checks.sh +16 -16
  49. package/scripts/lib/pipeline-quality.sh +1 -1
  50. package/scripts/lib/pipeline-stages.sh +242 -28
  51. package/scripts/lib/pipeline-state.sh +40 -4
  52. package/scripts/lib/test-helpers.sh +247 -0
  53. package/scripts/postinstall.mjs +3 -11
  54. package/scripts/sw +10 -4
  55. package/scripts/sw-activity.sh +1 -11
  56. package/scripts/sw-adaptive.sh +109 -85
  57. package/scripts/sw-adversarial.sh +4 -14
  58. package/scripts/sw-architecture-enforcer.sh +1 -11
  59. package/scripts/sw-auth.sh +8 -17
  60. package/scripts/sw-autonomous.sh +111 -49
  61. package/scripts/sw-changelog.sh +1 -11
  62. package/scripts/sw-checkpoint.sh +144 -20
  63. package/scripts/sw-ci.sh +2 -12
  64. package/scripts/sw-cleanup.sh +13 -17
  65. package/scripts/sw-code-review.sh +16 -36
  66. package/scripts/sw-connect.sh +5 -12
  67. package/scripts/sw-context.sh +9 -26
  68. package/scripts/sw-cost.sh +6 -16
  69. package/scripts/sw-daemon.sh +75 -70
  70. package/scripts/sw-dashboard.sh +57 -17
  71. package/scripts/sw-db.sh +506 -15
  72. package/scripts/sw-decompose.sh +1 -11
  73. package/scripts/sw-deps.sh +15 -25
  74. package/scripts/sw-developer-simulation.sh +1 -11
  75. package/scripts/sw-discovery.sh +112 -30
  76. package/scripts/sw-doc-fleet.sh +7 -17
  77. package/scripts/sw-docs-agent.sh +6 -16
  78. package/scripts/sw-docs.sh +4 -12
  79. package/scripts/sw-doctor.sh +134 -43
  80. package/scripts/sw-dora.sh +11 -19
  81. package/scripts/sw-durable.sh +35 -52
  82. package/scripts/sw-e2e-orchestrator.sh +11 -27
  83. package/scripts/sw-eventbus.sh +115 -115
  84. package/scripts/sw-evidence.sh +114 -30
  85. package/scripts/sw-feedback.sh +3 -13
  86. package/scripts/sw-fix.sh +2 -20
  87. package/scripts/sw-fleet-discover.sh +1 -11
  88. package/scripts/sw-fleet-viz.sh +10 -18
  89. package/scripts/sw-fleet.sh +13 -17
  90. package/scripts/sw-github-app.sh +6 -16
  91. package/scripts/sw-github-checks.sh +1 -11
  92. package/scripts/sw-github-deploy.sh +1 -11
  93. package/scripts/sw-github-graphql.sh +2 -12
  94. package/scripts/sw-guild.sh +1 -11
  95. package/scripts/sw-heartbeat.sh +49 -12
  96. package/scripts/sw-hygiene.sh +45 -43
  97. package/scripts/sw-incident.sh +48 -74
  98. package/scripts/sw-init.sh +35 -37
  99. package/scripts/sw-instrument.sh +1 -11
  100. package/scripts/sw-intelligence.sh +362 -51
  101. package/scripts/sw-jira.sh +5 -14
  102. package/scripts/sw-launchd.sh +2 -12
  103. package/scripts/sw-linear.sh +8 -17
  104. package/scripts/sw-logs.sh +4 -12
  105. package/scripts/sw-loop.sh +641 -90
  106. package/scripts/sw-memory.sh +243 -17
  107. package/scripts/sw-mission-control.sh +2 -12
  108. package/scripts/sw-model-router.sh +73 -34
  109. package/scripts/sw-otel.sh +11 -21
  110. package/scripts/sw-oversight.sh +1 -11
  111. package/scripts/sw-patrol-meta.sh +5 -11
  112. package/scripts/sw-pipeline-composer.sh +7 -17
  113. package/scripts/sw-pipeline-vitals.sh +1 -11
  114. package/scripts/sw-pipeline.sh +478 -122
  115. package/scripts/sw-pm.sh +2 -12
  116. package/scripts/sw-pr-lifecycle.sh +27 -25
  117. package/scripts/sw-predictive.sh +16 -22
  118. package/scripts/sw-prep.sh +6 -16
  119. package/scripts/sw-ps.sh +1 -11
  120. package/scripts/sw-public-dashboard.sh +2 -12
  121. package/scripts/sw-quality.sh +77 -10
  122. package/scripts/sw-reaper.sh +1 -11
  123. package/scripts/sw-recruit.sh +15 -25
  124. package/scripts/sw-regression.sh +11 -21
  125. package/scripts/sw-release-manager.sh +19 -28
  126. package/scripts/sw-release.sh +8 -16
  127. package/scripts/sw-remote.sh +1 -11
  128. package/scripts/sw-replay.sh +48 -44
  129. package/scripts/sw-retro.sh +70 -92
  130. package/scripts/sw-review-rerun.sh +1 -1
  131. package/scripts/sw-scale.sh +109 -32
  132. package/scripts/sw-security-audit.sh +12 -22
  133. package/scripts/sw-self-optimize.sh +239 -23
  134. package/scripts/sw-session.sh +3 -13
  135. package/scripts/sw-setup.sh +8 -18
  136. package/scripts/sw-standup.sh +5 -15
  137. package/scripts/sw-status.sh +32 -23
  138. package/scripts/sw-strategic.sh +129 -13
  139. package/scripts/sw-stream.sh +1 -11
  140. package/scripts/sw-swarm.sh +76 -36
  141. package/scripts/sw-team-stages.sh +10 -20
  142. package/scripts/sw-templates.sh +4 -14
  143. package/scripts/sw-testgen.sh +3 -13
  144. package/scripts/sw-tmux-pipeline.sh +1 -19
  145. package/scripts/sw-tmux-role-color.sh +0 -10
  146. package/scripts/sw-tmux-status.sh +3 -11
  147. package/scripts/sw-tmux.sh +2 -20
  148. package/scripts/sw-trace.sh +1 -19
  149. package/scripts/sw-tracker-github.sh +0 -10
  150. package/scripts/sw-tracker-jira.sh +1 -11
  151. package/scripts/sw-tracker-linear.sh +1 -11
  152. package/scripts/sw-tracker.sh +7 -24
  153. package/scripts/sw-triage.sh +24 -34
  154. package/scripts/sw-upgrade.sh +5 -23
  155. package/scripts/sw-ux.sh +1 -19
  156. package/scripts/sw-webhook.sh +18 -32
  157. package/scripts/sw-widgets.sh +3 -21
  158. package/scripts/sw-worktree.sh +11 -27
  159. package/scripts/update-homebrew-sha.sh +67 -0
  160. package/templates/pipelines/tdd.json +72 -0
  161. package/scripts/sw-pipeline.sh.mock +0 -7
package/scripts/sw-db.sh CHANGED
@@ -14,7 +14,7 @@ if [[ -n "${_SW_DB_LOADED:-}" ]] && [[ "${BASH_SOURCE[0]}" != "$0" ]]; then
14
14
  fi
15
15
  _SW_DB_LOADED=1
16
16
 
17
- VERSION="2.4.0"
17
+ VERSION="3.0.0"
18
18
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
19
19
  REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
20
20
 
@@ -42,20 +42,10 @@ if [[ "$(type -t emit_event 2>/dev/null)" != "function" ]]; then
42
42
  echo "${payload}}" >> "${HOME}/.shipwright/events.jsonl"
43
43
  }
44
44
  fi
45
- CYAN="${CYAN:-\033[38;2;0;212;255m}"
46
- PURPLE="${PURPLE:-\033[38;2;124;58;237m}"
47
- BLUE="${BLUE:-\033[38;2;0;102;255m}"
48
- GREEN="${GREEN:-\033[38;2;74;222;128m}"
49
- YELLOW="${YELLOW:-\033[38;2;250;204;21m}"
50
- RED="${RED:-\033[38;2;248;113;113m}"
51
- DIM="${DIM:-\033[2m}"
52
- BOLD="${BOLD:-\033[1m}"
53
- RESET="${RESET:-\033[0m}"
54
-
55
45
  # ─── Database Configuration ──────────────────────────────────────────────────
56
46
  DB_DIR="${HOME}/.shipwright"
57
47
  DB_FILE="${DB_DIR}/shipwright.db"
58
- SCHEMA_VERSION=2
48
+ SCHEMA_VERSION=6
59
49
 
60
50
  # JSON fallback paths
61
51
  EVENTS_FILE="${DB_DIR}/events.jsonl"
@@ -89,7 +79,7 @@ check_sqlite3() {
89
79
  # Cache the result to avoid repeated command lookups
90
80
  if [[ -z "$_SQLITE3_CHECKED" ]]; then
91
81
  _SQLITE3_CHECKED=1
92
- if command -v sqlite3 &>/dev/null; then
82
+ if command -v sqlite3 >/dev/null 2>&1; then
93
83
  _SQLITE3_AVAILABLE=1
94
84
  else
95
85
  _SQLITE3_AVAILABLE=""
@@ -244,6 +234,12 @@ CREATE TABLE IF NOT EXISTS metrics (
244
234
  -- Phase 1: New tables for state migration
245
235
  -- ═══════════════════════════════════════════════════════════════════════
246
236
 
237
+ -- Daemon queue (issue keys waiting for a slot)
238
+ CREATE TABLE IF NOT EXISTS daemon_queue (
239
+ issue_key TEXT PRIMARY KEY,
240
+ added_at TEXT NOT NULL
241
+ );
242
+
247
243
  -- Daemon state (replaces daemon-state.json)
248
244
  CREATE TABLE IF NOT EXISTS daemon_state (
249
245
  id INTEGER PRIMARY KEY AUTOINCREMENT,
@@ -317,6 +313,50 @@ CREATE TABLE IF NOT EXISTS memory_failures (
317
313
  synced INTEGER DEFAULT 0
318
314
  );
319
315
 
316
+ -- Memory: patterns (replaces memory/*/patterns.json)
317
+ CREATE TABLE IF NOT EXISTS memory_patterns (
318
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
319
+ repo_hash TEXT NOT NULL,
320
+ pattern_type TEXT NOT NULL,
321
+ pattern_key TEXT NOT NULL,
322
+ description TEXT,
323
+ frequency INTEGER DEFAULT 1,
324
+ confidence REAL DEFAULT 0.5,
325
+ last_seen_at TEXT NOT NULL,
326
+ created_at TEXT NOT NULL,
327
+ metadata TEXT,
328
+ synced INTEGER DEFAULT 0,
329
+ UNIQUE(repo_hash, pattern_type, pattern_key)
330
+ );
331
+
332
+ -- Memory: decisions (replaces memory/*/decisions.json)
333
+ CREATE TABLE IF NOT EXISTS memory_decisions (
334
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
335
+ repo_hash TEXT NOT NULL,
336
+ decision_type TEXT NOT NULL,
337
+ context TEXT NOT NULL,
338
+ decision TEXT NOT NULL,
339
+ outcome TEXT,
340
+ confidence REAL DEFAULT 0.5,
341
+ created_at TEXT NOT NULL,
342
+ updated_at TEXT NOT NULL,
343
+ metadata TEXT,
344
+ synced INTEGER DEFAULT 0
345
+ );
346
+
347
+ -- Memory: embeddings for semantic search
348
+ CREATE TABLE IF NOT EXISTS memory_embeddings (
349
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
350
+ content_hash TEXT UNIQUE NOT NULL,
351
+ source_type TEXT NOT NULL,
352
+ source_id INTEGER,
353
+ content_text TEXT NOT NULL,
354
+ embedding BLOB,
355
+ repo_hash TEXT,
356
+ created_at TEXT NOT NULL,
357
+ synced INTEGER DEFAULT 0
358
+ );
359
+
320
360
  -- ═══════════════════════════════════════════════════════════════════════
321
361
  -- Sync tables
322
362
  -- ═══════════════════════════════════════════════════════════════════════
@@ -354,6 +394,7 @@ CREATE INDEX IF NOT EXISTS idx_developers_name ON developers(name);
354
394
  CREATE INDEX IF NOT EXISTS idx_sessions_status ON sessions(status);
355
395
  CREATE INDEX IF NOT EXISTS idx_metrics_job_id ON metrics(job_id);
356
396
  CREATE INDEX IF NOT EXISTS idx_metrics_type ON metrics(metric_type);
397
+ CREATE INDEX IF NOT EXISTS idx_daemon_queue_added ON daemon_queue(added_at);
357
398
  CREATE INDEX IF NOT EXISTS idx_daemon_state_status ON daemon_state(status);
358
399
  CREATE INDEX IF NOT EXISTS idx_daemon_state_job ON daemon_state(job_id);
359
400
  CREATE INDEX IF NOT EXISTS idx_cost_entries_epoch ON cost_entries(ts_epoch DESC);
@@ -361,7 +402,73 @@ CREATE INDEX IF NOT EXISTS idx_cost_entries_synced ON cost_entries(synced) WHERE
361
402
  CREATE INDEX IF NOT EXISTS idx_heartbeats_job ON heartbeats(job_id);
362
403
  CREATE INDEX IF NOT EXISTS idx_memory_failures_repo ON memory_failures(repo_hash);
363
404
  CREATE INDEX IF NOT EXISTS idx_memory_failures_class ON memory_failures(failure_class);
405
+ CREATE INDEX IF NOT EXISTS idx_memory_patterns_repo ON memory_patterns(repo_hash);
406
+ CREATE INDEX IF NOT EXISTS idx_memory_patterns_type ON memory_patterns(pattern_type);
407
+ CREATE INDEX IF NOT EXISTS idx_memory_decisions_repo ON memory_decisions(repo_hash);
408
+ CREATE INDEX IF NOT EXISTS idx_memory_decisions_type ON memory_decisions(decision_type);
409
+ CREATE INDEX IF NOT EXISTS idx_memory_embeddings_hash ON memory_embeddings(content_hash);
410
+ CREATE INDEX IF NOT EXISTS idx_memory_embeddings_source ON memory_embeddings(source_type);
411
+ CREATE INDEX IF NOT EXISTS idx_memory_embeddings_repo ON memory_embeddings(repo_hash);
364
412
  CREATE INDEX IF NOT EXISTS idx_sync_log_unsynced ON _sync_log(synced) WHERE synced = 0;
413
+
414
+ -- Event consumer offset tracking
415
+ CREATE TABLE IF NOT EXISTS event_consumers (
416
+ consumer_id TEXT PRIMARY KEY,
417
+ last_event_id INTEGER NOT NULL DEFAULT 0,
418
+ last_consumed_at TEXT NOT NULL
419
+ );
420
+ CREATE INDEX IF NOT EXISTS idx_event_consumers_id ON event_consumers(consumer_id);
421
+
422
+ -- Outcome-based learning (Thompson sampling, UCB1)
423
+ CREATE TABLE IF NOT EXISTS pipeline_outcomes (
424
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
425
+ job_id TEXT UNIQUE NOT NULL,
426
+ issue_number TEXT,
427
+ template TEXT,
428
+ success INTEGER NOT NULL DEFAULT 0,
429
+ duration_secs INTEGER DEFAULT 0,
430
+ retry_count INTEGER DEFAULT 0,
431
+ cost_usd REAL DEFAULT 0,
432
+ complexity TEXT DEFAULT 'medium',
433
+ created_at TEXT NOT NULL
434
+ );
435
+
436
+ CREATE TABLE IF NOT EXISTS model_outcomes (
437
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
438
+ model TEXT NOT NULL,
439
+ stage TEXT NOT NULL,
440
+ success INTEGER NOT NULL DEFAULT 0,
441
+ duration_secs INTEGER DEFAULT 0,
442
+ cost_usd REAL DEFAULT 0,
443
+ created_at TEXT NOT NULL
444
+ );
445
+
446
+ CREATE INDEX IF NOT EXISTS idx_pipeline_outcomes_template ON pipeline_outcomes(template);
447
+ CREATE INDEX IF NOT EXISTS idx_pipeline_outcomes_complexity ON pipeline_outcomes(complexity);
448
+ CREATE INDEX IF NOT EXISTS idx_model_outcomes_model_stage ON model_outcomes(model, stage);
449
+
450
+ -- Durable workflow checkpoints
451
+ CREATE TABLE IF NOT EXISTS durable_checkpoints (
452
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
453
+ workflow_id TEXT NOT NULL,
454
+ checkpoint_data TEXT NOT NULL,
455
+ created_at TEXT NOT NULL,
456
+ UNIQUE(workflow_id)
457
+ );
458
+
459
+ -- Reasoning traces for multi-step autonomous pipelines
460
+ CREATE TABLE IF NOT EXISTS reasoning_traces (
461
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
462
+ job_id TEXT NOT NULL,
463
+ step_name TEXT NOT NULL,
464
+ input_context TEXT,
465
+ reasoning TEXT,
466
+ output_decision TEXT,
467
+ confidence REAL,
468
+ created_at TEXT NOT NULL,
469
+ FOREIGN KEY (job_id) REFERENCES pipeline_runs(job_id)
470
+ );
471
+ CREATE INDEX IF NOT EXISTS idx_reasoning_traces_job ON reasoning_traces(job_id);
365
472
  SCHEMA
366
473
  }
367
474
 
@@ -403,6 +510,138 @@ migrate_schema() {
403
510
  _db_exec "INSERT OR IGNORE INTO _sync_metadata (key, value, updated_at) VALUES ('device_id', '$(uname -n)-$$-$(now_epoch)', '$(now_iso)');"
404
511
  success "Migrated to schema v2"
405
512
  fi
513
+
514
+ # Migration from v2 → v3: add memory_patterns, memory_decisions, memory_embeddings
515
+ if [[ "$current_version" -lt 3 ]]; then
516
+ info "Migrating schema v${current_version} → v3..."
517
+ sqlite3 "$DB_FILE" "
518
+ CREATE TABLE IF NOT EXISTS memory_patterns (
519
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
520
+ repo_hash TEXT NOT NULL,
521
+ pattern_type TEXT NOT NULL,
522
+ pattern_key TEXT NOT NULL,
523
+ description TEXT,
524
+ frequency INTEGER DEFAULT 1,
525
+ confidence REAL DEFAULT 0.5,
526
+ last_seen_at TEXT NOT NULL,
527
+ created_at TEXT NOT NULL,
528
+ metadata TEXT,
529
+ synced INTEGER DEFAULT 0,
530
+ UNIQUE(repo_hash, pattern_type, pattern_key)
531
+ );
532
+ CREATE TABLE IF NOT EXISTS memory_decisions (
533
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
534
+ repo_hash TEXT NOT NULL,
535
+ decision_type TEXT NOT NULL,
536
+ context TEXT NOT NULL,
537
+ decision TEXT NOT NULL,
538
+ outcome TEXT,
539
+ confidence REAL DEFAULT 0.5,
540
+ created_at TEXT NOT NULL,
541
+ updated_at TEXT NOT NULL,
542
+ metadata TEXT,
543
+ synced INTEGER DEFAULT 0
544
+ );
545
+ CREATE TABLE IF NOT EXISTS memory_embeddings (
546
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
547
+ content_hash TEXT UNIQUE NOT NULL,
548
+ source_type TEXT NOT NULL,
549
+ source_id INTEGER,
550
+ content_text TEXT NOT NULL,
551
+ embedding BLOB,
552
+ repo_hash TEXT,
553
+ created_at TEXT NOT NULL,
554
+ synced INTEGER DEFAULT 0
555
+ );
556
+ CREATE INDEX IF NOT EXISTS idx_memory_patterns_repo ON memory_patterns(repo_hash);
557
+ CREATE INDEX IF NOT EXISTS idx_memory_patterns_type ON memory_patterns(pattern_type);
558
+ CREATE INDEX IF NOT EXISTS idx_memory_decisions_repo ON memory_decisions(repo_hash);
559
+ CREATE INDEX IF NOT EXISTS idx_memory_decisions_type ON memory_decisions(decision_type);
560
+ CREATE INDEX IF NOT EXISTS idx_memory_embeddings_hash ON memory_embeddings(content_hash);
561
+ CREATE INDEX IF NOT EXISTS idx_memory_embeddings_source ON memory_embeddings(source_type);
562
+ CREATE INDEX IF NOT EXISTS idx_memory_embeddings_repo ON memory_embeddings(repo_hash);
563
+ "
564
+ _db_exec "INSERT OR REPLACE INTO _schema (version, created_at, applied_at) VALUES (3, '$(now_iso)', '$(now_iso)');"
565
+ success "Migrated to schema v3"
566
+ fi
567
+
568
+ # Migration from v3 → v4: event_consumers, durable_checkpoints
569
+ if [[ "$current_version" -lt 4 ]]; then
570
+ info "Migrating schema v${current_version} → v4..."
571
+ sqlite3 "$DB_FILE" "
572
+ CREATE TABLE IF NOT EXISTS event_consumers (
573
+ consumer_id TEXT PRIMARY KEY,
574
+ last_event_id INTEGER NOT NULL DEFAULT 0,
575
+ last_consumed_at TEXT NOT NULL
576
+ );
577
+ CREATE INDEX IF NOT EXISTS idx_event_consumers_id ON event_consumers(consumer_id);
578
+ CREATE TABLE IF NOT EXISTS durable_checkpoints (
579
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
580
+ workflow_id TEXT NOT NULL,
581
+ checkpoint_data TEXT NOT NULL,
582
+ created_at TEXT NOT NULL,
583
+ UNIQUE(workflow_id)
584
+ );
585
+ "
586
+ _db_exec "INSERT OR REPLACE INTO _schema (version, created_at, applied_at) VALUES (4, '$(now_iso)', '$(now_iso)');"
587
+ success "Migrated to schema v4"
588
+ fi
589
+
590
+ # Migration from v4 → v5: pipeline_outcomes, model_outcomes for outcome-based learning
591
+ if [[ "$current_version" -lt 5 ]]; then
592
+ info "Migrating schema v${current_version} → v5..."
593
+ sqlite3 "$DB_FILE" "
594
+ CREATE TABLE IF NOT EXISTS pipeline_outcomes (
595
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
596
+ job_id TEXT UNIQUE NOT NULL,
597
+ issue_number TEXT,
598
+ template TEXT,
599
+ success INTEGER NOT NULL DEFAULT 0,
600
+ duration_secs INTEGER DEFAULT 0,
601
+ retry_count INTEGER DEFAULT 0,
602
+ cost_usd REAL DEFAULT 0,
603
+ complexity TEXT DEFAULT 'medium',
604
+ created_at TEXT NOT NULL
605
+ );
606
+
607
+ CREATE TABLE IF NOT EXISTS model_outcomes (
608
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
609
+ model TEXT NOT NULL,
610
+ stage TEXT NOT NULL,
611
+ success INTEGER NOT NULL DEFAULT 0,
612
+ duration_secs INTEGER DEFAULT 0,
613
+ cost_usd REAL DEFAULT 0,
614
+ created_at TEXT NOT NULL
615
+ );
616
+
617
+ CREATE INDEX IF NOT EXISTS idx_pipeline_outcomes_template ON pipeline_outcomes(template);
618
+ CREATE INDEX IF NOT EXISTS idx_pipeline_outcomes_complexity ON pipeline_outcomes(complexity);
619
+ CREATE INDEX IF NOT EXISTS idx_model_outcomes_model_stage ON model_outcomes(model, stage);
620
+ "
621
+ _db_exec "INSERT OR REPLACE INTO _schema (version, created_at, applied_at) VALUES (5, '$(now_iso)', '$(now_iso)');"
622
+ success "Migrated to schema v5"
623
+ fi
624
+
625
+ # Migration from v5 → v6: reasoning_traces for multi-step autonomous reasoning
626
+ if [[ "$current_version" -lt 6 ]]; then
627
+ info "Migrating schema v${current_version} → v6..."
628
+ sqlite3 "$DB_FILE" "
629
+ CREATE TABLE IF NOT EXISTS reasoning_traces (
630
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
631
+ job_id TEXT NOT NULL,
632
+ step_name TEXT NOT NULL,
633
+ input_context TEXT,
634
+ reasoning TEXT,
635
+ output_decision TEXT,
636
+ confidence REAL,
637
+ created_at TEXT NOT NULL,
638
+ FOREIGN KEY (job_id) REFERENCES pipeline_runs(job_id)
639
+ );
640
+ CREATE INDEX IF NOT EXISTS idx_reasoning_traces_job ON reasoning_traces(job_id);
641
+ "
642
+ _db_exec "INSERT OR REPLACE INTO _schema (version, created_at, applied_at) VALUES (6, '$(now_iso)', '$(now_iso)');"
643
+ success "Migrated to schema v6"
644
+ fi
406
645
  }
407
646
 
408
647
  # ═══════════════════════════════════════════════════════════════════════════
@@ -442,6 +681,116 @@ db_add_event() {
442
681
  _db_exec "INSERT OR IGNORE INTO events (ts, ts_epoch, type, job_id, stage, status, duration_secs, metadata, created_at, synced) VALUES ('${ts}', ${ts_epoch}, '${event_type}', '${job_id}', '${stage}', '${status}', ${duration_secs}, '${metadata}', '${ts}', 0);" || return 1
443
682
  }
444
683
 
684
+ # ═══════════════════════════════════════════════════════════════════════════
685
+ # Event Query Functions (dual-read: SQLite preferred, JSONL fallback)
686
+ # ═══════════════════════════════════════════════════════════════════════════
687
+
688
+ # db_query_events [filter] [limit] — Query events, SQLite when available else JSONL
689
+ # Output: JSON array of events. Uses duration_secs AS duration_s for compat.
690
+ db_query_events() {
691
+ local filter="${1:-}"
692
+ local limit="${2:-5000}"
693
+ local db_file="${DB_FILE:-$HOME/.shipwright/shipwright.db}"
694
+
695
+ if [[ -f "$db_file" ]] && command -v sqlite3 &>/dev/null; then
696
+ local where_clause=""
697
+ [[ -n "$filter" ]] && where_clause="WHERE type = '$filter'"
698
+ local result
699
+ result=$(sqlite3 -json "$db_file" "SELECT ts, ts_epoch, type, job_id, stage, status, duration_secs, metadata FROM events $where_clause ORDER BY ts_epoch DESC LIMIT $limit" 2>/dev/null) || true
700
+ if [[ -n "$result" ]]; then
701
+ echo "$result" | jq -c '
702
+ map(. + {duration_s: (.duration_secs // 0), result: (.result // .status)} + ((.metadata | if type == "string" then (fromjson? // {}) else {} end) // {}))
703
+ | map(del(.duration_secs, .metadata))
704
+ ' 2>/dev/null || echo "$result"
705
+ return 0
706
+ fi
707
+ fi
708
+
709
+ # Fallback to JSONL
710
+ local events_file="${EVENTS_FILE:-$HOME/.shipwright/events.jsonl}"
711
+ [[ ! -f "$events_file" ]] && echo "[]" && return 0
712
+ if [[ -n "$filter" ]]; then
713
+ grep -F "\"type\":\"$filter\"" "$events_file" 2>/dev/null | tail -n "$limit" | jq -s '.' 2>/dev/null || echo "[]"
714
+ else
715
+ tail -n "$limit" "$events_file" | jq -s '.' 2>/dev/null || echo "[]"
716
+ fi
717
+ }
718
+
719
+ # db_query_events_since <since_epoch> [event_type] [to_epoch] — Events in time range
720
+ # Output: JSON array. SQLite when available else JSONL.
721
+ db_query_events_since() {
722
+ local since_epoch="$1"
723
+ local event_type="${2:-}"
724
+ local to_epoch="${3:-}"
725
+ local db_file="${DB_FILE:-$HOME/.shipwright/shipwright.db}"
726
+
727
+ if [[ -f "$db_file" ]] && command -v sqlite3 &>/dev/null; then
728
+ local type_filter=""
729
+ [[ -n "$event_type" ]] && type_filter="AND type = '$event_type'"
730
+ local to_filter=""
731
+ [[ -n "$to_epoch" ]] && to_filter="AND ts_epoch <= $to_epoch"
732
+ local result
733
+ result=$(sqlite3 -json "$db_file" "SELECT ts, ts_epoch, type, job_id, stage, status, duration_secs, metadata FROM events WHERE ts_epoch >= $since_epoch $type_filter $to_filter ORDER BY ts_epoch DESC" 2>/dev/null) || true
734
+ if [[ -n "$result" ]]; then
735
+ echo "$result" | jq -c '
736
+ map(. + {duration_s: (.duration_secs // 0), result: (.result // .status)} + ((.metadata | if type == "string" then (fromjson? // {}) else {} end) // {}))
737
+ | map(del(.duration_secs, .metadata))
738
+ ' 2>/dev/null || echo "$result"
739
+ return 0
740
+ fi
741
+ fi
742
+
743
+ # JSONL fallback (DB not available or query failed)
744
+ local events_file="${EVENTS_FILE:-$HOME/.shipwright/events.jsonl}"
745
+ [[ ! -f "$events_file" ]] && echo "[]" && return 0
746
+ local to=${to_epoch:-9999999999}
747
+ if [[ -n "$event_type" ]]; then
748
+ grep '^{' "$events_file" 2>/dev/null | jq -s --argjson from "$since_epoch" --argjson to "$to" --arg t "$event_type" '
749
+ map(select(. != null and .ts_epoch != null)) |
750
+ map(select(.ts_epoch >= $from and .ts_epoch <= $to and .type == $t))
751
+ ' 2>/dev/null || echo "[]"
752
+ else
753
+ grep '^{' "$events_file" 2>/dev/null | jq -s --argjson from "$since_epoch" --argjson to "$to" '
754
+ map(select(. != null and .ts_epoch != null)) |
755
+ map(select(.ts_epoch >= $from and .ts_epoch <= $to))
756
+ ' 2>/dev/null || echo "[]"
757
+ fi
758
+ }
759
+
760
+ # db_get_consumer_offset <consumer_id> — returns last_event_id or "0"
761
+ db_get_consumer_offset() {
762
+ local consumer_id="$1"
763
+ consumer_id="${consumer_id//\'/\'\'}"
764
+ _db_query "SELECT last_event_id FROM event_consumers WHERE consumer_id = '${consumer_id}';" 2>/dev/null || echo "0"
765
+ }
766
+
767
+ # db_set_consumer_offset <consumer_id> <last_event_id>
768
+ db_set_consumer_offset() {
769
+ local consumer_id="$1"
770
+ local last_event_id="$2"
771
+ consumer_id="${consumer_id//\'/\'\'}"
772
+ _db_exec "INSERT OR REPLACE INTO event_consumers (consumer_id, last_event_id, last_consumed_at) VALUES ('${consumer_id}', ${last_event_id}, '$(now_iso)');"
773
+ }
774
+
775
+ # db_save_checkpoint <workflow_id> <data> — durable workflow checkpoint
776
+ db_save_checkpoint() {
777
+ local workflow_id="$1"
778
+ local data="$2"
779
+ workflow_id="${workflow_id//\'/\'\'}"
780
+ data="${data//$'\n'/ }"
781
+ data="${data//\'/\'\'}"
782
+ if ! db_available; then return 1; fi
783
+ _db_exec "INSERT OR REPLACE INTO durable_checkpoints (workflow_id, checkpoint_data, created_at) VALUES ('${workflow_id}', '${data}', '$(now_iso)');"
784
+ }
785
+
786
+ # db_load_checkpoint <workflow_id> — returns checkpoint_data or empty
787
+ db_load_checkpoint() {
788
+ local workflow_id="$1"
789
+ workflow_id="${workflow_id//\'/\'\'}"
790
+ if ! db_available; then return 1; fi
791
+ _db_query "SELECT checkpoint_data FROM durable_checkpoints WHERE workflow_id = '${workflow_id}';" 2>/dev/null || echo ""
792
+ }
793
+
445
794
  # Legacy positional API (backward compat with existing add_event calls)
446
795
  add_event() {
447
796
  local event_type="$1"
@@ -564,6 +913,44 @@ db_remove_active_job() {
564
913
  _db_exec "DELETE FROM daemon_state WHERE job_id = '${job_id}' AND status = 'active';"
565
914
  }
566
915
 
916
+ # db_enqueue_issue <issue_key> — add to daemon queue
917
+ db_enqueue_issue() {
918
+ local issue_key="$1"
919
+ if ! db_available; then return 1; fi
920
+ issue_key="${issue_key//\'/\'\'}"
921
+ _db_exec "INSERT OR REPLACE INTO daemon_queue (issue_key, added_at) VALUES ('${issue_key}', '$(now_iso)');"
922
+ }
923
+
924
+ # db_dequeue_next — returns first issue_key and removes it, empty if none
925
+ db_dequeue_next() {
926
+ if ! db_available; then echo ""; return 0; fi
927
+ local next escaped
928
+ next=$(_db_query "SELECT issue_key FROM daemon_queue ORDER BY added_at ASC LIMIT 1;" || echo "")
929
+ if [[ -n "$next" ]]; then
930
+ escaped="${next//\'/\'\'}"
931
+ _db_exec "DELETE FROM daemon_queue WHERE issue_key = '${escaped}';" 2>/dev/null || true
932
+ echo "$next"
933
+ fi
934
+ }
935
+
936
+ # db_is_issue_queued <issue_key> — returns 0 if queued, 1 if not
937
+ db_is_issue_queued() {
938
+ local issue_key="$1"
939
+ if ! db_available; then return 1; fi
940
+ issue_key="${issue_key//\'/\'\'}"
941
+ local count
942
+ count=$(_db_query "SELECT COUNT(*) FROM daemon_queue WHERE issue_key = '${issue_key}';")
943
+ [[ "${count:-0}" -gt 0 ]]
944
+ }
945
+
946
+ # db_remove_from_queue <issue_key> — remove specific key from queue
947
+ db_remove_from_queue() {
948
+ local issue_key="$1"
949
+ if ! db_available; then return 1; fi
950
+ issue_key="${issue_key//\'/\'\'}"
951
+ _db_exec "DELETE FROM daemon_queue WHERE issue_key = '${issue_key}';"
952
+ }
953
+
567
954
  # db_daemon_summary — outputs JSON summary for status dashboard
568
955
  db_daemon_summary() {
569
956
  if ! db_available; then echo "{}"; return 0; fi
@@ -579,6 +966,23 @@ db_daemon_summary() {
579
966
  # Cost Functions (replaces costs.json)
580
967
  # ═══════════════════════════════════════════════════════════════════════════
581
968
 
969
+ # Record pipeline outcome for learning (Thompson sampling, optimize_tune_templates)
970
+ # db_record_outcome <job_id> [issue] [template] [success] [duration_secs] [retries] [cost_usd] [complexity]
971
+ db_record_outcome() {
972
+ local job_id="$1" issue="${2:-}" template="${3:-}" success="${4:-1}"
973
+ local duration="${5:-0}" retries="${6:-0}" cost="${7:-0}" complexity="${8:-medium}"
974
+
975
+ if ! db_available; then return 1; fi
976
+
977
+ job_id="${job_id//\'/\'\'}"
978
+ issue="${issue//\'/\'\'}"
979
+ template="${template//\'/\'\'}"
980
+
981
+ _db_exec "INSERT OR REPLACE INTO pipeline_outcomes
982
+ (job_id, issue_number, template, success, duration_secs, retry_count, cost_usd, complexity, created_at)
983
+ VALUES ('$job_id', '$issue', '$template', $success, $duration, $retries, $cost, '$complexity', '$(now_iso)');"
984
+ }
985
+
582
986
  # db_record_cost <input_tokens> <output_tokens> <model> <cost_usd> <stage> [issue]
583
987
  db_record_cost() {
584
988
  local input_tokens="${1:-0}"
@@ -753,6 +1157,93 @@ db_query_similar_failures() {
753
1157
  _db_query "SELECT json_group_array(json_object('failure_class', failure_class, 'error_signature', error_signature, 'root_cause', root_cause, 'fix_description', fix_description, 'file_path', file_path, 'occurrences', occurrences, 'last_seen_at', last_seen_at)) FROM (SELECT * FROM memory_failures ${where_clause} ORDER BY occurrences DESC, last_seen_at DESC LIMIT ${limit});" || echo "[]"
754
1158
  }
755
1159
 
1160
# Memory patterns
# db_save_pattern <repo_hash> <pattern_type> <pattern_key> [description] [metadata]
# Upserts a pattern row; on conflict bumps frequency and refreshes last_seen_at.
# An empty description on update keeps the existing one (COALESCE/NULLIF).
db_save_pattern() {
  local repo_hash="$1" pattern_type="$2" pattern_key="$3" description="${4:-}" metadata="${5:-}"
  if ! db_available; then return 1; fi
  # SQL-escape ALL interpolated strings — the first three were previously
  # passed through unescaped, an injection vector.
  repo_hash="${repo_hash//\'/\'\'}"
  pattern_type="${pattern_type//\'/\'\'}"
  pattern_key="${pattern_key//\'/\'\'}"
  description="${description//\'/\'\'}"
  metadata="${metadata//\'/\'\'}"
  _db_exec "INSERT INTO memory_patterns (repo_hash, pattern_type, pattern_key, description, last_seen_at, created_at, metadata)
    VALUES ('$repo_hash', '$pattern_type', '$pattern_key', '$description', '$(now_iso)', '$(now_iso)', '$metadata')
    ON CONFLICT(repo_hash, pattern_type, pattern_key) DO UPDATE SET
      frequency = frequency + 1, last_seen_at = '$(now_iso)', description = COALESCE(NULLIF('$description',''), description);"
}
1171
+
1172
# db_query_patterns <repo_hash> [pattern_type] [limit]
# Emits a JSON array of matching memory_patterns rows, most frequent first.
# Prints "[]" (and returns 0) when the DB is unavailable or the query fails.
db_query_patterns() {
  local repo_hash="$1" pattern_type="${2:-}" limit="${3:-20}"
  if ! db_available; then echo "[]"; return 0; fi
  # SQL-escape interpolated strings (previously unescaped) and force the
  # unquoted LIMIT operand to be a positive integer.
  repo_hash="${repo_hash//\'/\'\'}"
  pattern_type="${pattern_type//\'/\'\'}"
  [[ "$limit" =~ ^[0-9]+$ ]] || limit=20
  local where="WHERE repo_hash = '$repo_hash'"
  [[ -n "$pattern_type" ]] && where="$where AND pattern_type = '$pattern_type'"
  _db_query -json "SELECT * FROM memory_patterns $where ORDER BY frequency DESC, last_seen_at DESC LIMIT $limit;" || echo "[]"
}
1179
+
1180
# Memory decisions
# db_save_decision <repo_hash> <decision_type> <context> <decision> [metadata]
# Inserts a new decision row stamped with the current time.
db_save_decision() {
  local repo_hash="$1" decision_type="$2" context="$3" decision="$4" metadata="${5:-}"
  if ! db_available; then return 1; fi
  # SQL-escape ALL interpolated strings — repo_hash and decision_type were
  # previously passed through unescaped, an injection vector.
  repo_hash="${repo_hash//\'/\'\'}"
  decision_type="${decision_type//\'/\'\'}"
  context="${context//\'/\'\'}"
  decision="${decision//\'/\'\'}"
  metadata="${metadata//\'/\'\'}"
  _db_exec "INSERT INTO memory_decisions (repo_hash, decision_type, context, decision, created_at, updated_at, metadata)
    VALUES ('$repo_hash', '$decision_type', '$context', '$decision', '$(now_iso)', '$(now_iso)', '$metadata');"
}
1190
+
1191
# db_update_decision_outcome <decision_id> <outcome> [confidence]
# Records the observed outcome for a previously saved decision; optionally
# updates the numeric confidence score. Returns 1 on bad id or no DB.
db_update_decision_outcome() {
  local decision_id="$1" outcome="$2" confidence="${3:-}"
  if ! db_available; then return 1; fi
  # decision_id is interpolated unquoted into the WHERE clause — it MUST be
  # an integer or this becomes an injection vector / broken SQL.
  [[ "$decision_id" =~ ^[0-9]+$ ]] || return 1
  outcome="${outcome//\'/\'\'}"
  local set_clause="outcome = '$outcome', updated_at = '$(now_iso)'"
  # confidence is likewise unquoted: only accept a plain number.
  if [[ -n "$confidence" && "$confidence" =~ ^-?[0-9]+(\.[0-9]+)?$ ]]; then
    set_clause="$set_clause, confidence = $confidence"
  fi
  _db_exec "UPDATE memory_decisions SET $set_clause WHERE id = $decision_id;"
}
1199
+
1200
# db_query_decisions <repo_hash> [decision_type] [limit]
# Emits a JSON array of matching memory_decisions rows, newest update first.
# Prints "[]" (and returns 0) when the DB is unavailable or the query fails.
db_query_decisions() {
  local repo_hash="$1" decision_type="${2:-}" limit="${3:-20}"
  if ! db_available; then echo "[]"; return 0; fi
  # SQL-escape interpolated strings (previously unescaped) and force the
  # unquoted LIMIT operand to be a positive integer.
  repo_hash="${repo_hash//\'/\'\'}"
  decision_type="${decision_type//\'/\'\'}"
  [[ "$limit" =~ ^[0-9]+$ ]] || limit=20
  local where="WHERE repo_hash = '$repo_hash'"
  [[ -n "$decision_type" ]] && where="$where AND decision_type = '$decision_type'"
  _db_query -json "SELECT * FROM memory_decisions $where ORDER BY updated_at DESC LIMIT $limit;" || echo "[]"
}
1207
+
1208
# Memory embeddings
# db_save_embedding <content_hash> <source_type> <content_text> [repo_hash]
# Inserts a row keyed by content_hash; duplicates are silently ignored
# (INSERT OR IGNORE), making the call idempotent per hash.
db_save_embedding() {
  local content_hash="$1" source_type="$2" content_text="$3" repo_hash="${4:-}"
  if ! db_available; then return 1; fi
  # SQL-escape ALL interpolated strings — only content_text was previously
  # escaped; the other fields were an injection vector.
  content_hash="${content_hash//\'/\'\'}"
  source_type="${source_type//\'/\'\'}"
  content_text="${content_text//\'/\'\'}"
  repo_hash="${repo_hash//\'/\'\'}"
  _db_exec "INSERT OR IGNORE INTO memory_embeddings (content_hash, source_type, content_text, repo_hash, created_at)
    VALUES ('$content_hash', '$source_type', '$content_text', '$repo_hash', '$(now_iso)');"
}
1216
+
1217
# db_query_embeddings [source_type] [repo_hash] [limit]
# Emits a JSON array of memory_embeddings rows, newest first; both filters
# are optional. Prints "[]" when the DB is unavailable or the query fails.
db_query_embeddings() {
  local source_type="${1:-}" repo_hash="${2:-}" limit="${3:-50}"
  if ! db_available; then echo "[]"; return 0; fi
  # SQL-escape interpolated strings (previously unescaped) and force the
  # unquoted LIMIT operand to be a positive integer.
  source_type="${source_type//\'/\'\'}"
  repo_hash="${repo_hash//\'/\'\'}"
  [[ "$limit" =~ ^[0-9]+$ ]] || limit=50
  local where="WHERE 1=1"
  [[ -n "$source_type" ]] && where="$where AND source_type = '$source_type'"
  [[ -n "$repo_hash" ]] && where="$where AND repo_hash = '$repo_hash'"
  _db_query -json "SELECT id, content_hash, source_type, content_text, repo_hash, created_at FROM memory_embeddings $where ORDER BY created_at DESC LIMIT $limit;" || echo "[]"
}
1225
+
1226
# Reasoning traces for multi-step autonomous pipelines
# db_save_reasoning_trace <job_id> <step_name> <input_context> <reasoning> <output_decision> [confidence]
# Appends one trace row for a pipeline step. Returns 1 when the DB is down.
db_save_reasoning_trace() {
  local job_id="$1" step_name="$2" input_context="$3" reasoning="$4" output_decision="$5" confidence="${6:-0.5}"
  # Bail out before doing any escaping work.
  if ! db_available; then return 1; fi
  # Parameter expansion instead of the previous echo|sed pipelines: no
  # subshells, and echo can mangle backslashes or a leading "-" in payloads.
  # This also matches the escaping idiom used everywhere else in this file.
  job_id="${job_id//\'/\'\'}"
  step_name="${step_name//\'/\'\'}"
  input_context="${input_context//\'/\'\'}"
  reasoning="${reasoning//\'/\'\'}"
  output_decision="${output_decision//\'/\'\'}"
  # confidence is interpolated unquoted — only accept a plain number.
  [[ "$confidence" =~ ^-?[0-9]+(\.[0-9]+)?$ ]] || confidence=0.5
  _db_exec "INSERT INTO reasoning_traces (job_id, step_name, input_context, reasoning, output_decision, confidence, created_at)
    VALUES ('$job_id', '$step_name', '$input_context', '$reasoning', '$output_decision', $confidence, '$(now_iso)');"
}
1239
+
1240
# db_query_reasoning_traces <job_id> — JSON array of a job's trace rows, oldest first
db_query_reasoning_traces() {
  # SQL-escape the key before interpolating it.
  local job="${1//\'/\'\'}"
  if ! db_available; then echo "[]"; return 0; fi
  _db_query -json "SELECT * FROM reasoning_traces WHERE job_id = '$job' ORDER BY id ASC;" || echo "[]"
}
1246
+
756
1247
  # ═══════════════════════════════════════════════════════════════════════════
757
1248
  # Pipeline Run Functions (enhanced from existing)
758
1249
  # ═══════════════════════════════════════════════════════════════════════════
@@ -868,7 +1359,7 @@ db_sync_push() {
868
1359
  local auth_header=""
869
1360
  [[ -n "${SYNC_TOKEN:-}" ]] && auth_header="-H 'Authorization: Bearer ${SYNC_TOKEN}'"
870
1361
 
871
- response=$(curl -s -w "%{http_code}" -o /dev/null \
1362
+ response=$(curl -s --connect-timeout 10 --max-time 30 -w "%{http_code}" -o /dev/null \
872
1363
  -X POST "${SYNC_URL}/api/sync/push" \
873
1364
  -H "Content-Type: application/json" \
874
1365
  ${auth_header} \
@@ -901,7 +1392,7 @@ db_sync_pull() {
901
1392
  [[ -n "${SYNC_TOKEN:-}" ]] && auth_header="-H 'Authorization: Bearer ${SYNC_TOKEN}'"
902
1393
 
903
1394
  local response_body
904
- response_body=$(curl -s \
1395
+ response_body=$(curl -s --connect-timeout 10 --max-time 30 \
905
1396
  "${SYNC_URL}/api/sync/pull?since=${last_sync}" \
906
1397
  -H "Accept: application/json" \
907
1398
  ${auth_header} 2>/dev/null || echo "{}")
@@ -6,7 +6,7 @@
6
6
  set -euo pipefail
7
7
  trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
8
8
 
9
- VERSION="2.4.0"
9
+ VERSION="3.0.0"
10
10
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
11
11
  REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
12
12
 
@@ -34,16 +34,6 @@ if [[ "$(type -t emit_event 2>/dev/null)" != "function" ]]; then
34
34
  echo "${payload}}" >> "${HOME}/.shipwright/events.jsonl"
35
35
  }
36
36
  fi
37
- CYAN="${CYAN:-\033[38;2;0;212;255m}"
38
- PURPLE="${PURPLE:-\033[38;2;124;58;237m}"
39
- BLUE="${BLUE:-\033[38;2;0;102;255m}"
40
- GREEN="${GREEN:-\033[38;2;74;222;128m}"
41
- YELLOW="${YELLOW:-\033[38;2;250;204;21m}"
42
- RED="${RED:-\033[38;2;248;113;113m}"
43
- DIM="${DIM:-\033[2m}"
44
- BOLD="${BOLD:-\033[1m}"
45
- RESET="${RESET:-\033[0m}"
46
-
47
37
  # ─── Structured Event Log ──────────────────────────────────────────────────
48
38
  EVENTS_FILE="${HOME}/.shipwright/events.jsonl"
49
39