@hasna/todos 0.11.6 → 0.11.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli/index.js CHANGED
@@ -3730,6 +3730,7 @@ var init_webhooks = __esm(() => {
3730
3730
  // src/db/templates.ts
3731
3731
  var exports_templates = {};
3732
3732
  __export(exports_templates, {
3733
+ updateTemplate: () => updateTemplate,
3733
3734
  taskFromTemplate: () => taskFromTemplate,
3734
3735
  listTemplates: () => listTemplates,
3735
3736
  getTemplate: () => getTemplate,
@@ -3744,6 +3745,9 @@ function rowToTemplate(row) {
3744
3745
  priority: row.priority || "medium"
3745
3746
  };
3746
3747
  }
3748
+ function resolveTemplateId(id, d) {
3749
+ return resolvePartialId(d, "task_templates", id);
3750
+ }
3747
3751
  function createTemplate(input, db) {
3748
3752
  const d = db || getDatabase();
3749
3753
  const id = uuid();
@@ -3764,7 +3768,10 @@ function createTemplate(input, db) {
3764
3768
  }
3765
3769
  function getTemplate(id, db) {
3766
3770
  const d = db || getDatabase();
3767
- const row = d.query("SELECT * FROM task_templates WHERE id = ?").get(id);
3771
+ const resolved = resolveTemplateId(id, d);
3772
+ if (!resolved)
3773
+ return null;
3774
+ const row = d.query("SELECT * FROM task_templates WHERE id = ?").get(resolved);
3768
3775
  return row ? rowToTemplate(row) : null;
3769
3776
  }
3770
3777
  function listTemplates(db) {
@@ -3773,7 +3780,55 @@ function listTemplates(db) {
3773
3780
  }
3774
3781
  function deleteTemplate(id, db) {
3775
3782
  const d = db || getDatabase();
3776
- return d.run("DELETE FROM task_templates WHERE id = ?", [id]).changes > 0;
3783
+ const resolved = resolveTemplateId(id, d);
3784
+ if (!resolved)
3785
+ return false;
3786
+ return d.run("DELETE FROM task_templates WHERE id = ?", [resolved]).changes > 0;
3787
+ }
3788
+ function updateTemplate(id, updates, db) {
3789
+ const d = db || getDatabase();
3790
+ const resolved = resolveTemplateId(id, d);
3791
+ if (!resolved)
3792
+ return null;
3793
+ const sets = [];
3794
+ const values = [];
3795
+ if (updates.name !== undefined) {
3796
+ sets.push("name = ?");
3797
+ values.push(updates.name);
3798
+ }
3799
+ if (updates.title_pattern !== undefined) {
3800
+ sets.push("title_pattern = ?");
3801
+ values.push(updates.title_pattern);
3802
+ }
3803
+ if (updates.description !== undefined) {
3804
+ sets.push("description = ?");
3805
+ values.push(updates.description);
3806
+ }
3807
+ if (updates.priority !== undefined) {
3808
+ sets.push("priority = ?");
3809
+ values.push(updates.priority);
3810
+ }
3811
+ if (updates.tags !== undefined) {
3812
+ sets.push("tags = ?");
3813
+ values.push(JSON.stringify(updates.tags));
3814
+ }
3815
+ if (updates.project_id !== undefined) {
3816
+ sets.push("project_id = ?");
3817
+ values.push(updates.project_id);
3818
+ }
3819
+ if (updates.plan_id !== undefined) {
3820
+ sets.push("plan_id = ?");
3821
+ values.push(updates.plan_id);
3822
+ }
3823
+ if (updates.metadata !== undefined) {
3824
+ sets.push("metadata = ?");
3825
+ values.push(JSON.stringify(updates.metadata));
3826
+ }
3827
+ if (sets.length === 0)
3828
+ return getTemplate(resolved, d);
3829
+ values.push(resolved);
3830
+ d.run(`UPDATE task_templates SET ${sets.join(", ")} WHERE id = ?`, values);
3831
+ return getTemplate(resolved, d);
3777
3832
  }
3778
3833
  function taskFromTemplate(templateId, overrides = {}, db) {
3779
3834
  const t = getTemplate(templateId, db);
@@ -10524,6 +10579,64 @@ var init_zod = __esm(() => {
10524
10579
  });
10525
10580
 
10526
10581
  // node_modules/@hasna/cloud/dist/index.js
10582
+ var exports_dist = {};
10583
+ __export(exports_dist, {
10584
+ translateSql: () => translateSql,
10585
+ translateParams: () => translateParams,
10586
+ translateDdl: () => translateDdl,
10587
+ syncPush: () => syncPush,
10588
+ syncPull: () => syncPull,
10589
+ storeConflicts: () => storeConflicts,
10590
+ setupAutoSync: () => setupAutoSync,
10591
+ sendFeedback: () => sendFeedback,
10592
+ saveFeedback: () => saveFeedback,
10593
+ saveCloudConfig: () => saveCloudConfig,
10594
+ runScheduledSync: () => runScheduledSync,
10595
+ resolveConflicts: () => resolveConflicts,
10596
+ resolveConflict: () => resolveConflict,
10597
+ resetSyncMeta: () => resetSyncMeta,
10598
+ resetAllSyncMeta: () => resetAllSyncMeta,
10599
+ removeSyncSchedule: () => removeSyncSchedule,
10600
+ registerSyncSchedule: () => registerSyncSchedule,
10601
+ registerCloudTools: () => registerCloudTools,
10602
+ registerCloudCommands: () => registerCloudCommands,
10603
+ purgeResolvedConflicts: () => purgeResolvedConflicts,
10604
+ parseInterval: () => parseInterval,
10605
+ minutesToCron: () => minutesToCron,
10606
+ migrateDotfile: () => migrateDotfile,
10607
+ listSqliteTables: () => listSqliteTables,
10608
+ listPgTables: () => listPgTables,
10609
+ listFeedback: () => listFeedback,
10610
+ listConflicts: () => listConflicts,
10611
+ incrementalSyncPush: () => incrementalSyncPush,
10612
+ incrementalSyncPull: () => incrementalSyncPull,
10613
+ hasLegacyDotfile: () => hasLegacyDotfile,
10614
+ getWinningData: () => getWinningData,
10615
+ getSyncScheduleStatus: () => getSyncScheduleStatus,
10616
+ getSyncMetaForTable: () => getSyncMetaForTable,
10617
+ getSyncMetaAll: () => getSyncMetaAll,
10618
+ getHasnaDir: () => getHasnaDir,
10619
+ getDbPath: () => getDbPath2,
10620
+ getDataDir: () => getDataDir,
10621
+ getConnectionString: () => getConnectionString,
10622
+ getConflict: () => getConflict,
10623
+ getConfigPath: () => getConfigPath2,
10624
+ getConfigDir: () => getConfigDir,
10625
+ getCloudConfig: () => getCloudConfig,
10626
+ getAutoSyncConfig: () => getAutoSyncConfig,
10627
+ ensureSyncMetaTable: () => ensureSyncMetaTable,
10628
+ ensureFeedbackTable: () => ensureFeedbackTable,
10629
+ ensureConflictsTable: () => ensureConflictsTable,
10630
+ enableAutoSync: () => enableAutoSync,
10631
+ discoverSyncableServices: () => discoverSyncableServices,
10632
+ detectConflicts: () => detectConflicts,
10633
+ createDatabase: () => createDatabase,
10634
+ SyncProgressTracker: () => SyncProgressTracker,
10635
+ SqliteAdapter: () => SqliteAdapter,
10636
+ PgAdapterAsync: () => PgAdapterAsync,
10637
+ PgAdapter: () => PgAdapter,
10638
+ CloudConfigSchema: () => CloudConfigSchema
10639
+ });
10527
10640
  import { createRequire } from "module";
10528
10641
  import { Database as Database2 } from "bun:sqlite";
10529
10642
  import { existsSync as existsSync22, mkdirSync as mkdirSync22, readFileSync as readFileSync5, writeFileSync as writeFileSync5 } from "fs";
@@ -10538,6 +10651,12 @@ import {
10538
10651
  import { homedir as homedir3 } from "os";
10539
10652
  import { join as join9, relative as relative2 } from "path";
10540
10653
  import { hostname } from "os";
10654
+ import { existsSync as existsSync32, readFileSync as readFileSync22 } from "fs";
10655
+ import { homedir as homedir32 } from "os";
10656
+ import { join as join32 } from "path";
10657
+ import { existsSync as existsSync42, readdirSync as readdirSync22 } from "fs";
10658
+ import { join as join42 } from "path";
10659
+ import { join as join52, dirname as dirname2 } from "path";
10541
10660
  function __accessProp2(key) {
10542
10661
  return this[key];
10543
10662
  }
@@ -10578,6 +10697,17 @@ function sqliteToPostgres(sql) {
10578
10697
  out = out.replace(/INSERT\s+OR\s+IGNORE\s+INTO/gi, "INSERT INTO");
10579
10698
  return out;
10580
10699
  }
10700
+ function translateDdl(ddl, dialect) {
10701
+ if (dialect === "sqlite")
10702
+ return ddl;
10703
+ let out = ddl;
10704
+ out = out.replace(/\bINTEGER\s+PRIMARY\s+KEY\s+AUTOINCREMENT\b/gi, "BIGSERIAL PRIMARY KEY");
10705
+ out = out.replace(/\bAUTOINCREMENT\b/gi, "");
10706
+ out = out.replace(/\bREAL\b/gi, "DOUBLE PRECISION");
10707
+ out = out.replace(/\bBLOB\b/gi, "BYTEA");
10708
+ out = sqliteToPostgres(out);
10709
+ return out;
10710
+ }
10581
10711
 
10582
10712
  class SqliteAdapter {
10583
10713
  db;
@@ -10605,6 +10735,9 @@ class SqliteAdapter {
10605
10735
  exec(sql) {
10606
10736
  this.db.exec(sql);
10607
10737
  }
10738
+ query(sql) {
10739
+ return this.db.query(sql);
10740
+ }
10608
10741
  prepare(sql) {
10609
10742
  const stmt = this.db.prepare(sql);
10610
10743
  return {
@@ -10762,6 +10895,62 @@ class PgAdapter {
10762
10895
  return this.pool;
10763
10896
  }
10764
10897
  }
10898
+
10899
+ class PgAdapterAsync {
10900
+ pool;
10901
+ constructor(arg) {
10902
+ if (typeof arg === "string") {
10903
+ this.pool = new esm_default.Pool({ connectionString: arg });
10904
+ } else {
10905
+ this.pool = arg;
10906
+ }
10907
+ }
10908
+ async run(sql, ...params) {
10909
+ const pgSql = translateSql(sql, "pg");
10910
+ const pgParams = translateParams(params);
10911
+ const res = await this.pool.query(pgSql, pgParams);
10912
+ return {
10913
+ changes: res.rowCount ?? 0,
10914
+ lastInsertRowid: res.rows?.[0]?.id ?? 0
10915
+ };
10916
+ }
10917
+ async get(sql, ...params) {
10918
+ const pgSql = translateSql(sql, "pg");
10919
+ const pgParams = translateParams(params);
10920
+ const res = await this.pool.query(pgSql, pgParams);
10921
+ return res.rows[0] ?? null;
10922
+ }
10923
+ async all(sql, ...params) {
10924
+ const pgSql = translateSql(sql, "pg");
10925
+ const pgParams = translateParams(params);
10926
+ const res = await this.pool.query(pgSql, pgParams);
10927
+ return res.rows;
10928
+ }
10929
+ async exec(sql) {
10930
+ const pgSql = translateSql(sql, "pg");
10931
+ await this.pool.query(pgSql);
10932
+ }
10933
+ async close() {
10934
+ await this.pool.end();
10935
+ }
10936
+ async transaction(fn) {
10937
+ const client = await this.pool.connect();
10938
+ try {
10939
+ await client.query("BEGIN");
10940
+ const result = await fn(client);
10941
+ await client.query("COMMIT");
10942
+ return result;
10943
+ } catch (err) {
10944
+ await client.query("ROLLBACK");
10945
+ throw err;
10946
+ } finally {
10947
+ client.release();
10948
+ }
10949
+ }
10950
+ get raw() {
10951
+ return this.pool;
10952
+ }
10953
+ }
10765
10954
  function setErrorMap2(map) {
10766
10955
  overrideErrorMap2 = map;
10767
10956
  }
@@ -11342,6 +11531,45 @@ function getDbPath2(serviceName) {
11342
11531
  const dir = getDataDir(serviceName);
11343
11532
  return join9(dir, `${serviceName}.db`);
11344
11533
  }
11534
+ function migrateDotfile(serviceName) {
11535
+ const legacyDir = join9(homedir3(), `.${serviceName}`);
11536
+ const newDir = join9(HASNA_DIR, serviceName);
11537
+ if (!existsSync8(legacyDir))
11538
+ return [];
11539
+ if (existsSync8(newDir))
11540
+ return [];
11541
+ mkdirSync5(newDir, { recursive: true });
11542
+ const migrated = [];
11543
+ copyDirRecursive(legacyDir, newDir, legacyDir, migrated);
11544
+ return migrated;
11545
+ }
11546
+ function copyDirRecursive(src, dest, root, migrated) {
11547
+ const entries = readdirSync4(src, { withFileTypes: true });
11548
+ for (const entry of entries) {
11549
+ const srcPath = join9(src, entry.name);
11550
+ const destPath = join9(dest, entry.name);
11551
+ if (entry.isDirectory()) {
11552
+ mkdirSync5(destPath, { recursive: true });
11553
+ copyDirRecursive(srcPath, destPath, root, migrated);
11554
+ } else {
11555
+ copyFileSync(srcPath, destPath);
11556
+ migrated.push(relative2(root, srcPath));
11557
+ }
11558
+ }
11559
+ }
11560
+ function hasLegacyDotfile(serviceName) {
11561
+ return existsSync8(join9(homedir3(), `.${serviceName}`));
11562
+ }
11563
+ function getHasnaDir() {
11564
+ mkdirSync5(HASNA_DIR, { recursive: true });
11565
+ return HASNA_DIR;
11566
+ }
11567
+ function getConfigDir() {
11568
+ return CONFIG_DIR2;
11569
+ }
11570
+ function getConfigPath2() {
11571
+ return CONFIG_PATH2;
11572
+ }
11345
11573
  function getCloudConfig() {
11346
11574
  if (!existsSync22(CONFIG_PATH2)) {
11347
11575
  return CloudConfigSchema.parse({});
@@ -11353,6 +11581,11 @@ function getCloudConfig() {
11353
11581
  return CloudConfigSchema.parse({});
11354
11582
  }
11355
11583
  }
11584
+ function saveCloudConfig(config) {
11585
+ mkdirSync22(CONFIG_DIR2, { recursive: true });
11586
+ writeFileSync5(CONFIG_PATH2, JSON.stringify(config, null, 2) + `
11587
+ `, "utf-8");
11588
+ }
11356
11589
  function getConnectionString(dbName) {
11357
11590
  const config = getCloudConfig();
11358
11591
  const { host, port, username, password_env, ssl } = config.rds;
@@ -11373,126 +11606,367 @@ function createDatabase(options) {
11373
11606
  const dbPath = options.sqlitePath ?? getDbPath2(options.service);
11374
11607
  return new SqliteAdapter(dbPath);
11375
11608
  }
11376
- function syncPush(local, cloud, options) {
11377
- return syncTransfer(local, cloud, options, "push");
11609
+ async function syncPush(local, remote, options) {
11610
+ const orderedTables = await getTableOrder(remote, options.tables);
11611
+ return syncTransfer(local, remote, { ...options, tables: orderedTables }, "push");
11612
+ }
11613
+ async function syncPull(remote, local, options) {
11614
+ const orderedTables = await getTableOrder(remote, options.tables);
11615
+ return syncTransfer(remote, local, { ...options, tables: orderedTables }, "pull");
11616
+ }
11617
+ async function getTableOrder(remote, tables) {
11618
+ if (tables.length <= 1)
11619
+ return tables;
11620
+ try {
11621
+ const fks = await remote.all(`
11622
+ SELECT DISTINCT
11623
+ tc.table_name AS source_table,
11624
+ ccu.table_name AS referenced_table
11625
+ FROM information_schema.table_constraints tc
11626
+ JOIN information_schema.constraint_column_usage ccu
11627
+ ON tc.constraint_name = ccu.constraint_name
11628
+ AND tc.table_schema = ccu.table_schema
11629
+ WHERE tc.constraint_type = 'FOREIGN KEY'
11630
+ AND tc.table_schema = 'public'
11631
+ `);
11632
+ if (fks.length > 0) {
11633
+ return topoSort(tables, fks);
11634
+ }
11635
+ } catch {}
11636
+ return heuristicOrder(tables);
11637
+ }
11638
+ function topoSort(tables, fks) {
11639
+ const tableSet = new Set(tables);
11640
+ const deps = new Map;
11641
+ for (const t of tables) {
11642
+ deps.set(t, new Set);
11643
+ }
11644
+ for (const fk of fks) {
11645
+ if (tableSet.has(fk.source_table) && tableSet.has(fk.referenced_table)) {
11646
+ deps.get(fk.source_table).add(fk.referenced_table);
11647
+ }
11648
+ }
11649
+ const sorted = [];
11650
+ const visited = new Set;
11651
+ const visiting = new Set;
11652
+ function visit(table) {
11653
+ if (visited.has(table))
11654
+ return;
11655
+ if (visiting.has(table)) {
11656
+ sorted.push(table);
11657
+ visited.add(table);
11658
+ return;
11659
+ }
11660
+ visiting.add(table);
11661
+ const tableDeps = deps.get(table) ?? new Set;
11662
+ for (const dep of tableDeps) {
11663
+ visit(dep);
11664
+ }
11665
+ visiting.delete(table);
11666
+ visited.add(table);
11667
+ sorted.push(table);
11668
+ }
11669
+ for (const t of tables) {
11670
+ visit(t);
11671
+ }
11672
+ return sorted;
11673
+ }
11674
+ function heuristicOrder(tables) {
11675
+ const sorted = [...tables].sort((a, b) => {
11676
+ const aIsChild = a.includes("_") && tables.some((t) => a.startsWith(t + "_") || a.endsWith("_" + t));
11677
+ const bIsChild = b.includes("_") && tables.some((t) => b.startsWith(t + "_") || b.endsWith("_" + t));
11678
+ if (aIsChild && !bIsChild)
11679
+ return 1;
11680
+ if (!aIsChild && bIsChild)
11681
+ return -1;
11682
+ return a.localeCompare(b);
11683
+ });
11684
+ return sorted;
11685
+ }
11686
+ function getSqlitePrimaryKeys(adapter, table) {
11687
+ try {
11688
+ const cols = adapter.all(`PRAGMA table_info("${table}")`);
11689
+ const pkCols = cols.filter((c) => c.pk > 0).sort((a, b) => a.pk - b.pk).map((c) => c.name);
11690
+ return pkCols;
11691
+ } catch {
11692
+ return [];
11693
+ }
11694
+ }
11695
+ async function getPgPrimaryKeys(adapter, table) {
11696
+ try {
11697
+ const rows = await adapter.all(`
11698
+ SELECT kcu.column_name, kcu.ordinal_position
11699
+ FROM information_schema.table_constraints tc
11700
+ JOIN information_schema.key_column_usage kcu
11701
+ ON tc.constraint_name = kcu.constraint_name
11702
+ AND tc.table_schema = kcu.table_schema
11703
+ WHERE tc.constraint_type = 'PRIMARY KEY'
11704
+ AND tc.table_schema = 'public'
11705
+ AND tc.table_name = '${table}'
11706
+ ORDER BY kcu.ordinal_position
11707
+ `);
11708
+ return rows.map((r) => r.column_name);
11709
+ } catch {
11710
+ return [];
11711
+ }
11712
+ }
11713
+ async function detectPrimaryKeys(adapter, table) {
11714
+ if (isAsyncAdapter(adapter)) {
11715
+ return getPgPrimaryKeys(adapter, table);
11716
+ }
11717
+ return getSqlitePrimaryKeys(adapter, table);
11378
11718
  }
11379
- function syncPull(local, cloud, options) {
11380
- return syncTransfer(cloud, local, options, "pull");
11719
+ async function resolvePrimaryKeys(source, target, table, pkOption) {
11720
+ if (pkOption) {
11721
+ return Array.isArray(pkOption) ? pkOption : [pkOption];
11722
+ }
11723
+ let pks = await detectPrimaryKeys(source, table);
11724
+ if (pks.length === 0) {
11725
+ pks = await detectPrimaryKeys(target, table);
11726
+ }
11727
+ return pks;
11381
11728
  }
11382
- function syncTransfer(source, target, options, _direction) {
11729
+ async function syncTransfer(source, target, options, _direction) {
11383
11730
  const {
11384
11731
  tables,
11385
11732
  onProgress,
11386
- batchSize = 500,
11733
+ batchSize = 100,
11387
11734
  conflictColumn = "updated_at",
11388
- primaryKey = "id"
11735
+ primaryKey: pkOption
11389
11736
  } = options;
11390
11737
  const results = [];
11391
- for (let i = 0;i < tables.length; i++) {
11392
- const table = tables[i];
11393
- const result = {
11394
- table,
11395
- rowsRead: 0,
11396
- rowsWritten: 0,
11397
- rowsSkipped: 0,
11398
- errors: []
11399
- };
11738
+ const sqliteTarget = !isAsyncAdapter(target) ? target : null;
11739
+ if (sqliteTarget) {
11400
11740
  try {
11401
- onProgress?.({
11741
+ sqliteTarget.exec("PRAGMA foreign_keys = OFF");
11742
+ } catch {}
11743
+ }
11744
+ try {
11745
+ for (let i = 0;i < tables.length; i++) {
11746
+ const table = tables[i];
11747
+ const result = {
11402
11748
  table,
11403
- phase: "reading",
11404
11749
  rowsRead: 0,
11405
11750
  rowsWritten: 0,
11406
- totalTables: tables.length,
11407
- currentTableIndex: i
11408
- });
11409
- const rows = source.all(`SELECT * FROM "${table}"`);
11410
- result.rowsRead = rows.length;
11411
- if (rows.length === 0) {
11751
+ rowsSkipped: 0,
11752
+ errors: []
11753
+ };
11754
+ try {
11412
11755
  onProgress?.({
11413
11756
  table,
11414
- phase: "done",
11757
+ phase: "reading",
11415
11758
  rowsRead: 0,
11416
11759
  rowsWritten: 0,
11417
11760
  totalTables: tables.length,
11418
11761
  currentTableIndex: i
11419
11762
  });
11420
- results.push(result);
11421
- continue;
11422
- }
11423
- const columns = Object.keys(rows[0]);
11424
- const hasConflictCol = columns.includes(conflictColumn);
11425
- const hasPrimaryKey = columns.includes(primaryKey);
11426
- if (!hasPrimaryKey) {
11427
- result.errors.push(`Table "${table}" has no "${primaryKey}" column \u2014 skipping`);
11428
- results.push(result);
11429
- continue;
11430
- }
11431
- onProgress?.({
11432
- table,
11433
- phase: "writing",
11434
- rowsRead: result.rowsRead,
11435
- rowsWritten: 0,
11436
- totalTables: tables.length,
11437
- currentTableIndex: i
11438
- });
11439
- for (let offset = 0;offset < rows.length; offset += batchSize) {
11440
- const batch = rows.slice(offset, offset + batchSize);
11441
- for (const row of batch) {
11763
+ const rows = await readAll(source, `SELECT * FROM "${table}"`);
11764
+ result.rowsRead = rows.length;
11765
+ if (rows.length === 0) {
11766
+ onProgress?.({
11767
+ table,
11768
+ phase: "done",
11769
+ rowsRead: 0,
11770
+ rowsWritten: 0,
11771
+ totalTables: tables.length,
11772
+ currentTableIndex: i
11773
+ });
11774
+ results.push(result);
11775
+ continue;
11776
+ }
11777
+ const pkColumns = await resolvePrimaryKeys(source, target, table, pkOption);
11778
+ const sourceColumns = Object.keys(rows[0]);
11779
+ let targetColumns = null;
11780
+ if (!isAsyncAdapter(target)) {
11442
11781
  try {
11443
- const existing = target.get(`SELECT "${primaryKey}"${hasConflictCol ? `, "${conflictColumn}"` : ""} FROM "${table}" WHERE "${primaryKey}" = ?`, row[primaryKey]);
11444
- if (existing) {
11445
- if (hasConflictCol && existing[conflictColumn] && row[conflictColumn]) {
11446
- const existingTime = new Date(existing[conflictColumn]).getTime();
11447
- const incomingTime = new Date(row[conflictColumn]).getTime();
11448
- if (existingTime >= incomingTime) {
11449
- result.rowsSkipped++;
11450
- continue;
11451
- }
11782
+ const colInfo = target.all(`PRAGMA table_info("${table}")`);
11783
+ targetColumns = new Set(colInfo.map((c) => c.name));
11784
+ } catch {}
11785
+ } else {
11786
+ try {
11787
+ const colInfo = await target.all(`SELECT column_name FROM information_schema.columns WHERE table_schema = 'public' AND table_name = '${table}'`);
11788
+ targetColumns = new Set(colInfo.map((c) => c.column_name));
11789
+ } catch {}
11790
+ }
11791
+ const columns = targetColumns ? sourceColumns.filter((c) => targetColumns.has(c)) : sourceColumns;
11792
+ if (pkColumns.length === 0) {
11793
+ result.errors.push(`Table "${table}" has no primary key \u2014 inserting without conflict handling`);
11794
+ onProgress?.({
11795
+ table,
11796
+ phase: "writing",
11797
+ rowsRead: result.rowsRead,
11798
+ rowsWritten: 0,
11799
+ totalTables: tables.length,
11800
+ currentTableIndex: i
11801
+ });
11802
+ for (let offset = 0;offset < rows.length; offset += batchSize) {
11803
+ const batch = rows.slice(offset, offset + batchSize);
11804
+ try {
11805
+ if (isAsyncAdapter(target)) {
11806
+ await batchInsertPg(target, table, columns, batch);
11807
+ } else {
11808
+ batchInsertSqlite(target, table, columns, batch);
11452
11809
  }
11453
- const setClauses = columns.filter((c) => c !== primaryKey).map((c) => `"${c}" = ?`).join(", ");
11454
- const values = columns.filter((c) => c !== primaryKey).map((c) => row[c]);
11455
- values.push(row[primaryKey]);
11456
- target.run(`UPDATE "${table}" SET ${setClauses} WHERE "${primaryKey}" = ?`, ...values);
11457
- } else {
11458
- const placeholders = columns.map(() => "?").join(", ");
11459
- const colList = columns.map((c) => `"${c}"`).join(", ");
11460
- const values = columns.map((c) => row[c]);
11461
- target.run(`INSERT INTO "${table}" (${colList}) VALUES (${placeholders})`, ...values);
11810
+ result.rowsWritten += batch.length;
11811
+ } catch (err) {
11812
+ result.errors.push(`Batch at offset ${offset}: ${err?.message ?? String(err)}`);
11462
11813
  }
11463
- result.rowsWritten++;
11464
- } catch (err) {
11465
- result.errors.push(`Row ${row[primaryKey]}: ${err?.message ?? String(err)}`);
11466
11814
  }
11815
+ onProgress?.({
11816
+ table,
11817
+ phase: "done",
11818
+ rowsRead: result.rowsRead,
11819
+ rowsWritten: result.rowsWritten,
11820
+ totalTables: tables.length,
11821
+ currentTableIndex: i
11822
+ });
11823
+ results.push(result);
11824
+ continue;
11825
+ }
11826
+ const missingPks = pkColumns.filter((pk) => !columns.includes(pk));
11827
+ if (missingPks.length > 0) {
11828
+ result.errors.push(`Table "${table}" missing PK columns in data: ${missingPks.join(", ")} \u2014 skipping`);
11829
+ results.push(result);
11830
+ continue;
11467
11831
  }
11468
11832
  onProgress?.({
11469
11833
  table,
11470
11834
  phase: "writing",
11471
11835
  rowsRead: result.rowsRead,
11836
+ rowsWritten: 0,
11837
+ totalTables: tables.length,
11838
+ currentTableIndex: i
11839
+ });
11840
+ const updateCols = columns.filter((c) => !pkColumns.includes(c));
11841
+ for (let offset = 0;offset < rows.length; offset += batchSize) {
11842
+ const batch = rows.slice(offset, offset + batchSize);
11843
+ try {
11844
+ if (isAsyncAdapter(target)) {
11845
+ await batchUpsertPg(target, table, columns, updateCols, pkColumns, batch);
11846
+ } else {
11847
+ batchUpsertSqlite(target, table, columns, updateCols, pkColumns, batch);
11848
+ }
11849
+ result.rowsWritten += batch.length;
11850
+ } catch (err) {
11851
+ result.errors.push(`Batch at offset ${offset}: ${err?.message ?? String(err)}`);
11852
+ }
11853
+ onProgress?.({
11854
+ table,
11855
+ phase: "writing",
11856
+ rowsRead: result.rowsRead,
11857
+ rowsWritten: result.rowsWritten,
11858
+ totalTables: tables.length,
11859
+ currentTableIndex: i
11860
+ });
11861
+ }
11862
+ onProgress?.({
11863
+ table,
11864
+ phase: "done",
11865
+ rowsRead: result.rowsRead,
11472
11866
  rowsWritten: result.rowsWritten,
11473
11867
  totalTables: tables.length,
11474
11868
  currentTableIndex: i
11475
11869
  });
11870
+ } catch (err) {
11871
+ result.errors.push(`Table "${table}": ${err?.message ?? String(err)}`);
11476
11872
  }
11477
- onProgress?.({
11478
- table,
11479
- phase: "done",
11480
- rowsRead: result.rowsRead,
11481
- rowsWritten: result.rowsWritten,
11482
- totalTables: tables.length,
11483
- currentTableIndex: i
11484
- });
11485
- } catch (err) {
11486
- result.errors.push(`Table "${table}": ${err?.message ?? String(err)}`);
11873
+ results.push(result);
11874
+ }
11875
+ } finally {
11876
+ if (sqliteTarget) {
11877
+ try {
11878
+ sqliteTarget.exec("PRAGMA foreign_keys = ON");
11879
+ } catch {}
11880
+ try {
11881
+ const violations = sqliteTarget.all("PRAGMA foreign_key_check");
11882
+ if (violations.length > 0) {
11883
+ const tables2 = [...new Set(violations.map((v) => v.table))];
11884
+ const msg = `FK integrity check: ${violations.length} violation(s) in table(s): ${tables2.join(", ")}`;
11885
+ if (results.length > 0) {
11886
+ results[results.length - 1].errors.push(msg);
11887
+ }
11888
+ }
11889
+ } catch {}
11487
11890
  }
11488
- results.push(result);
11489
11891
  }
11490
11892
  return results;
11491
11893
  }
11894
+ async function batchUpsertPg(target, table, columns, updateCols, primaryKeys, batch) {
11895
+ if (batch.length === 0)
11896
+ return;
11897
+ const colList = columns.map((c) => `"${c}"`).join(", ");
11898
+ const valuePlaceholders = batch.map((_, rowIdx) => {
11899
+ const offset = rowIdx * columns.length;
11900
+ return `(${columns.map((_2, colIdx) => `$${offset + colIdx + 1}`).join(", ")})`;
11901
+ }).join(", ");
11902
+ const pkList = primaryKeys.map((c) => `"${c}"`).join(", ");
11903
+ const setClause = updateCols.length > 0 ? updateCols.map((c) => `"${c}" = EXCLUDED."${c}"`).join(", ") : `"${primaryKeys[0]}" = EXCLUDED."${primaryKeys[0]}"`;
11904
+ const sql = `INSERT INTO "${table}" (${colList}) VALUES ${valuePlaceholders}
11905
+ ON CONFLICT (${pkList}) DO UPDATE SET ${setClause}`;
11906
+ const params = batch.flatMap((row) => columns.map((c) => row[c] ?? null));
11907
+ await target.run(sql, ...params);
11908
+ }
11909
+ function batchUpsertSqlite(target, table, columns, updateCols, primaryKeys, batch) {
11910
+ if (batch.length === 0)
11911
+ return;
11912
+ const colList = columns.map((c) => `"${c}"`).join(", ");
11913
+ const valuePlaceholders = batch.map(() => `(${columns.map(() => "?").join(", ")})`).join(", ");
11914
+ const pkList = primaryKeys.map((c) => `"${c}"`).join(", ");
11915
+ const setClause = updateCols.length > 0 ? updateCols.map((c) => `"${c}" = EXCLUDED."${c}"`).join(", ") : `"${primaryKeys[0]}" = EXCLUDED."${primaryKeys[0]}"`;
11916
+ const sql = `INSERT INTO "${table}" (${colList}) VALUES ${valuePlaceholders}
11917
+ ON CONFLICT (${pkList}) DO UPDATE SET ${setClause}`;
11918
+ const params = batch.flatMap((row) => columns.map((c) => coerceForSqlite(row[c])));
11919
+ target.run(sql, ...params);
11920
+ }
11921
+ async function batchInsertPg(target, table, columns, batch) {
11922
+ if (batch.length === 0)
11923
+ return;
11924
+ const colList = columns.map((c) => `"${c}"`).join(", ");
11925
+ const valuePlaceholders = batch.map((_, rowIdx) => {
11926
+ const offset = rowIdx * columns.length;
11927
+ return `(${columns.map((_2, colIdx) => `$${offset + colIdx + 1}`).join(", ")})`;
11928
+ }).join(", ");
11929
+ const sql = `INSERT INTO "${table}" (${colList}) VALUES ${valuePlaceholders}`;
11930
+ const params = batch.flatMap((row) => columns.map((c) => row[c] ?? null));
11931
+ await target.run(sql, ...params);
11932
+ }
11933
+ function batchInsertSqlite(target, table, columns, batch) {
11934
+ if (batch.length === 0)
11935
+ return;
11936
+ const colList = columns.map((c) => `"${c}"`).join(", ");
11937
+ const valuePlaceholders = batch.map(() => `(${columns.map(() => "?").join(", ")})`).join(", ");
11938
+ const sql = `INSERT INTO "${table}" (${colList}) VALUES ${valuePlaceholders}`;
11939
+ const params = batch.flatMap((row) => columns.map((c) => coerceForSqlite(row[c])));
11940
+ target.run(sql, ...params);
11941
+ }
11942
+ function coerceForSqlite(value) {
11943
+ if (value === null || value === undefined)
11944
+ return null;
11945
+ if (typeof value === "string" || typeof value === "number" || typeof value === "bigint" || typeof value === "boolean")
11946
+ return value;
11947
+ if (value instanceof Date)
11948
+ return value.toISOString();
11949
+ if (Buffer.isBuffer(value) || value instanceof Uint8Array)
11950
+ return value;
11951
+ if (typeof value === "object")
11952
+ return JSON.stringify(value);
11953
+ return String(value);
11954
+ }
11955
+ function isAsyncAdapter(adapter) {
11956
+ return adapter.constructor.name === "PgAdapterAsync" || typeof adapter.raw?.connect === "function";
11957
+ }
11958
+ async function readAll(adapter, sql) {
11959
+ const result = adapter.all(sql);
11960
+ return result instanceof Promise ? await result : result;
11961
+ }
11492
11962
  function listSqliteTables(db) {
11493
11963
  const rows = db.all(`SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_%' ORDER BY name`);
11494
11964
  return rows.map((r) => r.name);
11495
11965
  }
11966
+ async function listPgTables(db) {
11967
+ const rows = await db.all(`SELECT tablename FROM pg_tables WHERE schemaname = 'public' ORDER BY tablename`);
11968
+ return rows.map((r) => r.tablename);
11969
+ }
11496
11970
  function ensureFeedbackTable(db) {
11497
11971
  db.exec(FEEDBACK_TABLE_SQL);
11498
11972
  }
@@ -11545,74 +12019,762 @@ async function sendFeedback(feedback, db) {
11545
12019
  return { sent: false, id, error: errorMsg };
11546
12020
  }
11547
12021
  }
11548
- function registerCloudTools(server, serviceName) {
11549
- server.tool(`${serviceName}_cloud_status`, "Show cloud configuration and connection health", {}, async () => {
11550
- const config = getCloudConfig();
11551
- const lines = [
11552
- `Mode: ${config.mode}`,
11553
- `Service: ${serviceName}`,
11554
- `RDS Host: ${config.rds.host || "(not configured)"}`
11555
- ];
11556
- if (config.rds.host && config.rds.username) {
11557
- try {
11558
- const pg2 = new PgAdapter(getConnectionString("postgres"));
11559
- pg2.get("SELECT 1 as ok");
11560
- lines.push("PostgreSQL: connected");
11561
- pg2.close();
11562
- } catch (err) {
11563
- lines.push(`PostgreSQL: failed \u2014 ${err?.message}`);
12022
// Return every stored feedback entry, newest first. Creates the feedback
// table on demand so this is safe on a fresh database.
function listFeedback(db) {
  ensureFeedbackTable(db);
  return db.all(`SELECT id, service, version, message, email, machine_id, created_at FROM feedback ORDER BY created_at DESC`);
}
12026
+
12027
// Tracks per-table sync progress in memory and persists resume checkpoints in
// the _sync_resume table, so an interrupted sync can pick up where it
// stopped. An optional callback receives a snapshot after each state change.
class SyncProgressTracker {
  db;
  progress = new Map;   // table name -> live progress snapshot
  startTimes = new Map; // table name -> epoch ms when start() was called
  callback;
  constructor(db, callback) {
    this.db = db;
    this.callback = callback;
    this.ensureResumeTable();
  }
  // Idempotently create the checkpoint table.
  ensureResumeTable() {
    this.db.exec(`
      CREATE TABLE IF NOT EXISTS _sync_resume (
        table_name TEXT PRIMARY KEY,
        last_row_id TEXT,
        direction TEXT,
        started_at TEXT,
        status TEXT DEFAULT 'in_progress'
      )
    `);
  }
  // Begin tracking `table`. Status becomes "resumed" when an unfinished
  // checkpoint already exists; the checkpoint row is (re)written either way,
  // with last_row_id reset to "".
  start(table, total, direction) {
    const resumed = this.canResume(table);
    const now2 = Date.now();
    this.startTimes.set(table, now2);
    const status = resumed ? "resumed" : "in_progress";
    const info = {
      table,
      total,
      done: 0,
      percent: 0,
      elapsed_ms: 0,
      eta_ms: 0,
      status
    };
    this.progress.set(table, info);
    this.db.run(`INSERT INTO _sync_resume (table_name, last_row_id, direction, started_at, status)
      VALUES (?, ?, ?, datetime('now'), ?)
      ON CONFLICT (table_name) DO UPDATE SET
        direction = excluded.direction,
        started_at = datetime('now'),
        status = excluded.status`, table, "", direction, status);
    this.notify(table);
  }
  // Record that `done` rows are finished and checkpoint the last row id.
  // ETA is a linear extrapolation of the average per-row time so far.
  // No-op if start() was never called for this table.
  update(table, done, lastRowId) {
    const info = this.progress.get(table);
    if (!info)
      return;
    const startTime = this.startTimes.get(table) ?? Date.now();
    const elapsed = Date.now() - startTime;
    const rate = done > 0 ? elapsed / done : 0;
    const remaining = info.total - done;
    const eta = remaining > 0 ? Math.round(rate * remaining) : 0;
    info.done = done;
    info.percent = info.total > 0 ? Math.round(done / info.total * 100) : 0;
    info.elapsed_ms = elapsed;
    info.eta_ms = eta;
    info.status = "in_progress";
    this.db.run(`UPDATE _sync_resume SET last_row_id = ?, status = 'in_progress' WHERE table_name = ?`, lastRowId, table);
    this.notify(table);
  }
  // Finalize a successful sync: snapshot snaps to 100% and the checkpoint
  // row is kept with status 'completed' (removed later via clearResume()).
  markComplete(table) {
    const info = this.progress.get(table);
    if (info) {
      const startTime = this.startTimes.get(table) ?? Date.now();
      info.elapsed_ms = Date.now() - startTime;
      info.done = info.total;
      info.percent = 100;
      info.eta_ms = 0;
      info.status = "completed";
      this.notify(table);
    }
    this.db.run(`UPDATE _sync_resume SET status = 'completed' WHERE table_name = ?`, table);
  }
  // Record a failed sync. The error itself is intentionally not persisted.
  markFailed(table, _error) {
    const info = this.progress.get(table);
    if (info) {
      const startTime = this.startTimes.get(table) ?? Date.now();
      info.elapsed_ms = Date.now() - startTime;
      info.status = "failed";
      this.notify(table);
    }
    this.db.run(`UPDATE _sync_resume SET status = 'failed' WHERE table_name = ?`, table);
  }
  // True when a previous run left an unfinished checkpoint for `table`.
  canResume(table) {
    const row = this.db.get(`SELECT status FROM _sync_resume WHERE table_name = ?`, table);
    if (!row)
      return false;
    return row.status === "in_progress" || row.status === "resumed";
  }
  // Full checkpoint row for an unfinished sync, or null when there is
  // nothing to resume.
  getResumePoint(table) {
    const row = this.db.get(`SELECT table_name, last_row_id, direction, started_at, status FROM _sync_resume WHERE table_name = ?`, table);
    if (!row)
      return null;
    if (row.status !== "in_progress" && row.status !== "resumed")
      return null;
    return row;
  }
  // Drop all persisted and in-memory state for `table`.
  clearResume(table) {
    this.db.run(`DELETE FROM _sync_resume WHERE table_name = ?`, table);
    this.progress.delete(table);
    this.startTimes.delete(table);
  }
  // Current in-memory snapshot for one table (null if untracked).
  getProgress(table) {
    return this.progress.get(table) ?? null;
  }
  // Snapshots for every tracked table.
  getAllProgress() {
    return Array.from(this.progress.values());
  }
  // All persisted checkpoint rows, most recently started first.
  listResumeRecords() {
    return this.db.all(`SELECT table_name, last_row_id, direction, started_at, status FROM _sync_resume ORDER BY started_at DESC`);
  }
  // Push a defensive copy of the current snapshot to the callback, if any.
  notify(table) {
    const info = this.progress.get(table);
    if (info && this.callback) {
      this.callback({ ...info });
    }
  }
}
12146
// Compare local and remote row sets and report rows that exist on both sides
// but differ in `conflictColumn` (strict string/value inequality). Rows that
// exist on only one side are not conflicts. Row data is shallow-copied into
// each conflict record; `resolved` starts false.
function detectConflicts(local, remote, table, primaryKey = "id", conflictColumn = "updated_at") {
  const remoteById = new Map(remote.map((row) => [String(row[primaryKey]), row]));
  const found = [];
  for (const mine of local) {
    const id = String(mine[primaryKey]);
    const theirs = remoteById.get(id);
    if (!theirs) continue;
    if (mine[conflictColumn] === theirs[conflictColumn]) continue;
    found.push({
      table,
      row_id: id,
      local_updated_at: String(mine[conflictColumn] ?? ""),
      remote_updated_at: String(theirs[conflictColumn] ?? ""),
      local_data: { ...mine },
      remote_data: { ...theirs },
      resolved: false
    });
  }
  return found;
}
12174
// Mark every conflict as resolved with the chosen strategy.
// The strategy only LABELS the conflict here; the winning row is picked later
// by getWinningData(). The previous switch statement was dead code: every
// branch (including both arms of the newest-wins if/else) left `resolution`
// equal to `strategy`, so it reduces to the single assignment below.
// Input conflicts are not mutated; resolved copies are returned.
function resolveConflicts(conflicts, strategy = "newest-wins") {
  return conflicts.map((conflict) => ({
    ...conflict,
    resolved: true,
    resolution: strategy
  }));
}
12196
// Pick the winning row data for a resolved conflict according to its
// resolution strategy. Throws if the conflict was never resolved.
// newest-wins compares the two updated_at timestamps; ties go to remote.
function getWinningData(conflict) {
  if (!conflict.resolved || !conflict.resolution) {
    throw new Error(`Conflict for row ${conflict.row_id} is not resolved`);
  }
  if (conflict.resolution === "remote-wins") {
    return conflict.remote_data;
  }
  if (conflict.resolution === "newest-wins") {
    const localMs = new Date(conflict.local_updated_at).getTime();
    const remoteMs = new Date(conflict.remote_updated_at).getTime();
    return remoteMs >= localMs ? conflict.remote_data : conflict.local_data;
  }
  // "local-wins", "manual", and any unknown strategy fall back to local data.
  return conflict.local_data;
}
12216
// Idempotently create the conflict journal table. Row data is stored as JSON
// text; resolution/resolved_at stay NULL until the conflict is resolved.
function ensureConflictsTable(db) {
  db.exec(`
    CREATE TABLE IF NOT EXISTS _sync_conflicts (
      id TEXT PRIMARY KEY,
      table_name TEXT,
      row_id TEXT,
      local_data TEXT,
      remote_data TEXT,
      local_updated_at TEXT,
      remote_updated_at TEXT,
      resolution TEXT,
      resolved_at TEXT,
      created_at TEXT DEFAULT (datetime('now'))
    )
  `);
}
// Persist detected conflicts for later inspection/resolution.
// NOTE(review): the id embeds Date.now(), so the same table/row conflicting
// twice within one millisecond would collide on the PRIMARY KEY — confirm
// that is acceptable for this tooling.
function storeConflicts(db, conflicts) {
  ensureConflictsTable(db);
  for (const conflict of conflicts) {
    const id = `${conflict.table}:${conflict.row_id}:${Date.now()}`;
    db.run(`INSERT INTO _sync_conflicts (id, table_name, row_id, local_data, remote_data, local_updated_at, remote_updated_at, resolution, resolved_at)
      VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`, id, conflict.table, conflict.row_id, JSON.stringify(conflict.local_data), JSON.stringify(conflict.remote_data), conflict.local_updated_at, conflict.remote_updated_at, conflict.resolution ?? null, conflict.resolved ? new Date().toISOString() : null);
  }
}
12240
// Query stored conflicts, newest first, optionally filtered by resolution
// state and/or table name. A conflict counts as "resolved" only when BOTH
// resolution and resolved_at are set. Filter values are parameterized; only
// fixed SQL fragments are concatenated.
function listConflicts(db, opts) {
  ensureConflictsTable(db);
  let sql = `SELECT * FROM _sync_conflicts WHERE 1=1`;
  const params = [];
  if (opts?.resolved !== undefined) {
    if (opts.resolved) {
      sql += ` AND resolution IS NOT NULL AND resolved_at IS NOT NULL`;
    } else {
      sql += ` AND (resolution IS NULL OR resolved_at IS NULL)`;
    }
  }
  if (opts?.table) {
    sql += ` AND table_name = ?`;
    params.push(opts.table);
  }
  sql += ` ORDER BY created_at DESC`;
  return db.all(sql, ...params);
}
12258
// Mark one stored conflict as resolved with `strategy` and return the updated
// row; returns null when the id is unknown.
function resolveConflict(db, conflictId, strategy) {
  ensureConflictsTable(db);
  const row = db.get(`SELECT * FROM _sync_conflicts WHERE id = ?`, conflictId);
  if (!row)
    return null;
  db.run(`UPDATE _sync_conflicts SET resolution = ?, resolved_at = datetime('now') WHERE id = ?`, strategy, conflictId);
  return db.get(`SELECT * FROM _sync_conflicts WHERE id = ?`, conflictId);
}
// Fetch one stored conflict by id (adapter's "not found" value when missing).
function getConflict(db, conflictId) {
  ensureConflictsTable(db);
  return db.get(`SELECT * FROM _sync_conflicts WHERE id = ?`, conflictId);
}
// Delete all fully-resolved conflicts; returns the number of rows removed.
function purgeResolvedConflicts(db) {
  ensureConflictsTable(db);
  const result = db.run(`DELETE FROM _sync_conflicts WHERE resolution IS NOT NULL AND resolved_at IS NOT NULL`);
  return result.changes;
}
12275
// Idempotently create the per-table sync bookkeeping table; schema text comes
// from the module-level SYNC_META_TABLE_SQL constant.
function ensureSyncMetaTable(db) {
  db.exec(SYNC_META_TABLE_SQL);
}
// Last-sync record for `table`, or null if it was never synced.
function getSyncMeta(db, table) {
  ensureSyncMetaTable(db);
  return db.get(`SELECT table_name, last_synced_at, last_synced_row_count, direction FROM _sync_meta WHERE table_name = ?`, table) ?? null;
}
// Insert-or-update the sync record keyed by meta.table_name.
// (Done as select-then-write rather than a single UPSERT statement.)
function upsertSyncMeta(db, meta) {
  ensureSyncMetaTable(db);
  const existing = db.get(`SELECT table_name FROM _sync_meta WHERE table_name = ?`, meta.table_name);
  if (existing) {
    db.run(`UPDATE _sync_meta SET last_synced_at = ?, last_synced_row_count = ?, direction = ? WHERE table_name = ?`, meta.last_synced_at, meta.last_synced_row_count, meta.direction, meta.table_name);
  } else {
    db.run(`INSERT INTO _sync_meta (table_name, last_synced_at, last_synced_row_count, direction) VALUES (?, ?, ?, ?)`, meta.table_name, meta.last_synced_at, meta.last_synced_row_count, meta.direction);
  }
}
12291
// Upsert `rows` into `table` on the `target` adapter, one row at a time.
// Existing rows whose conflict column is as new or newer than the incoming
// value are skipped ("newest wins"); per-row failures are collected, never
// thrown. Columns are taken from the FIRST row's keys. `source` is unused
// here but kept for signature parity with callers.
// Returns { written, skipped, errors }.
function transferRows(source, target, table, rows, options) {
  const { primaryKey = "id", conflictColumn = "updated_at" } = options;
  const stats = { written: 0, skipped: 0, errors: [] };
  if (rows.length === 0)
    return stats;
  const columns = Object.keys(rows[0]);
  if (!columns.includes(primaryKey)) {
    stats.errors.push(`Table "${table}" has no "${primaryKey}" column -- skipping`);
    return stats;
  }
  const hasConflictCol = columns.includes(conflictColumn);
  // Pre-build the three statements once; only bound values vary per row.
  const selectSql = `SELECT "${primaryKey}"${hasConflictCol ? `, "${conflictColumn}"` : ""} FROM "${table}" WHERE "${primaryKey}" = ?`;
  const nonKeyCols = columns.filter((c) => c !== primaryKey);
  const updateSql = `UPDATE "${table}" SET ${nonKeyCols.map((c) => `"${c}" = ?`).join(", ")} WHERE "${primaryKey}" = ?`;
  const insertSql = `INSERT INTO "${table}" (${columns.map((c) => `"${c}"`).join(", ")}) VALUES (${columns.map(() => "?").join(", ")})`;
  for (const row of rows) {
    try {
      const existing = target.get(selectSql, row[primaryKey]);
      if (!existing) {
        target.run(insertSql, ...columns.map((c) => row[c]));
      } else {
        if (hasConflictCol && existing[conflictColumn] && row[conflictColumn]) {
          const haveTs = new Date(existing[conflictColumn]).getTime();
          const wantTs = new Date(row[conflictColumn]).getTime();
          if (haveTs >= wantTs) {
            // Target already has an equal-or-newer copy of this row.
            stats.skipped++;
            continue;
          }
        }
        target.run(updateSql, ...nonKeyCols.map((c) => row[c]), row[primaryKey]);
      }
      stats.written++;
    } catch (err) {
      stats.errors.push(`Row ${row[primaryKey]}: ${err?.message ?? String(err)}`);
    }
  }
  return stats;
}
12334
// Push rows changed since the last recorded sync from `local` to `remote`.
// Sync bookkeeping (_sync_meta) always lives on the LOCAL db. Returns one
// stat record per table; errors are collected, never thrown.
// NOTE(review): this path is fully synchronous — confirm callers never pass
// an async adapter as `remote` (transferRows would mishandle its Promises).
function incrementalSyncPush(local, remote, tables, options = {}) {
  const { conflictColumn = "updated_at", batchSize = 500 } = options;
  const results = [];
  ensureSyncMetaTable(local);
  for (const table of tables) {
    const stat = {
      table,
      total_rows: 0,
      synced_rows: 0,
      skipped_rows: 0,
      errors: [],
      first_sync: false
    };
    try {
      const countResult = local.get(`SELECT COUNT(*) as cnt FROM "${table}"`);
      stat.total_rows = countResult?.cnt ?? 0;
      const meta = getSyncMeta(local, table);
      let rows;
      if (meta?.last_synced_at) {
        try {
          // Delta sync: only rows touched after the previous sync timestamp.
          rows = local.all(`SELECT * FROM "${table}" WHERE "${conflictColumn}" > ?`, meta.last_synced_at);
        } catch {
          // Table lacks the conflict column — fall back to a full copy.
          rows = local.all(`SELECT * FROM "${table}"`);
          stat.first_sync = true;
        }
      } else {
        rows = local.all(`SELECT * FROM "${table}"`);
        stat.first_sync = true;
      }
      for (let offset = 0;offset < rows.length; offset += batchSize) {
        const batch = rows.slice(offset, offset + batchSize);
        const result = transferRows(local, remote, table, batch, options);
        stat.synced_rows += result.written;
        stat.skipped_rows += result.skipped;
        stat.errors.push(...result.errors);
      }
      if (rows.length === 0) {
        // Nothing changed since last sync; report the whole table as skipped.
        stat.skipped_rows = stat.total_rows;
      }
      const now2 = new Date().toISOString();
      upsertSyncMeta(local, {
        table_name: table,
        last_synced_at: now2,
        last_synced_row_count: stat.synced_rows,
        direction: "push"
      });
    } catch (err) {
      stat.errors.push(`Table "${table}": ${err?.message ?? String(err)}`);
    }
    results.push(stat);
  }
  return results;
}
12387
// Pull rows changed since the last recorded sync from `remote` into `local`.
// Mirror image of incrementalSyncPush; sync bookkeeping still lives on the
// LOCAL db. Returns one stat record per table; errors are collected, never
// thrown. NOTE(review): `remote.get`/`remote.all` are called synchronously —
// confirm callers pass a synchronous remote adapter.
function incrementalSyncPull(remote, local, tables, options = {}) {
  const { conflictColumn = "updated_at", batchSize = 500 } = options;
  const results = [];
  ensureSyncMetaTable(local);
  for (const table of tables) {
    const stat = {
      table,
      total_rows: 0,
      synced_rows: 0,
      skipped_rows: 0,
      errors: [],
      first_sync: false
    };
    try {
      const countResult = remote.get(`SELECT COUNT(*) as cnt FROM "${table}"`);
      stat.total_rows = countResult?.cnt ?? 0;
      const meta = getSyncMeta(local, table);
      let rows;
      if (meta?.last_synced_at) {
        try {
          // Delta sync: only rows touched after the previous sync timestamp.
          rows = remote.all(`SELECT * FROM "${table}" WHERE "${conflictColumn}" > ?`, meta.last_synced_at);
        } catch {
          // Table lacks the conflict column — fall back to a full copy.
          rows = remote.all(`SELECT * FROM "${table}"`);
          stat.first_sync = true;
        }
      } else {
        rows = remote.all(`SELECT * FROM "${table}"`);
        stat.first_sync = true;
      }
      for (let offset = 0;offset < rows.length; offset += batchSize) {
        const batch = rows.slice(offset, offset + batchSize);
        const result = transferRows(remote, local, table, batch, options);
        stat.synced_rows += result.written;
        stat.skipped_rows += result.skipped;
        stat.errors.push(...result.errors);
      }
      if (rows.length === 0) {
        // Nothing changed since last sync; report the whole table as skipped.
        stat.skipped_rows = stat.total_rows;
      }
      const now2 = new Date().toISOString();
      upsertSyncMeta(local, {
        table_name: table,
        last_synced_at: now2,
        last_synced_row_count: stat.synced_rows,
        direction: "pull"
      });
    } catch (err) {
      stat.errors.push(`Table "${table}": ${err?.message ?? String(err)}`);
    }
    results.push(stat);
  }
  return results;
}
12440
// All per-table sync records, ordered by table name.
function getSyncMetaAll(db) {
  ensureSyncMetaTable(db);
  return db.all(`SELECT table_name, last_synced_at, last_synced_row_count, direction FROM _sync_meta ORDER BY table_name`);
}
// Thin alias over getSyncMeta kept for the public API surface.
function getSyncMetaForTable(db, table) {
  return getSyncMeta(db, table);
}
// Forget sync state for one table (forces a full re-sync next time).
function resetSyncMeta(db, table) {
  ensureSyncMetaTable(db);
  db.run(`DELETE FROM _sync_meta WHERE table_name = ?`, table);
}
// Forget sync state for every table.
function resetAllSyncMeta(db) {
  ensureSyncMetaTable(db);
  db.run(`DELETE FROM _sync_meta`);
}
12455
// Load auto-sync settings from AUTO_SYNC_CONFIG_PATH. Missing file, unreadable
// JSON, or non-boolean fields each fall back to DEFAULT_AUTO_SYNC_CONFIG
// (field-by-field for the latter). Always returns a fresh copy.
function getAutoSyncConfig() {
  try {
    if (!existsSync32(AUTO_SYNC_CONFIG_PATH)) {
      return { ...DEFAULT_AUTO_SYNC_CONFIG };
    }
    const raw = JSON.parse(readFileSync22(AUTO_SYNC_CONFIG_PATH, "utf-8"));
    return {
      auto_sync_on_start: typeof raw.auto_sync_on_start === "boolean" ? raw.auto_sync_on_start : DEFAULT_AUTO_SYNC_CONFIG.auto_sync_on_start,
      auto_sync_on_stop: typeof raw.auto_sync_on_stop === "boolean" ? raw.auto_sync_on_stop : DEFAULT_AUTO_SYNC_CONFIG.auto_sync_on_stop
    };
  } catch {
    // A corrupt config file is treated the same as no config file.
    return { ...DEFAULT_AUTO_SYNC_CONFIG };
  }
}
12469
// Run one auto-sync pass: a "start" event pulls from remote, anything else
// ("stop") pushes to it. A table counts toward tables_synced only when it
// produced zero errors; success means no errors anywhere. Never throws.
// NOTE(review): incrementalSyncPush/Pull are synchronous — confirm `remote`
// is a synchronous adapter here.
function executeAutoSync(event, local, remote, tables) {
  const direction = event === "start" ? "pull" : "push";
  const result = {
    event,
    direction,
    success: false,
    tables_synced: 0,
    total_rows_synced: 0,
    errors: []
  };
  try {
    const stats = direction === "pull" ? incrementalSyncPull(remote, local, tables) : incrementalSyncPush(local, remote, tables);
    for (const s of stats) {
      if (s.errors.length === 0) {
        result.tables_synced++;
      }
      result.total_rows_synced += s.synced_rows;
      result.errors.push(...s.errors);
    }
    result.success = result.errors.length === 0;
  } catch (err) {
    result.errors.push(err?.message ?? String(err));
  }
  return result;
}
12494
// Install process-exit hooks exactly once (guarded by the module-level
// signalHandlersInstalled flag). On SIGTERM/SIGINT/beforeExit every function
// in cleanupHandlers runs best-effort; individual handler exceptions are
// swallowed so one failing handler cannot block the others or the exit.
function installSignalHandlers() {
  if (signalHandlersInstalled)
    return;
  signalHandlersInstalled = true;
  const handleExit = () => {
    for (const fn of cleanupHandlers) {
      try {
        fn();
      } catch {}
    }
  };
  process.on("SIGTERM", () => {
    handleExit();
    process.exit(0);
  });
  process.on("SIGINT", () => {
    handleExit();
    process.exit(0);
  });
  process.on("beforeExit", () => {
    handleExit();
  });
}
12517
// Wire auto-sync into a server's lifecycle: pull on connect, push on
// disconnect and on process exit. Sync only runs when cloud mode is
// "hybrid"/"cloud" AND the corresponding auto_sync_on_* flag is set.
// If the server exposes onconnect/ondisconnect callback properties they are
// wrapped (original callback still invoked); otherwise an EventEmitter-style
// .on() registration is used. `serviceName` is currently unused here.
function setupAutoSync(serviceName, server, local, remote, tables) {
  const config = getAutoSyncConfig();
  const cloudConfig = getCloudConfig();
  const isSyncEnabled = cloudConfig.mode === "hybrid" || cloudConfig.mode === "cloud";
  const syncOnStart = () => {
    if (!config.auto_sync_on_start || !isSyncEnabled)
      return null;
    return executeAutoSync("start", local, remote, tables);
  };
  const syncOnStop = () => {
    if (!config.auto_sync_on_stop || !isSyncEnabled)
      return null;
    return executeAutoSync("stop", local, remote, tables);
  };
  if (server && typeof server.onconnect === "function") {
    const origOnConnect = server.onconnect;
    server.onconnect = (...args) => {
      syncOnStart();
      return origOnConnect.apply(server, args);
    };
  } else if (server && typeof server.on === "function") {
    server.on("connect", () => {
      syncOnStart();
    });
  }
  if (server && typeof server.ondisconnect === "function") {
    const origOnDisconnect = server.ondisconnect;
    server.ondisconnect = (...args) => {
      syncOnStop();
      return origOnDisconnect.apply(server, args);
    };
  } else if (server && typeof server.on === "function") {
    server.on("disconnect", () => {
      syncOnStop();
    });
  }
  // Also push on process termination (see installSignalHandlers).
  installSignalHandlers();
  cleanupHandlers.push(() => {
    syncOnStop();
  });
  return { syncOnStart, syncOnStop, config };
}
// Convenience wrapper that discards setupAutoSync's returned handles.
function enableAutoSync(serviceName, mcpServer, local, remote, tables) {
  setupAutoSync(serviceName, mcpServer, local, remote, tables);
}
12562
// Find services that have a local database: scans the directory returned by
// getHasnaDir() for subdirectories containing a `<name>/<name>.db` file.
// Returns [] when the directory is missing or unreadable.
function discoverSyncableServices() {
  const hasnaDir = getHasnaDir();
  const services = [];
  try {
    const entries = readdirSync22(hasnaDir, { withFileTypes: true });
    for (const entry of entries) {
      if (!entry.isDirectory())
        continue;
      const dbPath = join42(hasnaDir, entry.name, `${entry.name}.db`);
      if (existsSync42(dbPath)) {
        services.push(entry.name);
      }
    }
  } catch {}
  return services;
}
12578
// Cron-driven push of every discovered local service database to PostgreSQL.
// Returns one result record per service; connection and sync errors are
// collected per service instead of aborting the whole run.
// NOTE(review): incrementalSyncPush is synchronous, but `remote` is a
// PgAdapterAsync whose get()/run() presumably return Promises — transferRows
// would then see a truthy Promise for every existence check and its writes
// would never be awaited, so new rows would never be inserted. Confirm
// PgAdapterAsync offers a synchronous facade, or route this through the
// async batch-insert path instead.
async function runScheduledSync() {
  const config = getCloudConfig();
  if (config.mode === "local")
    return [];
  const services = discoverSyncableServices();
  const results = [];
  let remote = null;
  for (const service of services) {
    const result = {
      service,
      tables_synced: 0,
      total_rows_synced: 0,
      errors: []
    };
    try {
      const dbPath = join42(getDataDir(service), `${service}.db`);
      if (!existsSync42(dbPath)) {
        continue;
      }
      const local = new SqliteAdapter(dbPath);
      // Internal (_*) and sqlite housekeeping tables are never synced.
      const tables = listSqliteTables(local).filter((t) => !t.startsWith("_") && !t.startsWith("sqlite_"));
      if (tables.length === 0) {
        local.close();
        continue;
      }
      try {
        const connStr = getConnectionString(service);
        remote = new PgAdapterAsync(connStr);
      } catch (err) {
        result.errors.push(`Connection failed: ${err?.message ?? String(err)}`);
        local.close();
        results.push(result);
        continue;
      }
      const stats = incrementalSyncPush(local, remote, tables);
      for (const s of stats) {
        if (s.errors.length === 0) {
          result.tables_synced++;
        }
        result.total_rows_synced += s.synced_rows;
        result.errors.push(...s.errors);
      }
      local.close();
      await remote.close();
      remote = null;
    } catch (err) {
      result.errors.push(err?.message ?? String(err));
    }
    results.push(result);
  }
  // A throw inside the loop can leave the last connection open — close it.
  if (remote) {
    try {
      await remote.close();
    } catch {}
  }
  return results;
}
12635
// Locate the scheduled-sync worker script next to this module, preferring the
// .ts source (present when running under Bun from source) and falling back to
// the built .js. import.meta.dir is Bun-specific; the dirname fallback covers
// other runtimes.
function getWorkerPath() {
  const dir = typeof import.meta.dir === "string" ? import.meta.dir : dirname2(import.meta.url.replace("file://", ""));
  const tsPath = join52(dir, "scheduled-sync.ts");
  const jsPath = join52(dir, "scheduled-sync.js");
  try {
    const { existsSync: existsSync52 } = __require2("fs");
    if (existsSync52(tsPath))
      return tsPath;
  } catch {}
  return jsPath;
}
12646
// Parse a sync-interval string into whole minutes.
// Accepted forms (case-insensitive, surrounding whitespace ignored):
//   "<n>h" / "<n> h"  -> n * 60 minutes
//   "<n>m" / "<n> m"  -> n minutes
//   "<n>"             -> n minutes
// Throws on zero, negative, or malformed input.
function parseInterval(input) {
  const trimmed = input.trim().toLowerCase();
  const hourMatch = trimmed.match(/^(\d+)\s*h$/);
  if (hourMatch) {
    const hours = parseInt(hourMatch[1], 10);
    if (hours <= 0) {
      throw new Error(`Invalid interval "${input}". Value must be greater than 0.`);
    }
    return hours * 60;
  }
  const minMatch = trimmed.match(/^(\d+)\s*m$/);
  if (minMatch) {
    const mins = parseInt(minMatch[1], 10);
    if (mins <= 0) {
      throw new Error(`Invalid interval "${input}". Value must be greater than 0.`);
    }
    return mins;
  }
  // Bug fix: a bare parseInt silently accepted garbage like "15x" or "1.5"
  // by truncating to the leading digits. Require the whole string to be
  // digits before treating it as a plain minute count.
  if (/^\d+$/.test(trimmed)) {
    const plain = parseInt(trimmed, 10);
    if (plain > 0) {
      return plain;
    }
  }
  throw new Error(`Invalid interval "${input}". Use formats like: 5m, 10m, 1h, or a plain number of minutes.`);
}
12670
// Convert a minute interval into a cron expression. Whole-hour intervals up
// to a day become an hour-step schedule ("0 */H * * *"); everything else
// becomes a minute-step schedule ("*/M * * * *"). Throws on non-positive
// input.
// NOTE(review): for intervals over 59 minutes that are not whole hours
// (e.g. 90) this emits `*/90` in the minute field, which standard cron
// cannot honor as "every 90 minutes" — confirm the scheduler's step
// semantics before relying on such intervals.
function minutesToCron(minutes) {
  if (minutes <= 0) {
    throw new Error("Interval must be greater than 0 minutes.");
  }
  const isWholeHours = minutes >= 60 && minutes % 60 === 0 && minutes / 60 <= 24;
  if (isWholeHours) {
    return `0 */${minutes / 60} * * *`;
  }
  return `*/${minutes} * * * *`;
}
12684
// Register the scheduled-sync worker at the given interval and persist the
// interval in the cloud config. NOTE(review): relies on a `Bun.cron` /
// `Bun.cron.remove` API — confirm it exists in the targeted Bun runtime; it
// is not part of Bun's documented stable surface.
async function registerSyncSchedule(intervalMinutes) {
  if (intervalMinutes <= 0) {
    throw new Error("Interval must be a positive number of minutes.");
  }
  const cronExpr = minutesToCron(intervalMinutes);
  const workerPath = getWorkerPath();
  await Bun.cron(workerPath, cronExpr, CRON_TITLE);
  const config = getCloudConfig();
  config.sync.schedule_minutes = intervalMinutes;
  saveCloudConfig(config);
}
// Unregister the worker and zero out the stored interval.
async function removeSyncSchedule() {
  await Bun.cron.remove(CRON_TITLE);
  const config = getCloudConfig();
  config.sync.schedule_minutes = 0;
  saveCloudConfig(config);
}
// Report the persisted schedule. `registered` is derived from the stored
// config only — the actual cron registry is not consulted.
function getSyncScheduleStatus() {
  const config = getCloudConfig();
  const minutes = config.sync.schedule_minutes;
  const registered = minutes > 0;
  return {
    registered,
    schedule_minutes: minutes,
    cron_expression: registered ? minutesToCron(minutes) : null
  };
}
12711
+ function registerCloudTools(server, serviceName) {
12712
+ server.tool(`${serviceName}_cloud_status`, "Show cloud configuration and connection health", {}, async () => {
12713
+ const config = getCloudConfig();
12714
+ const lines = [
12715
+ `Mode: ${config.mode}`,
12716
+ `Service: ${serviceName}`,
12717
+ `RDS Host: ${config.rds.host || "(not configured)"}`
12718
+ ];
12719
+ if (config.rds.host && config.rds.username) {
12720
+ try {
12721
+ const pg2 = new PgAdapterAsync(getConnectionString("postgres"));
12722
+ await pg2.get("SELECT 1 as ok");
12723
+ lines.push("PostgreSQL: connected");
12724
+ await pg2.close();
12725
+ } catch (err) {
12726
+ lines.push(`PostgreSQL: failed \u2014 ${err?.message}`);
12727
+ }
12728
+ }
12729
+ return { content: [{ type: "text", text: lines.join(`
12730
+ `) }] };
12731
+ });
12732
+ server.tool(`${serviceName}_cloud_push`, "Push local data to cloud PostgreSQL", {
12733
+ tables: exports_external2.string().optional().describe("Comma-separated table names (default: all)")
12734
+ }, async ({ tables: tablesStr }) => {
12735
+ const config = getCloudConfig();
12736
+ if (config.mode === "local") {
12737
+ return {
12738
+ content: [
12739
+ { type: "text", text: "Error: cloud mode not configured." }
12740
+ ],
12741
+ isError: true
12742
+ };
12743
+ }
12744
+ const local = new SqliteAdapter(getDbPath2(serviceName));
12745
+ const cloud = new PgAdapterAsync(getConnectionString(serviceName));
12746
+ const tableList = tablesStr ? tablesStr.split(",").map((t) => t.trim()) : listSqliteTables(local);
12747
+ const results = await syncPush(local, cloud, { tables: tableList });
12748
+ local.close();
12749
+ await cloud.close();
12750
+ const total = results.reduce((s, r) => s + r.rowsWritten, 0);
12751
+ return {
12752
+ content: [{ type: "text", text: `Pushed ${total} rows across ${tableList.length} table(s).` }]
12753
+ };
12754
+ });
12755
+ server.tool(`${serviceName}_cloud_pull`, "Pull cloud PostgreSQL data to local", {
12756
+ tables: exports_external2.string().optional().describe("Comma-separated table names (default: all)")
12757
+ }, async ({ tables: tablesStr }) => {
12758
+ const config = getCloudConfig();
12759
+ if (config.mode === "local") {
12760
+ return {
12761
+ content: [
12762
+ { type: "text", text: "Error: cloud mode not configured." }
12763
+ ],
12764
+ isError: true
11602
12765
  };
11603
12766
  }
11604
12767
  const local = new SqliteAdapter(getDbPath2(serviceName));
11605
- const cloud = new PgAdapter(getConnectionString(serviceName));
12768
+ const cloud = new PgAdapterAsync(getConnectionString(serviceName));
11606
12769
  let tableList;
11607
12770
  if (tablesStr) {
11608
12771
  tableList = tablesStr.split(",").map((t) => t.trim());
11609
12772
  } else {
11610
12773
  try {
11611
- const rows = cloud.all(`SELECT tablename FROM pg_tables WHERE schemaname = 'public'`);
11612
- tableList = rows.map((r) => r.tablename);
12774
+ tableList = await listPgTables(cloud);
11613
12775
  } catch {
11614
12776
  local.close();
11615
- cloud.close();
12777
+ await cloud.close();
11616
12778
  return {
11617
12779
  content: [
11618
12780
  { type: "text", text: "Error: failed to list cloud tables." }
@@ -11621,9 +12783,9 @@ function registerCloudTools(server, serviceName) {
11621
12783
  };
11622
12784
  }
11623
12785
  }
11624
- const results = syncPull(local, cloud, { tables: tableList });
12786
+ const results = await syncPull(cloud, local, { tables: tableList });
11625
12787
  local.close();
11626
- cloud.close();
12788
+ await cloud.close();
11627
12789
  const total = results.reduce((s, r) => s + r.rowsWritten, 0);
11628
12790
  return {
11629
12791
  content: [{ type: "text", text: `Pulled ${total} rows across ${tableList.length} table(s).` }]
@@ -11646,6 +12808,85 @@ function registerCloudTools(server, serviceName) {
11646
12808
  };
11647
12809
  });
11648
12810
  }
12811
+ function registerCloudCommands(program2, serviceName) {
12812
+ const cloudCmd = program2.command("cloud").description("Cloud sync and feedback commands");
12813
+ cloudCmd.command("status").description("Show cloud config and connection health").action(async () => {
12814
+ const config = getCloudConfig();
12815
+ console.log("Mode:", config.mode);
12816
+ console.log("RDS Host:", config.rds.host || "(not configured)");
12817
+ console.log("Service:", serviceName);
12818
+ if (config.rds.host && config.rds.username) {
12819
+ try {
12820
+ const connStr = getConnectionString("postgres");
12821
+ const pg2 = new PgAdapterAsync(connStr);
12822
+ await pg2.get("SELECT 1 as ok");
12823
+ console.log("PostgreSQL: connected");
12824
+ await pg2.close();
12825
+ } catch (err) {
12826
+ console.log("PostgreSQL: connection failed \u2014", err?.message);
12827
+ }
12828
+ }
12829
+ });
12830
+ cloudCmd.command("push").description("Push local data to cloud").option("--tables <tables>", "Comma-separated table names").action(async (opts) => {
12831
+ const config = getCloudConfig();
12832
+ if (config.mode === "local") {
12833
+ console.error("Error: mode is 'local'. Run `cloud setup` first.");
12834
+ process.exit(1);
12835
+ }
12836
+ const local = new SqliteAdapter(getDbPath2(serviceName));
12837
+ const cloud = new PgAdapterAsync(getConnectionString(serviceName));
12838
+ const tables = opts.tables ? opts.tables.split(",").map((t) => t.trim()) : listSqliteTables(local);
12839
+ const results = await syncPush(local, cloud, {
12840
+ tables,
12841
+ onProgress: (p) => {
12842
+ if (p.phase === "done") {
12843
+ console.log(` ${p.table}: ${p.rowsWritten} rows pushed`);
12844
+ }
12845
+ }
12846
+ });
12847
+ local.close();
12848
+ await cloud.close();
12849
+ const total = results.reduce((s, r) => s + r.rowsWritten, 0);
12850
+ console.log(`Done. ${total} rows pushed.`);
12851
+ });
12852
+ cloudCmd.command("pull").description("Pull cloud data to local").option("--tables <tables>", "Comma-separated table names").action(async (opts) => {
12853
+ const config = getCloudConfig();
12854
+ if (config.mode === "local") {
12855
+ console.error("Error: mode is 'local'. Run `cloud setup` first.");
12856
+ process.exit(1);
12857
+ }
12858
+ const local = new SqliteAdapter(getDbPath2(serviceName));
12859
+ const cloud = new PgAdapterAsync(getConnectionString(serviceName));
12860
+ let tables;
12861
+ if (opts.tables) {
12862
+ tables = opts.tables.split(",").map((t) => t.trim());
12863
+ } else {
12864
+ tables = await listPgTables(cloud);
12865
+ }
12866
+ const results = await syncPull(cloud, local, {
12867
+ tables,
12868
+ onProgress: (p) => {
12869
+ if (p.phase === "done") {
12870
+ console.log(` ${p.table}: ${p.rowsWritten} rows pulled`);
12871
+ }
12872
+ }
12873
+ });
12874
+ local.close();
12875
+ await cloud.close();
12876
+ const total = results.reduce((s, r) => s + r.rowsWritten, 0);
12877
+ console.log(`Done. ${total} rows pulled.`);
12878
+ });
12879
+ cloudCmd.command("feedback").description("Send feedback").requiredOption("--message <msg>", "Feedback message").option("--email <email>", "Contact email").action(async (opts) => {
12880
+ const db = createDatabase({ service: "cloud" });
12881
+ const result = await sendFeedback({ service: serviceName, message: opts.message, email: opts.email }, db);
12882
+ db.close();
12883
+ if (result.sent) {
12884
+ console.log(`Feedback sent (id: ${result.id})`);
12885
+ } else {
12886
+ console.log(`Feedback saved locally (id: ${result.id}): ${result.error}`);
12887
+ }
12888
+ });
12889
+ }
11649
12890
  var __create2, __getProtoOf2, __defProp2, __getOwnPropNames2, __hasOwnProp2, __toESMCache_node2, __toESMCache_esm2, __toESM2 = (mod, isNodeMode, target) => {
11650
12891
  var canCache = mod != null && typeof mod === "object";
11651
12892
  if (canCache) {
@@ -11898,7 +13139,13 @@ CREATE TABLE IF NOT EXISTS feedback (
11898
13139
  email TEXT DEFAULT '',
11899
13140
  machine_id TEXT DEFAULT '',
11900
13141
  created_at TEXT DEFAULT (datetime('now'))
11901
- )`;
13142
+ )`, SYNC_META_TABLE_SQL = `
13143
+ CREATE TABLE IF NOT EXISTS _sync_meta (
13144
+ table_name TEXT PRIMARY KEY,
13145
+ last_synced_at TEXT,
13146
+ last_synced_row_count INTEGER DEFAULT 0,
13147
+ direction TEXT DEFAULT 'push'
13148
+ )`, AUTO_SYNC_CONFIG_PATH, DEFAULT_AUTO_SYNC_CONFIG, cleanupHandlers, signalHandlersInstalled = false, CRON_TITLE = "hasna-cloud-sync";
11902
13149
  var init_dist = __esm(() => {
11903
13150
  __create2 = Object.create;
11904
13151
  __getProtoOf2 = Object.getPrototypeOf;
@@ -19837,10 +21084,19 @@ See https://www.postgresql.org/docs/current/libpq-ssl.html for libpq SSL mode de
19837
21084
  }).default({}),
19838
21085
  mode: exports_external2.enum(["local", "cloud", "hybrid"]).default("local"),
19839
21086
  auto_sync_interval_minutes: exports_external2.number().default(0),
19840
- feedback_endpoint: exports_external2.string().default("https://feedback.hasna.com/api/v1/feedback")
21087
+ feedback_endpoint: exports_external2.string().default("https://feedback.hasna.com/api/v1/feedback"),
21088
+ sync: exports_external2.object({
21089
+ schedule_minutes: exports_external2.number().default(0)
21090
+ }).default({})
19841
21091
  });
19842
21092
  CONFIG_DIR2 = join22(homedir22(), ".hasna", "cloud");
19843
21093
  CONFIG_PATH2 = join22(CONFIG_DIR2, "config.json");
21094
+ AUTO_SYNC_CONFIG_PATH = join32(homedir32(), ".hasna", "cloud", "config.json");
21095
+ DEFAULT_AUTO_SYNC_CONFIG = {
21096
+ auto_sync_on_start: true,
21097
+ auto_sync_on_stop: true
21098
+ };
21099
+ cleanupHandlers = [];
19844
21100
  });
19845
21101
 
19846
21102
  // src/db/traces.ts
@@ -21046,106 +22302,691 @@ var init_patrol = __esm(() => {
21046
22302
  init_database();
21047
22303
  });
21048
22304
 
21049
- // src/db/agent-metrics.ts
21050
- var exports_agent_metrics = {};
21051
- __export(exports_agent_metrics, {
21052
- scoreTask: () => scoreTask,
21053
- getLeaderboard: () => getLeaderboard,
21054
- getAgentMetrics: () => getAgentMetrics
22305
+ // src/db/agent-metrics.ts
22306
+ var exports_agent_metrics = {};
22307
+ __export(exports_agent_metrics, {
22308
+ scoreTask: () => scoreTask,
22309
+ getLeaderboard: () => getLeaderboard,
22310
+ getAgentMetrics: () => getAgentMetrics
22311
+ });
22312
+ function getAgentMetrics(agentId, opts, db) {
22313
+ const d = db || getDatabase();
22314
+ const agent = d.query("SELECT id, name FROM agents WHERE id = ? OR LOWER(name) = LOWER(?)").get(agentId, agentId);
22315
+ if (!agent)
22316
+ return null;
22317
+ let projectFilter = "";
22318
+ const params = [agent.id, agent.id];
22319
+ if (opts?.project_id) {
22320
+ projectFilter = " AND project_id = ?";
22321
+ params.push(opts.project_id);
22322
+ }
22323
+ const completed = d.query(`SELECT COUNT(*) as count FROM tasks WHERE (agent_id = ? OR assigned_to = ?) AND status = 'completed'${projectFilter}`).get(...params).count;
22324
+ const failed = d.query(`SELECT COUNT(*) as count FROM tasks WHERE (agent_id = ? OR assigned_to = ?) AND status = 'failed'${projectFilter}`).get(...params).count;
22325
+ const inProgress = d.query(`SELECT COUNT(*) as count FROM tasks WHERE (agent_id = ? OR assigned_to = ?) AND status = 'in_progress'${projectFilter}`).get(...params).count;
22326
+ const total = completed + failed;
22327
+ const completionRate = total > 0 ? completed / total : 0;
22328
+ const avgTime = d.query(`SELECT AVG(
22329
+ (julianday(completed_at) - julianday(created_at)) * 24 * 60
22330
+ ) as avg_minutes
22331
+ FROM tasks
22332
+ WHERE (agent_id = ? OR assigned_to = ?) AND status = 'completed' AND completed_at IS NOT NULL${projectFilter}`).get(...params);
22333
+ const avgConf = d.query(`SELECT AVG(confidence) as avg_confidence
22334
+ FROM tasks
22335
+ WHERE (agent_id = ? OR assigned_to = ?) AND status = 'completed' AND confidence IS NOT NULL${projectFilter}`).get(...params);
22336
+ const reviewTasks = d.query(`SELECT metadata FROM tasks
22337
+ WHERE (agent_id = ? OR assigned_to = ?) AND status = 'completed'${projectFilter}
22338
+ AND metadata LIKE '%_review_score%'`).all(...params);
22339
+ let reviewScoreAvg = null;
22340
+ if (reviewTasks.length > 0) {
22341
+ let total2 = 0;
22342
+ let count = 0;
22343
+ for (const row of reviewTasks) {
22344
+ try {
22345
+ const meta = JSON.parse(row.metadata);
22346
+ if (typeof meta._review_score === "number") {
22347
+ total2 += meta._review_score;
22348
+ count++;
22349
+ }
22350
+ } catch {}
22351
+ }
22352
+ if (count > 0)
22353
+ reviewScoreAvg = total2 / count;
22354
+ }
22355
+ const speedScore = avgTime?.avg_minutes != null ? Math.max(0, 1 - avgTime.avg_minutes / (60 * 24)) : 0.5;
22356
+ const confidenceScore = avgConf?.avg_confidence ?? 0.5;
22357
+ const volumeScore = Math.min(1, completed / 50);
22358
+ const compositeScore = completionRate * 0.3 + speedScore * 0.2 + confidenceScore * 0.3 + volumeScore * 0.2;
22359
+ return {
22360
+ agent_id: agent.id,
22361
+ agent_name: agent.name,
22362
+ tasks_completed: completed,
22363
+ tasks_failed: failed,
22364
+ tasks_in_progress: inProgress,
22365
+ completion_rate: Math.round(completionRate * 1000) / 1000,
22366
+ avg_completion_minutes: avgTime?.avg_minutes != null ? Math.round(avgTime.avg_minutes * 10) / 10 : null,
22367
+ avg_confidence: avgConf?.avg_confidence != null ? Math.round(avgConf.avg_confidence * 1000) / 1000 : null,
22368
+ review_score_avg: reviewScoreAvg != null ? Math.round(reviewScoreAvg * 1000) / 1000 : null,
22369
+ composite_score: Math.round(compositeScore * 1000) / 1000
22370
+ };
22371
+ }
22372
+ function getLeaderboard(opts, db) {
22373
+ const d = db || getDatabase();
22374
+ const agents = d.query("SELECT id FROM agents ORDER BY name").all();
22375
+ const entries = [];
22376
+ for (const agent of agents) {
22377
+ const metrics = getAgentMetrics(agent.id, { project_id: opts?.project_id }, d);
22378
+ if (metrics && (metrics.tasks_completed > 0 || metrics.tasks_failed > 0 || metrics.tasks_in_progress > 0)) {
22379
+ entries.push(metrics);
22380
+ }
22381
+ }
22382
+ entries.sort((a, b) => b.composite_score - a.composite_score);
22383
+ const limit = opts?.limit || 20;
22384
+ return entries.slice(0, limit).map((entry, idx) => ({
22385
+ ...entry,
22386
+ rank: idx + 1
22387
+ }));
22388
+ }
22389
+ function scoreTask(taskId, score, reviewerId, db) {
22390
+ const d = db || getDatabase();
22391
+ if (score < 0 || score > 1)
22392
+ throw new Error("Score must be between 0 and 1");
22393
+ const task = d.query("SELECT metadata FROM tasks WHERE id = ?").get(taskId);
22394
+ if (!task)
22395
+ throw new Error(`Task not found: ${taskId}`);
22396
+ const metadata = JSON.parse(task.metadata || "{}");
22397
+ metadata._review_score = score;
22398
+ if (reviewerId)
22399
+ metadata._reviewed_by = reviewerId;
22400
+ metadata._reviewed_at = now();
22401
+ d.run("UPDATE tasks SET metadata = ?, updated_at = ? WHERE id = ?", [JSON.stringify(metadata), now(), taskId]);
22402
+ }
22403
+ var init_agent_metrics = __esm(() => {
22404
+ init_database();
22405
+ });
22406
+
22407
+ // src/db/pg-migrations.ts
22408
+ var PG_MIGRATIONS;
22409
+ var init_pg_migrations = __esm(() => {
22410
+ PG_MIGRATIONS = [
22411
+ `
22412
+ CREATE TABLE IF NOT EXISTS projects (
22413
+ id TEXT PRIMARY KEY,
22414
+ name TEXT NOT NULL,
22415
+ path TEXT UNIQUE NOT NULL,
22416
+ description TEXT,
22417
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
22418
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
22419
+ );
22420
+
22421
+ CREATE TABLE IF NOT EXISTS tasks (
22422
+ id TEXT PRIMARY KEY,
22423
+ project_id TEXT REFERENCES projects(id) ON DELETE SET NULL,
22424
+ parent_id TEXT REFERENCES tasks(id) ON DELETE CASCADE,
22425
+ title TEXT NOT NULL,
22426
+ description TEXT,
22427
+ status TEXT NOT NULL DEFAULT 'pending' CHECK(status IN ('pending', 'in_progress', 'completed', 'failed', 'cancelled')),
22428
+ priority TEXT NOT NULL DEFAULT 'medium' CHECK(priority IN ('low', 'medium', 'high', 'critical')),
22429
+ agent_id TEXT,
22430
+ assigned_to TEXT,
22431
+ session_id TEXT,
22432
+ working_dir TEXT,
22433
+ tags TEXT DEFAULT '[]',
22434
+ metadata TEXT DEFAULT '{}',
22435
+ version INTEGER NOT NULL DEFAULT 1,
22436
+ locked_by TEXT,
22437
+ locked_at TEXT,
22438
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
22439
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
22440
+ completed_at TEXT
22441
+ );
22442
+
22443
+ CREATE TABLE IF NOT EXISTS task_dependencies (
22444
+ task_id TEXT NOT NULL REFERENCES tasks(id) ON DELETE CASCADE,
22445
+ depends_on TEXT NOT NULL REFERENCES tasks(id) ON DELETE CASCADE,
22446
+ PRIMARY KEY (task_id, depends_on),
22447
+ CHECK (task_id != depends_on)
22448
+ );
22449
+
22450
+ CREATE TABLE IF NOT EXISTS task_comments (
22451
+ id TEXT PRIMARY KEY,
22452
+ task_id TEXT NOT NULL REFERENCES tasks(id) ON DELETE CASCADE,
22453
+ agent_id TEXT,
22454
+ session_id TEXT,
22455
+ content TEXT NOT NULL,
22456
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
22457
+ );
22458
+
22459
+ CREATE TABLE IF NOT EXISTS sessions (
22460
+ id TEXT PRIMARY KEY,
22461
+ agent_id TEXT,
22462
+ project_id TEXT REFERENCES projects(id) ON DELETE SET NULL,
22463
+ working_dir TEXT,
22464
+ started_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
22465
+ last_activity TIMESTAMPTZ NOT NULL DEFAULT NOW(),
22466
+ metadata TEXT DEFAULT '{}'
22467
+ );
22468
+
22469
+ CREATE INDEX IF NOT EXISTS idx_tasks_project ON tasks(project_id);
22470
+ CREATE INDEX IF NOT EXISTS idx_tasks_parent ON tasks(parent_id);
22471
+ CREATE INDEX IF NOT EXISTS idx_tasks_status ON tasks(status);
22472
+ CREATE INDEX IF NOT EXISTS idx_tasks_priority ON tasks(priority);
22473
+ CREATE INDEX IF NOT EXISTS idx_tasks_assigned ON tasks(assigned_to);
22474
+ CREATE INDEX IF NOT EXISTS idx_tasks_agent ON tasks(agent_id);
22475
+ CREATE INDEX IF NOT EXISTS idx_tasks_session ON tasks(session_id);
22476
+ CREATE INDEX IF NOT EXISTS idx_comments_task ON task_comments(task_id);
22477
+ CREATE INDEX IF NOT EXISTS idx_sessions_agent ON sessions(agent_id);
22478
+ CREATE INDEX IF NOT EXISTS idx_sessions_project ON sessions(project_id);
22479
+
22480
+ CREATE TABLE IF NOT EXISTS _migrations (
22481
+ id INTEGER PRIMARY KEY,
22482
+ applied_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
22483
+ );
22484
+
22485
+ INSERT INTO _migrations (id) VALUES (1) ON CONFLICT DO NOTHING;
22486
+ `,
22487
+ `
22488
+ ALTER TABLE projects ADD COLUMN IF NOT EXISTS task_list_id TEXT;
22489
+ INSERT INTO _migrations (id) VALUES (2) ON CONFLICT DO NOTHING;
22490
+ `,
22491
+ `
22492
+ CREATE TABLE IF NOT EXISTS task_tags (
22493
+ task_id TEXT NOT NULL REFERENCES tasks(id) ON DELETE CASCADE,
22494
+ tag TEXT NOT NULL,
22495
+ PRIMARY KEY (task_id, tag)
22496
+ );
22497
+ CREATE INDEX IF NOT EXISTS idx_task_tags_tag ON task_tags(tag);
22498
+ CREATE INDEX IF NOT EXISTS idx_task_tags_task ON task_tags(task_id);
22499
+
22500
+ INSERT INTO _migrations (id) VALUES (3) ON CONFLICT DO NOTHING;
22501
+ `,
22502
+ `
22503
+ CREATE TABLE IF NOT EXISTS plans (
22504
+ id TEXT PRIMARY KEY,
22505
+ project_id TEXT REFERENCES projects(id) ON DELETE CASCADE,
22506
+ name TEXT NOT NULL,
22507
+ description TEXT,
22508
+ status TEXT NOT NULL DEFAULT 'active' CHECK(status IN ('active', 'completed', 'archived')),
22509
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
22510
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
22511
+ );
22512
+ CREATE INDEX IF NOT EXISTS idx_plans_project ON plans(project_id);
22513
+ CREATE INDEX IF NOT EXISTS idx_plans_status ON plans(status);
22514
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS plan_id TEXT REFERENCES plans(id) ON DELETE SET NULL;
22515
+ CREATE INDEX IF NOT EXISTS idx_tasks_plan ON tasks(plan_id);
22516
+ INSERT INTO _migrations (id) VALUES (4) ON CONFLICT DO NOTHING;
22517
+ `,
22518
+ `
22519
+ CREATE TABLE IF NOT EXISTS agents (
22520
+ id TEXT PRIMARY KEY,
22521
+ name TEXT NOT NULL UNIQUE,
22522
+ description TEXT,
22523
+ metadata TEXT DEFAULT '{}',
22524
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
22525
+ last_seen_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
22526
+ );
22527
+ CREATE INDEX IF NOT EXISTS idx_agents_name ON agents(name);
22528
+
22529
+ CREATE TABLE IF NOT EXISTS task_lists (
22530
+ id TEXT PRIMARY KEY,
22531
+ project_id TEXT REFERENCES projects(id) ON DELETE SET NULL,
22532
+ slug TEXT NOT NULL,
22533
+ name TEXT NOT NULL,
22534
+ description TEXT,
22535
+ metadata TEXT DEFAULT '{}',
22536
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
22537
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
22538
+ UNIQUE(project_id, slug)
22539
+ );
22540
+ CREATE INDEX IF NOT EXISTS idx_task_lists_project ON task_lists(project_id);
22541
+ CREATE INDEX IF NOT EXISTS idx_task_lists_slug ON task_lists(slug);
22542
+
22543
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS task_list_id TEXT REFERENCES task_lists(id) ON DELETE SET NULL;
22544
+ CREATE INDEX IF NOT EXISTS idx_tasks_task_list ON tasks(task_list_id);
22545
+
22546
+ INSERT INTO _migrations (id) VALUES (5) ON CONFLICT DO NOTHING;
22547
+ `,
22548
+ `
22549
+ ALTER TABLE projects ADD COLUMN IF NOT EXISTS task_prefix TEXT;
22550
+ ALTER TABLE projects ADD COLUMN IF NOT EXISTS task_counter INTEGER NOT NULL DEFAULT 0;
22551
+
22552
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS short_id TEXT;
22553
+ CREATE UNIQUE INDEX IF NOT EXISTS idx_tasks_short_id ON tasks(short_id) WHERE short_id IS NOT NULL;
22554
+
22555
+ INSERT INTO _migrations (id) VALUES (6) ON CONFLICT DO NOTHING;
22556
+ `,
22557
+ `
22558
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS due_at TEXT;
22559
+ CREATE INDEX IF NOT EXISTS idx_tasks_due_at ON tasks(due_at);
22560
+ INSERT INTO _migrations (id) VALUES (7) ON CONFLICT DO NOTHING;
22561
+ `,
22562
+ `
22563
+ ALTER TABLE agents ADD COLUMN IF NOT EXISTS role TEXT DEFAULT 'agent';
22564
+ INSERT INTO _migrations (id) VALUES (8) ON CONFLICT DO NOTHING;
22565
+ `,
22566
+ `
22567
+ ALTER TABLE plans ADD COLUMN IF NOT EXISTS task_list_id TEXT REFERENCES task_lists(id) ON DELETE SET NULL;
22568
+ ALTER TABLE plans ADD COLUMN IF NOT EXISTS agent_id TEXT;
22569
+ CREATE INDEX IF NOT EXISTS idx_plans_task_list ON plans(task_list_id);
22570
+ CREATE INDEX IF NOT EXISTS idx_plans_agent ON plans(agent_id);
22571
+ INSERT INTO _migrations (id) VALUES (9) ON CONFLICT DO NOTHING;
22572
+ `,
22573
+ `
22574
+ CREATE TABLE IF NOT EXISTS task_history (
22575
+ id TEXT PRIMARY KEY,
22576
+ task_id TEXT NOT NULL REFERENCES tasks(id) ON DELETE CASCADE,
22577
+ action TEXT NOT NULL,
22578
+ field TEXT,
22579
+ old_value TEXT,
22580
+ new_value TEXT,
22581
+ agent_id TEXT,
22582
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
22583
+ );
22584
+ CREATE INDEX IF NOT EXISTS idx_task_history_task ON task_history(task_id);
22585
+ CREATE INDEX IF NOT EXISTS idx_task_history_agent ON task_history(agent_id);
22586
+
22587
+ CREATE TABLE IF NOT EXISTS webhooks (
22588
+ id TEXT PRIMARY KEY,
22589
+ url TEXT NOT NULL,
22590
+ events TEXT NOT NULL DEFAULT '[]',
22591
+ secret TEXT,
22592
+ active BOOLEAN NOT NULL DEFAULT TRUE,
22593
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
22594
+ );
22595
+
22596
+ CREATE TABLE IF NOT EXISTS task_templates (
22597
+ id TEXT PRIMARY KEY,
22598
+ name TEXT NOT NULL,
22599
+ title_pattern TEXT NOT NULL,
22600
+ description TEXT,
22601
+ priority TEXT DEFAULT 'medium',
22602
+ tags TEXT DEFAULT '[]',
22603
+ project_id TEXT REFERENCES projects(id) ON DELETE SET NULL,
22604
+ plan_id TEXT REFERENCES plans(id) ON DELETE SET NULL,
22605
+ metadata TEXT DEFAULT '{}',
22606
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
22607
+ );
22608
+
22609
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS estimated_minutes INTEGER;
22610
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS requires_approval BOOLEAN NOT NULL DEFAULT FALSE;
22611
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS approved_by TEXT;
22612
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS approved_at TEXT;
22613
+
22614
+ ALTER TABLE agents ADD COLUMN IF NOT EXISTS permissions TEXT DEFAULT '["*"]';
22615
+
22616
+ INSERT INTO _migrations (id) VALUES (10) ON CONFLICT DO NOTHING;
22617
+ `,
22618
+ `
22619
+ ALTER TABLE agents ADD COLUMN IF NOT EXISTS reports_to TEXT;
22620
+ ALTER TABLE agents ADD COLUMN IF NOT EXISTS title TEXT;
22621
+ ALTER TABLE agents ADD COLUMN IF NOT EXISTS level TEXT;
22622
+ INSERT INTO _migrations (id) VALUES (11) ON CONFLICT DO NOTHING;
22623
+ `,
22624
+ `
22625
+ CREATE TABLE IF NOT EXISTS orgs (
22626
+ id TEXT PRIMARY KEY,
22627
+ name TEXT NOT NULL UNIQUE,
22628
+ description TEXT,
22629
+ metadata TEXT DEFAULT '{}',
22630
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
22631
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
22632
+ );
22633
+ ALTER TABLE agents ADD COLUMN IF NOT EXISTS org_id TEXT REFERENCES orgs(id) ON DELETE SET NULL;
22634
+ ALTER TABLE projects ADD COLUMN IF NOT EXISTS org_id TEXT REFERENCES orgs(id) ON DELETE SET NULL;
22635
+ INSERT INTO _migrations (id) VALUES (12) ON CONFLICT DO NOTHING;
22636
+ `,
22637
+ `
22638
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS recurrence_rule TEXT;
22639
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS recurrence_parent_id TEXT REFERENCES tasks(id) ON DELETE SET NULL;
22640
+ CREATE INDEX IF NOT EXISTS idx_tasks_recurrence_parent ON tasks(recurrence_parent_id);
22641
+ CREATE INDEX IF NOT EXISTS idx_tasks_recurrence_rule ON tasks(recurrence_rule) WHERE recurrence_rule IS NOT NULL;
22642
+ INSERT INTO _migrations (id) VALUES (13) ON CONFLICT DO NOTHING;
22643
+ `,
22644
+ `
22645
+ ALTER TABLE task_comments ADD COLUMN IF NOT EXISTS type TEXT DEFAULT 'comment' CHECK(type IN ('comment', 'progress', 'note'));
22646
+ ALTER TABLE task_comments ADD COLUMN IF NOT EXISTS progress_pct INTEGER CHECK(progress_pct IS NULL OR (progress_pct >= 0 AND progress_pct <= 100));
22647
+ INSERT INTO _migrations (id) VALUES (14) ON CONFLICT DO NOTHING;
22648
+ `,
22649
+ `
22650
+ -- PostgreSQL uses tsvector/tsquery instead of FTS5
22651
+ -- Full-text search can be done with to_tsvector/to_tsquery on tasks table directly
22652
+ -- No virtual table needed; add a generated tsvector column instead
22653
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS search_vector tsvector;
22654
+
22655
+ CREATE INDEX IF NOT EXISTS idx_tasks_search ON tasks USING GIN(search_vector);
22656
+
22657
+ -- Function to update search vector
22658
+ CREATE OR REPLACE FUNCTION tasks_search_vector_update() RETURNS trigger AS $$
22659
+ BEGIN
22660
+ NEW.search_vector :=
22661
+ setweight(to_tsvector('english', COALESCE(NEW.title, '')), 'A') ||
22662
+ setweight(to_tsvector('english', COALESCE(NEW.description, '')), 'B') ||
22663
+ setweight(to_tsvector('english', COALESCE(NEW.tags, '')), 'C');
22664
+ RETURN NEW;
22665
+ END;
22666
+ $$ LANGUAGE plpgsql;
22667
+
22668
+ DROP TRIGGER IF EXISTS tasks_search_vector_trigger ON tasks;
22669
+ CREATE TRIGGER tasks_search_vector_trigger
22670
+ BEFORE INSERT OR UPDATE OF title, description, tags ON tasks
22671
+ FOR EACH ROW EXECUTE FUNCTION tasks_search_vector_update();
22672
+
22673
+ -- Backfill existing rows
22674
+ UPDATE tasks SET search_vector =
22675
+ setweight(to_tsvector('english', COALESCE(title, '')), 'A') ||
22676
+ setweight(to_tsvector('english', COALESCE(description, '')), 'B') ||
22677
+ setweight(to_tsvector('english', COALESCE(tags, '')), 'C')
22678
+ WHERE search_vector IS NULL;
22679
+
22680
+ INSERT INTO _migrations (id) VALUES (15) ON CONFLICT DO NOTHING;
22681
+ `,
22682
+ `
22683
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS spawns_template_id TEXT REFERENCES task_templates(id) ON DELETE SET NULL;
22684
+ INSERT INTO _migrations (id) VALUES (16) ON CONFLICT DO NOTHING;
22685
+ `,
22686
+ `
22687
+ ALTER TABLE agents ADD COLUMN IF NOT EXISTS session_id TEXT;
22688
+ ALTER TABLE agents ADD COLUMN IF NOT EXISTS working_dir TEXT;
22689
+ INSERT INTO _migrations (id) VALUES (17) ON CONFLICT DO NOTHING;
22690
+ `,
22691
+ `
22692
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS confidence DOUBLE PRECISION;
22693
+ INSERT INTO _migrations (id) VALUES (18) ON CONFLICT DO NOTHING;
22694
+ `,
22695
+ `
22696
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS reason TEXT;
22697
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS spawned_from_session TEXT;
22698
+ INSERT INTO _migrations (id) VALUES (19) ON CONFLICT DO NOTHING;
22699
+ `,
22700
+ `
22701
+ CREATE TABLE IF NOT EXISTS handoffs (
22702
+ id TEXT PRIMARY KEY,
22703
+ agent_id TEXT,
22704
+ project_id TEXT REFERENCES projects(id) ON DELETE SET NULL,
22705
+ summary TEXT NOT NULL,
22706
+ completed TEXT,
22707
+ in_progress TEXT,
22708
+ blockers TEXT,
22709
+ next_steps TEXT,
22710
+ created_at TIMESTAMPTZ NOT NULL
22711
+ );
22712
+ INSERT INTO _migrations (id) VALUES (20) ON CONFLICT DO NOTHING;
22713
+ `,
22714
+ `
22715
+ CREATE TABLE IF NOT EXISTS task_checklists (
22716
+ id TEXT PRIMARY KEY,
22717
+ task_id TEXT NOT NULL REFERENCES tasks(id) ON DELETE CASCADE,
22718
+ position INTEGER NOT NULL DEFAULT 0,
22719
+ text TEXT NOT NULL,
22720
+ checked BOOLEAN NOT NULL DEFAULT FALSE,
22721
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
22722
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
22723
+ );
22724
+ CREATE INDEX IF NOT EXISTS idx_task_checklists_task ON task_checklists(task_id);
22725
+ INSERT INTO _migrations (id) VALUES (21) ON CONFLICT DO NOTHING;
22726
+ `,
22727
+ `
22728
+ CREATE TABLE IF NOT EXISTS project_sources (
22729
+ id TEXT PRIMARY KEY,
22730
+ project_id TEXT NOT NULL REFERENCES projects(id) ON DELETE CASCADE,
22731
+ type TEXT NOT NULL,
22732
+ name TEXT NOT NULL,
22733
+ uri TEXT NOT NULL,
22734
+ description TEXT,
22735
+ metadata TEXT DEFAULT '{}',
22736
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
22737
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
22738
+ );
22739
+ CREATE INDEX IF NOT EXISTS idx_project_sources_project ON project_sources(project_id);
22740
+ CREATE INDEX IF NOT EXISTS idx_project_sources_type ON project_sources(type);
22741
+ INSERT INTO _migrations (id) VALUES (22) ON CONFLICT DO NOTHING;
22742
+ `,
22743
+ `
22744
+ ALTER TABLE agents ADD COLUMN IF NOT EXISTS active_project_id TEXT;
22745
+ INSERT INTO _migrations (id) VALUES (23) ON CONFLICT DO NOTHING;
22746
+ `,
22747
+ `
22748
+ CREATE TABLE IF NOT EXISTS resource_locks (
22749
+ resource_type TEXT NOT NULL,
22750
+ resource_id TEXT NOT NULL,
22751
+ agent_id TEXT NOT NULL,
22752
+ lock_type TEXT NOT NULL DEFAULT 'advisory',
22753
+ locked_at TIMESTAMPTZ NOT NULL,
22754
+ expires_at TIMESTAMPTZ NOT NULL,
22755
+ UNIQUE(resource_type, resource_id, lock_type)
22756
+ );
22757
+ CREATE INDEX IF NOT EXISTS idx_resource_locks_type_id ON resource_locks(resource_type, resource_id);
22758
+ CREATE INDEX IF NOT EXISTS idx_resource_locks_agent ON resource_locks(agent_id);
22759
+ INSERT INTO _migrations (id) VALUES (24) ON CONFLICT DO NOTHING;
22760
+ `,
22761
+ `
22762
+ CREATE TABLE IF NOT EXISTS task_files (
22763
+ id TEXT PRIMARY KEY,
22764
+ task_id TEXT NOT NULL REFERENCES tasks(id) ON DELETE CASCADE,
22765
+ path TEXT NOT NULL,
22766
+ status TEXT NOT NULL DEFAULT 'active',
22767
+ agent_id TEXT,
22768
+ note TEXT,
22769
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
22770
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
22771
+ );
22772
+ CREATE INDEX IF NOT EXISTS idx_task_files_task ON task_files(task_id);
22773
+ CREATE INDEX IF NOT EXISTS idx_task_files_path ON task_files(path);
22774
+ CREATE UNIQUE INDEX IF NOT EXISTS idx_task_files_task_path ON task_files(task_id, path);
22775
+ INSERT INTO _migrations (id) VALUES (25) ON CONFLICT DO NOTHING;
22776
+ `,
22777
+ `
22778
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS assigned_by TEXT;
22779
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS assigned_from_project TEXT;
22780
+ CREATE INDEX IF NOT EXISTS idx_tasks_assigned_by ON tasks(assigned_by);
22781
+ INSERT INTO _migrations (id) VALUES (26) ON CONFLICT DO NOTHING;
22782
+ `,
22783
+ `
22784
+ CREATE TABLE IF NOT EXISTS task_relationships (
22785
+ id TEXT PRIMARY KEY,
22786
+ source_task_id TEXT NOT NULL REFERENCES tasks(id) ON DELETE CASCADE,
22787
+ target_task_id TEXT NOT NULL REFERENCES tasks(id) ON DELETE CASCADE,
22788
+ relationship_type TEXT NOT NULL CHECK(relationship_type IN ('related_to', 'conflicts_with', 'similar_to', 'duplicates', 'supersedes', 'modifies_same_file')),
22789
+ metadata TEXT DEFAULT '{}',
22790
+ created_by TEXT,
22791
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
22792
+ CHECK (source_task_id != target_task_id)
22793
+ );
22794
+ CREATE INDEX IF NOT EXISTS idx_task_rel_source ON task_relationships(source_task_id);
22795
+ CREATE INDEX IF NOT EXISTS idx_task_rel_target ON task_relationships(target_task_id);
22796
+ CREATE INDEX IF NOT EXISTS idx_task_rel_type ON task_relationships(relationship_type);
22797
+ INSERT INTO _migrations (id) VALUES (27) ON CONFLICT DO NOTHING;
22798
+ `,
22799
+ `
22800
+ CREATE TABLE IF NOT EXISTS kg_edges (
22801
+ id TEXT PRIMARY KEY,
22802
+ source_id TEXT NOT NULL,
22803
+ source_type TEXT NOT NULL,
22804
+ target_id TEXT NOT NULL,
22805
+ target_type TEXT NOT NULL,
22806
+ relation_type TEXT NOT NULL,
22807
+ weight DOUBLE PRECISION NOT NULL DEFAULT 1.0,
22808
+ metadata TEXT DEFAULT '{}',
22809
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
22810
+ UNIQUE(source_id, source_type, target_id, target_type, relation_type)
22811
+ );
22812
+ CREATE INDEX IF NOT EXISTS idx_kg_source ON kg_edges(source_id, source_type);
22813
+ CREATE INDEX IF NOT EXISTS idx_kg_target ON kg_edges(target_id, target_type);
22814
+ CREATE INDEX IF NOT EXISTS idx_kg_relation ON kg_edges(relation_type);
22815
+ INSERT INTO _migrations (id) VALUES (28) ON CONFLICT DO NOTHING;
22816
+ `,
22817
+ `
22818
+ ALTER TABLE agents ADD COLUMN IF NOT EXISTS capabilities TEXT DEFAULT '[]';
22819
+ INSERT INTO _migrations (id) VALUES (29) ON CONFLICT DO NOTHING;
22820
+ `,
22821
+ `
22822
+ ALTER TABLE agents ADD COLUMN IF NOT EXISTS status TEXT NOT NULL DEFAULT 'active' CHECK(status IN ('active', 'archived'));
22823
+ CREATE INDEX IF NOT EXISTS idx_agents_status ON agents(status);
22824
+ INSERT INTO _migrations (id) VALUES (30) ON CONFLICT DO NOTHING;
22825
+ `,
22826
+ `
22827
+ CREATE TABLE IF NOT EXISTS project_agent_roles (
22828
+ id TEXT PRIMARY KEY,
22829
+ project_id TEXT NOT NULL REFERENCES projects(id) ON DELETE CASCADE,
22830
+ agent_id TEXT NOT NULL REFERENCES agents(id) ON DELETE CASCADE,
22831
+ role TEXT NOT NULL,
22832
+ is_lead BOOLEAN NOT NULL DEFAULT FALSE,
22833
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
22834
+ UNIQUE(project_id, agent_id, role)
22835
+ );
22836
+ CREATE INDEX IF NOT EXISTS idx_project_agent_roles_project ON project_agent_roles(project_id);
22837
+ CREATE INDEX IF NOT EXISTS idx_project_agent_roles_agent ON project_agent_roles(agent_id);
22838
+ INSERT INTO _migrations (id) VALUES (31) ON CONFLICT DO NOTHING;
22839
+ `,
22840
+ `
22841
+ CREATE TABLE IF NOT EXISTS task_commits (
22842
+ id TEXT PRIMARY KEY,
22843
+ task_id TEXT NOT NULL REFERENCES tasks(id) ON DELETE CASCADE,
22844
+ sha TEXT NOT NULL,
22845
+ message TEXT,
22846
+ author TEXT,
22847
+ files_changed TEXT,
22848
+ committed_at TEXT,
22849
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
22850
+ UNIQUE(task_id, sha)
22851
+ );
22852
+ CREATE INDEX IF NOT EXISTS idx_task_commits_task ON task_commits(task_id);
22853
+ CREATE INDEX IF NOT EXISTS idx_task_commits_sha ON task_commits(sha);
22854
+ INSERT INTO _migrations (id) VALUES (32) ON CONFLICT DO NOTHING;
22855
+ `,
22856
+ `
22857
+ CREATE TABLE IF NOT EXISTS file_locks (
22858
+ id TEXT PRIMARY KEY,
22859
+ path TEXT NOT NULL UNIQUE,
22860
+ agent_id TEXT NOT NULL,
22861
+ task_id TEXT REFERENCES tasks(id) ON DELETE SET NULL,
22862
+ expires_at TIMESTAMPTZ NOT NULL,
22863
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
22864
+ );
22865
+ CREATE INDEX IF NOT EXISTS idx_file_locks_path ON file_locks(path);
22866
+ CREATE INDEX IF NOT EXISTS idx_file_locks_agent ON file_locks(agent_id);
22867
+ CREATE INDEX IF NOT EXISTS idx_file_locks_expires ON file_locks(expires_at);
22868
+ INSERT INTO _migrations (id) VALUES (33) ON CONFLICT DO NOTHING;
22869
+ `,
22870
+ `
22871
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS started_at TEXT;
22872
+ INSERT INTO _migrations (id) VALUES (34) ON CONFLICT DO NOTHING;
22873
+ `,
22874
+ `
22875
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS task_type TEXT;
22876
+ CREATE INDEX IF NOT EXISTS idx_tasks_task_type ON tasks(task_type);
22877
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS cost_tokens INTEGER DEFAULT 0;
22878
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS cost_usd DOUBLE PRECISION DEFAULT 0;
22879
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS delegated_from TEXT;
22880
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS delegation_depth INTEGER DEFAULT 0;
22881
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS retry_count INTEGER DEFAULT 0;
22882
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS max_retries INTEGER DEFAULT 3;
22883
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS retry_after TEXT;
22884
+ ALTER TABLE tasks ADD COLUMN IF NOT EXISTS sla_minutes INTEGER;
22885
+
22886
+ CREATE TABLE IF NOT EXISTS task_traces (
22887
+ id TEXT PRIMARY KEY,
22888
+ task_id TEXT NOT NULL REFERENCES tasks(id) ON DELETE CASCADE,
22889
+ agent_id TEXT,
22890
+ trace_type TEXT NOT NULL CHECK(trace_type IN ('tool_call','llm_call','error','handoff','custom')),
22891
+ name TEXT,
22892
+ input_summary TEXT,
22893
+ output_summary TEXT,
22894
+ duration_ms INTEGER,
22895
+ tokens INTEGER,
22896
+ cost_usd DOUBLE PRECISION,
22897
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
22898
+ );
22899
+ CREATE INDEX IF NOT EXISTS idx_task_traces_task ON task_traces(task_id);
22900
+ CREATE INDEX IF NOT EXISTS idx_task_traces_agent ON task_traces(agent_id);
22901
+
22902
+ CREATE TABLE IF NOT EXISTS context_snapshots (
22903
+ id TEXT PRIMARY KEY,
22904
+ agent_id TEXT,
22905
+ task_id TEXT REFERENCES tasks(id) ON DELETE SET NULL,
22906
+ project_id TEXT REFERENCES projects(id) ON DELETE SET NULL,
22907
+ snapshot_type TEXT NOT NULL CHECK(snapshot_type IN ('interrupt','complete','handoff','checkpoint')),
22908
+ plan_summary TEXT,
22909
+ files_open TEXT DEFAULT '[]',
22910
+ attempts TEXT DEFAULT '[]',
22911
+ blockers TEXT DEFAULT '[]',
22912
+ next_steps TEXT,
22913
+ metadata TEXT DEFAULT '{}',
22914
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
22915
+ );
22916
+ CREATE INDEX IF NOT EXISTS idx_snapshots_agent ON context_snapshots(agent_id);
22917
+ CREATE INDEX IF NOT EXISTS idx_snapshots_task ON context_snapshots(task_id);
22918
+
22919
+ CREATE TABLE IF NOT EXISTS agent_budgets (
22920
+ agent_id TEXT PRIMARY KEY,
22921
+ max_concurrent INTEGER DEFAULT 5,
22922
+ max_cost_usd DOUBLE PRECISION,
22923
+ max_task_minutes INTEGER,
22924
+ period_hours INTEGER DEFAULT 24,
22925
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
22926
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
22927
+ );
22928
+
22929
+ INSERT INTO _migrations (id) VALUES (35) ON CONFLICT DO NOTHING;
22930
+ `,
22931
+ `
22932
+ CREATE TABLE IF NOT EXISTS feedback (
22933
+ id TEXT PRIMARY KEY DEFAULT gen_random_uuid()::text,
22934
+ message TEXT NOT NULL,
22935
+ email TEXT,
22936
+ category TEXT DEFAULT 'general',
22937
+ version TEXT,
22938
+ machine_id TEXT,
22939
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
22940
+ );
22941
+
22942
+ INSERT INTO _migrations (id) VALUES (36) ON CONFLICT DO NOTHING;
22943
+ `
22944
+ ];
22945
+ });
22946
+
22947
+ // src/db/pg-migrate.ts
22948
+ var exports_pg_migrate = {};
22949
+ __export(exports_pg_migrate, {
22950
+ applyPgMigrations: () => applyPgMigrations
21055
22951
  });
21056
- function getAgentMetrics(agentId, opts, db) {
21057
- const d = db || getDatabase();
21058
- const agent = d.query("SELECT id, name FROM agents WHERE id = ? OR LOWER(name) = LOWER(?)").get(agentId, agentId);
21059
- if (!agent)
21060
- return null;
21061
- let projectFilter = "";
21062
- const params = [agent.id, agent.id];
21063
- if (opts?.project_id) {
21064
- projectFilter = " AND project_id = ?";
21065
- params.push(opts.project_id);
21066
- }
21067
- const completed = d.query(`SELECT COUNT(*) as count FROM tasks WHERE (agent_id = ? OR assigned_to = ?) AND status = 'completed'${projectFilter}`).get(...params).count;
21068
- const failed = d.query(`SELECT COUNT(*) as count FROM tasks WHERE (agent_id = ? OR assigned_to = ?) AND status = 'failed'${projectFilter}`).get(...params).count;
21069
- const inProgress = d.query(`SELECT COUNT(*) as count FROM tasks WHERE (agent_id = ? OR assigned_to = ?) AND status = 'in_progress'${projectFilter}`).get(...params).count;
21070
- const total = completed + failed;
21071
- const completionRate = total > 0 ? completed / total : 0;
21072
- const avgTime = d.query(`SELECT AVG(
21073
- (julianday(completed_at) - julianday(created_at)) * 24 * 60
21074
- ) as avg_minutes
21075
- FROM tasks
21076
- WHERE (agent_id = ? OR assigned_to = ?) AND status = 'completed' AND completed_at IS NOT NULL${projectFilter}`).get(...params);
21077
- const avgConf = d.query(`SELECT AVG(confidence) as avg_confidence
21078
- FROM tasks
21079
- WHERE (agent_id = ? OR assigned_to = ?) AND status = 'completed' AND confidence IS NOT NULL${projectFilter}`).get(...params);
21080
- const reviewTasks = d.query(`SELECT metadata FROM tasks
21081
- WHERE (agent_id = ? OR assigned_to = ?) AND status = 'completed'${projectFilter}
21082
- AND metadata LIKE '%_review_score%'`).all(...params);
21083
- let reviewScoreAvg = null;
21084
- if (reviewTasks.length > 0) {
21085
- let total2 = 0;
21086
- let count = 0;
21087
- for (const row of reviewTasks) {
21088
- try {
21089
- const meta = JSON.parse(row.metadata);
21090
- if (typeof meta._review_score === "number") {
21091
- total2 += meta._review_score;
21092
- count++;
21093
- }
21094
- } catch {}
21095
- }
21096
- if (count > 0)
21097
- reviewScoreAvg = total2 / count;
21098
- }
21099
- const speedScore = avgTime?.avg_minutes != null ? Math.max(0, 1 - avgTime.avg_minutes / (60 * 24)) : 0.5;
21100
- const confidenceScore = avgConf?.avg_confidence ?? 0.5;
21101
- const volumeScore = Math.min(1, completed / 50);
21102
- const compositeScore = completionRate * 0.3 + speedScore * 0.2 + confidenceScore * 0.3 + volumeScore * 0.2;
21103
- return {
21104
- agent_id: agent.id,
21105
- agent_name: agent.name,
21106
- tasks_completed: completed,
21107
- tasks_failed: failed,
21108
- tasks_in_progress: inProgress,
21109
- completion_rate: Math.round(completionRate * 1000) / 1000,
21110
- avg_completion_minutes: avgTime?.avg_minutes != null ? Math.round(avgTime.avg_minutes * 10) / 10 : null,
21111
- avg_confidence: avgConf?.avg_confidence != null ? Math.round(avgConf.avg_confidence * 1000) / 1000 : null,
21112
- review_score_avg: reviewScoreAvg != null ? Math.round(reviewScoreAvg * 1000) / 1000 : null,
21113
- composite_score: Math.round(compositeScore * 1000) / 1000
22952
+ async function applyPgMigrations(connectionString) {
22953
+ const pg = new PgAdapterAsync(connectionString);
22954
+ const result = {
22955
+ applied: [],
22956
+ alreadyApplied: [],
22957
+ errors: [],
22958
+ totalMigrations: PG_MIGRATIONS.length
21114
22959
  };
21115
- }
21116
- function getLeaderboard(opts, db) {
21117
- const d = db || getDatabase();
21118
- const agents = d.query("SELECT id FROM agents ORDER BY name").all();
21119
- const entries = [];
21120
- for (const agent of agents) {
21121
- const metrics = getAgentMetrics(agent.id, { project_id: opts?.project_id }, d);
21122
- if (metrics && (metrics.tasks_completed > 0 || metrics.tasks_failed > 0 || metrics.tasks_in_progress > 0)) {
21123
- entries.push(metrics);
22960
+ try {
22961
+ await pg.run(`CREATE TABLE IF NOT EXISTS _pg_migrations (
22962
+ id SERIAL PRIMARY KEY,
22963
+ version INT UNIQUE NOT NULL,
22964
+ applied_at TIMESTAMPTZ DEFAULT NOW()
22965
+ )`);
22966
+ const applied = await pg.all("SELECT version FROM _pg_migrations ORDER BY version");
22967
+ const appliedSet = new Set(applied.map((r) => r.version));
22968
+ for (let i = 0;i < PG_MIGRATIONS.length; i++) {
22969
+ if (appliedSet.has(i)) {
22970
+ result.alreadyApplied.push(i);
22971
+ continue;
22972
+ }
22973
+ try {
22974
+ await pg.exec(PG_MIGRATIONS[i]);
22975
+ await pg.run("INSERT INTO _pg_migrations (version) VALUES ($1) ON CONFLICT DO NOTHING", i);
22976
+ result.applied.push(i);
22977
+ } catch (err) {
22978
+ result.errors.push(`Migration ${i}: ${err?.message ?? String(err)}`);
22979
+ break;
22980
+ }
21124
22981
  }
22982
+ } finally {
22983
+ await pg.close();
21125
22984
  }
21126
- entries.sort((a, b) => b.composite_score - a.composite_score);
21127
- const limit = opts?.limit || 20;
21128
- return entries.slice(0, limit).map((entry, idx) => ({
21129
- ...entry,
21130
- rank: idx + 1
21131
- }));
21132
- }
21133
- function scoreTask(taskId, score, reviewerId, db) {
21134
- const d = db || getDatabase();
21135
- if (score < 0 || score > 1)
21136
- throw new Error("Score must be between 0 and 1");
21137
- const task = d.query("SELECT metadata FROM tasks WHERE id = ?").get(taskId);
21138
- if (!task)
21139
- throw new Error(`Task not found: ${taskId}`);
21140
- const metadata = JSON.parse(task.metadata || "{}");
21141
- metadata._review_score = score;
21142
- if (reviewerId)
21143
- metadata._reviewed_by = reviewerId;
21144
- metadata._reviewed_at = now();
21145
- d.run("UPDATE tasks SET metadata = ?, updated_at = ? WHERE id = ?", [JSON.stringify(metadata), now(), taskId]);
22985
+ return result;
21146
22986
  }
21147
- var init_agent_metrics = __esm(() => {
21148
- init_database();
22987
+ var init_pg_migrate = __esm(() => {
22988
+ init_dist();
22989
+ init_pg_migrations();
21149
22990
  });
21150
22991
 
21151
22992
  // src/mcp/index.ts
@@ -21156,11 +22997,11 @@ __export(exports_mcp, {
21156
22997
  import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
21157
22998
  import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
21158
22999
  import { readFileSync as readFileSync6 } from "fs";
21159
- import { join as join10, dirname as dirname2 } from "path";
23000
+ import { join as join10, dirname as dirname3 } from "path";
21160
23001
  import { fileURLToPath } from "url";
21161
23002
  function getMcpVersion() {
21162
23003
  try {
21163
- const __dir = dirname2(fileURLToPath(import.meta.url));
23004
+ const __dir = dirname3(fileURLToPath(import.meta.url));
21164
23005
  const pkgPath = join10(__dir, "..", "package.json");
21165
23006
  return JSON.parse(readFileSync6(pkgPath, "utf-8")).version || "0.0.0";
21166
23007
  } catch {
@@ -21357,6 +23198,7 @@ var init_mcp = __esm(() => {
21357
23198
  "list_templates",
21358
23199
  "create_task_from_template",
21359
23200
  "delete_template",
23201
+ "update_template",
21360
23202
  "approve_task"
21361
23203
  ]);
21362
23204
  agentFocusMap = new Map;
@@ -23039,7 +24881,8 @@ ${text}` }] };
23039
24881
  }, async (params) => {
23040
24882
  try {
23041
24883
  const { taskFromTemplate: taskFromTemplate2 } = await Promise.resolve().then(() => (init_templates(), exports_templates));
23042
- const input = taskFromTemplate2(params.template_id, {
24884
+ const resolvedTemplateId = resolveId(params.template_id, "task_templates");
24885
+ const input = taskFromTemplate2(resolvedTemplateId, {
23043
24886
  title: params.title,
23044
24887
  description: params.description,
23045
24888
  priority: params.priority,
@@ -23058,13 +24901,37 @@ ${task.id.slice(0, 8)} | ${task.priority} | ${task.title}` }] };
23058
24901
  server.tool("delete_template", "Delete a task template by ID.", { id: exports_external.string() }, async ({ id }) => {
23059
24902
  try {
23060
24903
  const { deleteTemplate: deleteTemplate2 } = await Promise.resolve().then(() => (init_templates(), exports_templates));
23061
- const deleted = deleteTemplate2(id);
24904
+ const resolvedId = resolveId(id, "task_templates");
24905
+ const deleted = deleteTemplate2(resolvedId);
23062
24906
  return { content: [{ type: "text", text: deleted ? "Template deleted." : "Template not found." }] };
23063
24907
  } catch (e) {
23064
24908
  return { content: [{ type: "text", text: formatError(e) }], isError: true };
23065
24909
  }
23066
24910
  });
23067
24911
  }
24912
+ if (shouldRegisterTool("update_template")) {
24913
+ server.tool("update_template", "Update a task template's name, title pattern, description, priority, tags, or other fields.", {
24914
+ id: exports_external.string(),
24915
+ name: exports_external.string().optional(),
24916
+ title_pattern: exports_external.string().optional(),
24917
+ description: exports_external.string().optional(),
24918
+ priority: exports_external.enum(["low", "medium", "high", "critical"]).optional(),
24919
+ tags: exports_external.array(exports_external.string()).optional(),
24920
+ project_id: exports_external.string().optional(),
24921
+ plan_id: exports_external.string().optional()
24922
+ }, async ({ id, ...updates }) => {
24923
+ try {
24924
+ const { updateTemplate: updateTemplate2 } = await Promise.resolve().then(() => (init_templates(), exports_templates));
24925
+ const resolvedId = resolveId(id, "task_templates");
24926
+ const t = updateTemplate2(resolvedId, updates);
24927
+ if (!t)
24928
+ return { content: [{ type: "text", text: `Template not found: ${id}` }], isError: true };
24929
+ return { content: [{ type: "text", text: `Template updated: ${t.id.slice(0, 8)} | ${t.name} | "${t.title_pattern}" | ${t.priority}` }] };
24930
+ } catch (e) {
24931
+ return { content: [{ type: "text", text: formatError(e) }], isError: true };
24932
+ }
24933
+ });
24934
+ }
23068
24935
  if (shouldRegisterTool("approve_task")) {
23069
24936
  server.tool("approve_task", "Approve a task with requires_approval=true.", {
23070
24937
  id: exports_external.string(),
@@ -24181,6 +26048,7 @@ ${stack_trace.slice(0, 1500)}
24181
26048
  "list_templates",
24182
26049
  "create_task_from_template",
24183
26050
  "delete_template",
26051
+ "update_template",
24184
26052
  "bulk_update_tasks",
24185
26053
  "bulk_create_tasks",
24186
26054
  "get_task_stats",
@@ -24417,6 +26285,9 @@ ${stack_trace.slice(0, 1500)}
24417
26285
  delete_template: `Delete a task template.
24418
26286
  Params: id(string, req)
24419
26287
  Example: {id: 'a1b2c3d4'}`,
26288
+ update_template: `Update a task template's name, title pattern, or other fields.
26289
+ Params: id(string, req), name(string), title_pattern(string), description(string), priority(low|medium|high|critical), tags(string[]), project_id(string), plan_id(string)
26290
+ Example: {id: 'a1b2c3d4', name: 'Renamed Template', priority: 'critical'}`,
24420
26291
  get_active_work: `See all in-progress tasks and who is working on them.
24421
26292
  Params: project_id(string, optional), task_list_id(string, optional)
24422
26293
  Example: {project_id: 'a1b2c3d4'}`,
@@ -25227,6 +27098,49 @@ ${taskLines.join(`
25227
27098
  const lists = listTaskLists();
25228
27099
  return { contents: [{ uri: "todos://task-lists", text: JSON.stringify(lists, null, 2), mimeType: "application/json" }] };
25229
27100
  });
27101
+ if (shouldRegisterTool("migrate_pg")) {
27102
+ server.tool("migrate_pg", "Apply PostgreSQL schema migrations to the configured RDS instance", {
27103
+ connection_string: exports_external.string().optional().describe("PostgreSQL connection string (overrides cloud config)")
27104
+ }, async ({ connection_string }) => {
27105
+ try {
27106
+ let connStr;
27107
+ if (connection_string) {
27108
+ connStr = connection_string;
27109
+ } else {
27110
+ const { getConnectionString: getConnectionString2 } = await Promise.resolve().then(() => (init_dist(), exports_dist));
27111
+ connStr = getConnectionString2("todos");
27112
+ }
27113
+ const { applyPgMigrations: applyPgMigrations2 } = await Promise.resolve().then(() => (init_pg_migrate(), exports_pg_migrate));
27114
+ const result = await applyPgMigrations2(connStr);
27115
+ const lines = [];
27116
+ if (result.applied.length > 0) {
27117
+ lines.push(`Applied ${result.applied.length} migration(s): ${result.applied.join(", ")}`);
27118
+ }
27119
+ if (result.alreadyApplied.length > 0) {
27120
+ lines.push(`Already applied: ${result.alreadyApplied.length} migration(s)`);
27121
+ }
27122
+ if (result.errors.length > 0) {
27123
+ lines.push(`Errors:
27124
+ ${result.errors.join(`
27125
+ `)}`);
27126
+ }
27127
+ if (result.applied.length === 0 && result.errors.length === 0) {
27128
+ lines.push("Schema is up to date.");
27129
+ }
27130
+ lines.push(`Total migrations: ${result.totalMigrations}`);
27131
+ return {
27132
+ content: [{ type: "text", text: lines.join(`
27133
+ `) }],
27134
+ isError: result.errors.length > 0
27135
+ };
27136
+ } catch (e) {
27137
+ return {
27138
+ content: [{ type: "text", text: `Migration failed: ${e?.message ?? String(e)}` }],
27139
+ isError: true
27140
+ };
27141
+ }
27142
+ });
27143
+ }
25230
27144
  registerCloudTools(server, "todos");
25231
27145
  main().catch((err) => {
25232
27146
  console.error("MCP server error:", err);
@@ -25305,17 +27219,17 @@ __export(exports_serve, {
25305
27219
  startServer: () => startServer
25306
27220
  });
25307
27221
  import { existsSync as existsSync9 } from "fs";
25308
- import { join as join11, dirname as dirname3, extname, resolve as resolve3, sep } from "path";
27222
+ import { join as join11, dirname as dirname4, extname, resolve as resolve3, sep } from "path";
25309
27223
  import { fileURLToPath as fileURLToPath2 } from "url";
25310
27224
  function resolveDashboardDir() {
25311
27225
  const candidates = [];
25312
27226
  try {
25313
- const scriptDir = dirname3(fileURLToPath2(import.meta.url));
27227
+ const scriptDir = dirname4(fileURLToPath2(import.meta.url));
25314
27228
  candidates.push(join11(scriptDir, "..", "dashboard", "dist"));
25315
27229
  candidates.push(join11(scriptDir, "..", "..", "dashboard", "dist"));
25316
27230
  } catch {}
25317
27231
  if (process.argv[1]) {
25318
- const mainDir = dirname3(process.argv[1]);
27232
+ const mainDir = dirname4(process.argv[1]);
25319
27233
  candidates.push(join11(mainDir, "..", "dashboard", "dist"));
25320
27234
  candidates.push(join11(mainDir, "..", "..", "dashboard", "dist"));
25321
27235
  }
@@ -27560,7 +29474,7 @@ init_config();
27560
29474
  import chalk2 from "chalk";
27561
29475
  import { execSync as execSync2 } from "child_process";
27562
29476
  import { existsSync as existsSync10, mkdirSync as mkdirSync6, readFileSync as readFileSync7, writeFileSync as writeFileSync6 } from "fs";
27563
- import { basename, dirname as dirname4, join as join12, resolve as resolve4 } from "path";
29477
+ import { basename, dirname as dirname5, join as join12, resolve as resolve4 } from "path";
27564
29478
  import { fileURLToPath as fileURLToPath3 } from "url";
27565
29479
 
27566
29480
  // src/cli/brains.ts
@@ -27883,7 +29797,7 @@ function makeBrainsCommand() {
27883
29797
  // src/cli/index.tsx
27884
29798
  function getPackageVersion() {
27885
29799
  try {
27886
- const pkgPath = join12(dirname4(fileURLToPath3(import.meta.url)), "..", "..", "package.json");
29800
+ const pkgPath = join12(dirname5(fileURLToPath3(import.meta.url)), "..", "..", "package.json");
27887
29801
  return JSON.parse(readFileSync7(pkgPath, "utf-8")).version || "0.0.0";
27888
29802
  } catch {
27889
29803
  return "0.0.0";
@@ -28694,9 +30608,9 @@ program2.command("plans").description("List and manage plans").option("--add <na
28694
30608
  console.log(`${chalk2.dim(p.id.slice(0, 8))} ${chalk2.bold(p.name)} ${chalk2.cyan(`[${p.status}]`)}${desc}`);
28695
30609
  }
28696
30610
  });
28697
- program2.command("templates").description("List and manage task templates").option("--add <name>", "Create a template").option("--title <pattern>", "Title pattern (with --add)").option("-d, --description <text>", "Default description").option("-p, --priority <level>", "Default priority").option("-t, --tags <tags>", "Default tags (comma-separated)").option("--delete <id>", "Delete a template").option("--use <id>", "Create a task from a template").action((opts) => {
30611
+ program2.command("templates").description("List and manage task templates").option("--add <name>", "Create a template").option("--title <pattern>", "Title pattern (with --add)").option("-d, --description <text>", "Default description").option("-p, --priority <level>", "Default priority").option("-t, --tags <tags>", "Default tags (comma-separated)").option("--delete <id>", "Delete a template").option("--update <id>", "Update a template").option("--use <id>", "Create a task from a template").action((opts) => {
28698
30612
  const globalOpts = program2.opts();
28699
- const { createTemplate: createTemplate2, listTemplates: listTemplates2, deleteTemplate: deleteTemplate2, taskFromTemplate: taskFromTemplate2 } = (init_templates(), __toCommonJS(exports_templates));
30613
+ const { createTemplate: createTemplate2, listTemplates: listTemplates2, deleteTemplate: deleteTemplate2, updateTemplate: updateTemplate2, taskFromTemplate: taskFromTemplate2 } = (init_templates(), __toCommonJS(exports_templates));
28700
30614
  if (opts.add) {
28701
30615
  if (!opts.title) {
28702
30616
  console.error(chalk2.red("--title is required with --add"));
@@ -28730,6 +30644,30 @@ program2.command("templates").description("List and manage task templates").opti
28730
30644
  }
28731
30645
  return;
28732
30646
  }
30647
+ if (opts.update) {
30648
+ const updates = {};
30649
+ if (opts.add)
30650
+ updates.name = opts.add;
30651
+ if (opts.title)
30652
+ updates.title_pattern = opts.title;
30653
+ if (opts.description)
30654
+ updates.description = opts.description;
30655
+ if (opts.priority)
30656
+ updates.priority = opts.priority;
30657
+ if (opts.tags)
30658
+ updates.tags = opts.tags.split(",").map((t) => t.trim());
30659
+ const updated = updateTemplate2(opts.update, updates);
30660
+ if (!updated) {
30661
+ console.error(chalk2.red("Template not found."));
30662
+ process.exit(1);
30663
+ }
30664
+ if (globalOpts.json) {
30665
+ output(updated, true);
30666
+ } else {
30667
+ console.log(chalk2.green(`Template updated: ${updated.id.slice(0, 8)} | ${updated.name} | "${updated.title_pattern}"`));
30668
+ }
30669
+ return;
30670
+ }
28733
30671
  if (opts.use) {
28734
30672
  try {
28735
30673
  const input = taskFromTemplate2(opts.use, {
@@ -29111,7 +31049,7 @@ function readJsonFile2(path) {
29111
31049
  }
29112
31050
  }
29113
31051
  function writeJsonFile2(path, data) {
29114
- const dir = dirname4(path);
31052
+ const dir = dirname5(path);
29115
31053
  if (!existsSync10(dir))
29116
31054
  mkdirSync6(dir, { recursive: true });
29117
31055
  writeFileSync6(path, JSON.stringify(data, null, 2) + `
@@ -29123,7 +31061,7 @@ function readTomlFile(path) {
29123
31061
  return readFileSync7(path, "utf-8");
29124
31062
  }
29125
31063
  function writeTomlFile(path, content) {
29126
- const dir = dirname4(path);
31064
+ const dir = dirname5(path);
29127
31065
  if (!existsSync10(dir))
29128
31066
  mkdirSync6(dir, { recursive: true });
29129
31067
  writeFileSync6(path, content);
@@ -29740,7 +31678,7 @@ program2.command("config").description("View or update configuration").option("-
29740
31678
  obj = obj[keys[i]];
29741
31679
  }
29742
31680
  obj[keys[keys.length - 1]] = parsedValue;
29743
- const dir = dirname4(configPath);
31681
+ const dir = dirname5(configPath);
29744
31682
  if (!existsSync10(dir))
29745
31683
  mkdirSync6(dir, { recursive: true });
29746
31684
  writeFileSync6(configPath, JSON.stringify(config2, null, 2));
@@ -31122,4 +33060,57 @@ program2.action(async () => {
31122
33060
  }
31123
33061
  });
31124
33062
  program2.addCommand(makeBrainsCommand());
33063
+ var dbCmd = program2.command("db").description("Database management commands");
33064
+ dbCmd.command("migrate-pg").description("Apply PostgreSQL migrations to the configured RDS instance").option("--connection-string <url>", "PostgreSQL connection string (overrides cloud config)").option("--json", "Output as JSON").action(async (opts) => {
33065
+ const globalOpts = program2.opts();
33066
+ const useJson = opts.json || globalOpts.json;
33067
+ let connStr;
33068
+ if (opts.connectionString) {
33069
+ connStr = opts.connectionString;
33070
+ } else {
33071
+ try {
33072
+ const { getConnectionString: getConnectionString2 } = await Promise.resolve().then(() => (init_dist(), exports_dist));
33073
+ connStr = getConnectionString2("todos");
33074
+ } catch (e) {
33075
+ const msg = "Cloud RDS not configured. Use --connection-string or run `cloud setup`.";
33076
+ if (useJson) {
33077
+ console.log(JSON.stringify({ error: msg }));
33078
+ } else {
33079
+ console.error(chalk2.red(msg));
33080
+ }
33081
+ process.exit(1);
33082
+ }
33083
+ }
33084
+ try {
33085
+ const { applyPgMigrations: applyPgMigrations2 } = await Promise.resolve().then(() => (init_pg_migrate(), exports_pg_migrate));
33086
+ const result = await applyPgMigrations2(connStr);
33087
+ if (useJson) {
33088
+ console.log(JSON.stringify(result, null, 2));
33089
+ return;
33090
+ }
33091
+ if (result.applied.length > 0) {
33092
+ console.log(chalk2.green(`Applied ${result.applied.length} migration(s): ${result.applied.join(", ")}`));
33093
+ }
33094
+ if (result.alreadyApplied.length > 0) {
33095
+ console.log(chalk2.dim(`Already applied: ${result.alreadyApplied.length} migration(s)`));
33096
+ }
33097
+ if (result.errors.length > 0) {
33098
+ for (const err of result.errors) {
33099
+ console.error(chalk2.red(` Error: ${err}`));
33100
+ }
33101
+ process.exit(1);
33102
+ }
33103
+ if (result.applied.length === 0 && result.errors.length === 0) {
33104
+ console.log(chalk2.dim("Schema is up to date."));
33105
+ }
33106
+ } catch (e) {
33107
+ const msg = e instanceof Error ? e.message : String(e);
33108
+ if (useJson) {
33109
+ console.log(JSON.stringify({ error: msg }));
33110
+ } else {
33111
+ console.error(chalk2.red(`Migration failed: ${msg}`));
33112
+ }
33113
+ process.exit(1);
33114
+ }
33115
+ });
31125
33116
  program2.parse();