@rocicorp/zero 0.26.1-canary.9 → 0.26.1

This diff shows the changes between two package versions as published to a supported public registry. It is provided for informational purposes only.
Files changed (72)
  1. package/out/analyze-query/src/bin-analyze.js +3 -0
  2. package/out/analyze-query/src/bin-analyze.js.map +1 -1
  3. package/out/analyze-query/src/run-ast.d.ts.map +1 -1
  4. package/out/analyze-query/src/run-ast.js +11 -2
  5. package/out/analyze-query/src/run-ast.js.map +1 -1
  6. package/out/zero/package.json.js +1 -1
  7. package/out/zero-cache/src/config/zero-config.d.ts +4 -0
  8. package/out/zero-cache/src/config/zero-config.d.ts.map +1 -1
  9. package/out/zero-cache/src/config/zero-config.js +17 -0
  10. package/out/zero-cache/src/config/zero-config.js.map +1 -1
  11. package/out/zero-cache/src/db/lite-tables.d.ts +2 -1
  12. package/out/zero-cache/src/db/lite-tables.d.ts.map +1 -1
  13. package/out/zero-cache/src/db/lite-tables.js +7 -3
  14. package/out/zero-cache/src/db/lite-tables.js.map +1 -1
  15. package/out/zero-cache/src/db/specs.d.ts +8 -2
  16. package/out/zero-cache/src/db/specs.d.ts.map +1 -1
  17. package/out/zero-cache/src/db/specs.js.map +1 -1
  18. package/out/zero-cache/src/server/change-streamer.d.ts.map +1 -1
  19. package/out/zero-cache/src/server/change-streamer.js +3 -1
  20. package/out/zero-cache/src/server/change-streamer.js.map +1 -1
  21. package/out/zero-cache/src/services/analyze.js +1 -0
  22. package/out/zero-cache/src/services/analyze.js.map +1 -1
  23. package/out/zero-cache/src/services/change-source/common/replica-schema.d.ts +2 -0
  24. package/out/zero-cache/src/services/change-source/common/replica-schema.d.ts.map +1 -1
  25. package/out/zero-cache/src/services/change-source/common/replica-schema.js +56 -3
  26. package/out/zero-cache/src/services/change-source/common/replica-schema.js.map +1 -1
  27. package/out/zero-cache/src/services/change-source/pg/backfill-stream.d.ts.map +1 -1
  28. package/out/zero-cache/src/services/change-source/pg/backfill-stream.js +15 -11
  29. package/out/zero-cache/src/services/change-source/pg/backfill-stream.js.map +1 -1
  30. package/out/zero-cache/src/services/change-source/pg/change-source.d.ts.map +1 -1
  31. package/out/zero-cache/src/services/change-source/pg/change-source.js +61 -0
  32. package/out/zero-cache/src/services/change-source/pg/change-source.js.map +1 -1
  33. package/out/zero-cache/src/services/change-streamer/broadcast.d.ts +100 -0
  34. package/out/zero-cache/src/services/change-streamer/broadcast.d.ts.map +1 -0
  35. package/out/zero-cache/src/services/change-streamer/broadcast.js +171 -0
  36. package/out/zero-cache/src/services/change-streamer/broadcast.js.map +1 -0
  37. package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts +1 -1
  38. package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts.map +1 -1
  39. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js +14 -7
  40. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js.map +1 -1
  41. package/out/zero-cache/src/services/change-streamer/forwarder.d.ts +17 -1
  42. package/out/zero-cache/src/services/change-streamer/forwarder.d.ts.map +1 -1
  43. package/out/zero-cache/src/services/change-streamer/forwarder.js +52 -4
  44. package/out/zero-cache/src/services/change-streamer/forwarder.js.map +1 -1
  45. package/out/zero-cache/src/services/change-streamer/subscriber.d.ts +18 -0
  46. package/out/zero-cache/src/services/change-streamer/subscriber.d.ts.map +1 -1
  47. package/out/zero-cache/src/services/change-streamer/subscriber.js +68 -12
  48. package/out/zero-cache/src/services/change-streamer/subscriber.js.map +1 -1
  49. package/out/zero-cache/src/services/replicator/change-processor.d.ts.map +1 -1
  50. package/out/zero-cache/src/services/replicator/change-processor.js +10 -13
  51. package/out/zero-cache/src/services/replicator/change-processor.js.map +1 -1
  52. package/out/zero-cache/src/services/replicator/schema/table-metadata.d.ts +28 -7
  53. package/out/zero-cache/src/services/replicator/schema/table-metadata.d.ts.map +1 -1
  54. package/out/zero-cache/src/services/replicator/schema/table-metadata.js +55 -24
  55. package/out/zero-cache/src/services/replicator/schema/table-metadata.js.map +1 -1
  56. package/out/zero-cache/src/services/run-ast.d.ts.map +1 -1
  57. package/out/zero-cache/src/services/run-ast.js +4 -2
  58. package/out/zero-cache/src/services/run-ast.js.map +1 -1
  59. package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts +3 -2
  60. package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts.map +1 -1
  61. package/out/zero-cache/src/services/view-syncer/pipeline-driver.js +27 -12
  62. package/out/zero-cache/src/services/view-syncer/pipeline-driver.js.map +1 -1
  63. package/out/zero-cache/src/services/view-syncer/snapshotter.d.ts +3 -3
  64. package/out/zero-cache/src/services/view-syncer/snapshotter.d.ts.map +1 -1
  65. package/out/zero-cache/src/services/view-syncer/snapshotter.js +4 -0
  66. package/out/zero-cache/src/services/view-syncer/snapshotter.js.map +1 -1
  67. package/out/zero-cache/src/types/subscription.d.ts +3 -1
  68. package/out/zero-cache/src/types/subscription.d.ts.map +1 -1
  69. package/out/zero-cache/src/types/subscription.js +21 -9
  70. package/out/zero-cache/src/types/subscription.js.map +1 -1
  71. package/out/zero-client/src/client/version.js +1 -1
  72. package/package.json +2 -1
@@ -1 +1 @@
- {"version":3,"file":"lite-tables.js","sources":["../../../../../zero-cache/src/db/lite-tables.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {must} from '../../../shared/src/must.ts';\nimport {difference} from '../../../shared/src/set-utils.ts';\nimport * as v from '../../../shared/src/valita.ts';\nimport {primaryKeySchema} from '../../../zero-protocol/src/primary-key.ts';\nimport type {Database} from '../../../zqlite/src/db.ts';\nimport {\n ColumnMetadataStore,\n metadataToLiteTypeString,\n} from '../services/replicator/schema/column-metadata.ts';\nimport {\n isArray,\n isEnum,\n liteTypeToZqlValueType,\n mapLiteDataTypeToZqlSchemaValue,\n nullableUpstream,\n} from '../types/lite.ts';\nimport * as PostgresTypeClass from './postgres-type-class-enum.ts';\nimport type {\n LiteAndZqlSpec,\n LiteIndexSpec,\n LiteTableSpec,\n MutableLiteIndexSpec,\n MutableLiteTableSpec,\n} from './specs.ts';\n\ntype ColumnInfo = {\n table: string;\n name: string;\n type: string;\n notNull: number;\n dflt: string | null;\n keyPos: number;\n};\n\nexport type LiteTableSpecWithReplicationStatus = LiteTableSpec & {\n readonly backfilling?: string[];\n};\n\ntype MutableLiteTableSpecWithReplicationStatus = MutableLiteTableSpec & {\n backfilling: string[];\n};\n\nexport function listTables(\n db: Database,\n useColumnMetadata = true,\n): LiteTableSpecWithReplicationStatus[] {\n const columns = db\n .prepare(\n `\n SELECT\n m.name as \"table\",\n p.name as name,\n p.type as type,\n p.\"notnull\" as \"notNull\",\n p.dflt_value as \"dflt\",\n p.pk as keyPos\n FROM sqlite_master as m\n LEFT JOIN pragma_table_info(m.name) as p\n WHERE m.type = 'table'\n AND m.name NOT LIKE 'sqlite_%'\n AND m.name NOT LIKE '_zero.%'\n AND m.name NOT LIKE '_litestream_%'\n `,\n )\n .all() as ColumnInfo[];\n\n const tables: LiteTableSpec[] = [];\n let table: MutableLiteTableSpecWithReplicationStatus | undefined;\n\n columns.forEach(col => {\n if (col.table !== table?.name) {\n // New table\n table = {\n name: col.table,\n columns: {},\n backfilling: [],\n };\n tables.push(table);\n }\n\n // Try to read from metadata table first, fall back to SQLite column type\n let dataType: string;\n let elemPgTypeClass:\n | typeof PostgresTypeClass.Base\n | typeof PostgresTypeClass.Enum\n | null;\n\n const metadata = useColumnMetadata\n ? ColumnMetadataStore.getInstance(db)?.getColumn(col.table, col.name)\n : undefined;\n if (metadata) {\n // Read from metadata table and convert to pipe notation\n dataType = metadataToLiteTypeString(metadata);\n elemPgTypeClass = metadata.isArray\n ? metadata.isEnum\n ? PostgresTypeClass.Enum\n : PostgresTypeClass.Base\n : null;\n } else {\n // Fall back to reading from SQLite column type (pipe notation)\n dataType = col.type;\n elemPgTypeClass = isArray(col.type)\n ? isEnum(col.type)\n ? 
PostgresTypeClass.Enum\n : PostgresTypeClass.Base\n : null;\n }\n\n table.columns[col.name] = {\n pos: Object.keys(table.columns).length + 1,\n dataType,\n characterMaximumLength: null,\n notNull: col.notNull !== 0,\n dflt: col.dflt,\n elemPgTypeClass,\n };\n if (metadata?.isBackfilling) {\n table.backfilling.push(col.name);\n }\n if (col.keyPos) {\n table.primaryKey ??= [];\n while (table.primaryKey.length < col.keyPos) {\n table.primaryKey.push('');\n }\n table.primaryKey[col.keyPos - 1] = col.name;\n }\n });\n\n return tables;\n}\n\nexport function listIndexes(db: Database): LiteIndexSpec[] {\n const indexes = db\n .prepare(\n `SELECT \n idx.name as indexName, \n idx.tbl_name as tableName, \n info.\"unique\" as \"unique\",\n col.name as column,\n CASE WHEN col.desc = 0 THEN 'ASC' ELSE 'DESC' END as dir\n FROM sqlite_master as idx\n JOIN pragma_index_list(idx.tbl_name) AS info ON info.name = idx.name\n JOIN pragma_index_xinfo(idx.name) as col\n WHERE idx.type = 'index' AND \n col.key = 1 AND\n idx.tbl_name NOT LIKE '_zero.%'\n ORDER BY idx.name, col.seqno ASC`,\n )\n .all() as {\n indexName: string;\n tableName: string;\n unique: number;\n column: string;\n dir: 'ASC' | 'DESC';\n }[];\n\n const ret: MutableLiteIndexSpec[] = [];\n for (const {indexName: name, tableName, unique, column, dir} of indexes) {\n if (ret.at(-1)?.name === name) {\n // Aggregate multiple column names into the array.\n must(ret.at(-1)).columns[column] = dir;\n } else {\n ret.push({\n tableName,\n name,\n columns: {[column]: dir},\n unique: unique !== 0,\n });\n }\n }\n\n return ret;\n}\n\nexport type ZqlSpecOptions = {\n /**\n * Controls whether to include backfilling columns in the computed\n * LiteAndZqlSpec. In general, backfilling columns should be include\n * in \"replication\" logic (copying data from upstream to the replica),\n * and excluded from \"sync\" logic (sending data from the replica to\n * clients).\n */\n includeBackfillingColumns: boolean;\n};\n\n/**\n * Computes a TableSpec \"view\" of the replicated data that is\n * suitable for processing / consumption for the client. 
This\n * includes:\n * * excluding tables without a PRIMARY KEY or UNIQUE INDEX\n * * excluding columns with types that are not supported by ZQL\n * * choosing columns to use as the primary key amongst those\n * in unique indexes\n *\n * @param tableSpecs an optional map to reset and populate\n * @param fullTables an optional map to receive the full table specs,\n * which may include tables and columns that are not synced to\n * the client because they lack a primary key or are of unsupported\n * data types.\n */\nexport function computeZqlSpecs(\n lc: LogContext,\n replica: Database,\n opts: ZqlSpecOptions,\n tableSpecs: Map<string, LiteAndZqlSpec> = new Map(),\n fullTables?: Map<string, LiteTableSpec>,\n): Map<string, LiteAndZqlSpec> {\n return computeZqlSpecsFromLiteSpecs(\n listTables(replica),\n listIndexes(replica),\n opts,\n tableSpecs,\n fullTables,\n lc,\n );\n}\n\nexport function computeZqlSpecsFromLiteSpecs(\n tables: LiteTableSpecWithReplicationStatus[],\n indexes: LiteIndexSpec[],\n {includeBackfillingColumns}: ZqlSpecOptions,\n tableSpecs: Map<string, LiteAndZqlSpec> = new Map(),\n fullTables?: Map<string, LiteTableSpec>,\n lc?: LogContext,\n): Map<string, LiteAndZqlSpec> {\n tableSpecs.clear();\n fullTables?.clear();\n\n const uniqueIndexColumns = new Map<string, string[][]>();\n for (const {tableName, columns} of indexes.filter(idx => idx.unique)) {\n if (!uniqueIndexColumns.has(tableName)) {\n uniqueIndexColumns.set(tableName, []);\n }\n uniqueIndexColumns.get(tableName)?.push(Object.keys(columns));\n }\n\n tables.forEach(fullTable => {\n fullTables?.set(fullTable.name, fullTable);\n\n const backfilling = new Set(fullTable.backfilling);\n // Only include columns that:\n // - have a defined ZQL Value\n // - aren't backfilling if `includeBackfillingColumns` is false\n const visibleColumns = Object.entries(fullTable.columns).filter(\n ([col, {dataType}]) =>\n liteTypeToZqlValueType(dataType) &&\n (includeBackfillingColumns || !backfilling.has(col)),\n );\n const notNullColumns = new Set(\n visibleColumns\n .filter(\n ([col, {dataType}]) =>\n !nullableUpstream(dataType) || fullTable.primaryKey?.includes(col),\n )\n .map(([col]) => col),\n );\n\n const uniqueKeys = uniqueIndexColumns.get(fullTable.name) ?? [];\n // Examine all column combinations that can serve as a primary key,\n // i.e. excluding indexes over nullable or unsynced columns.\n const keys = uniqueKeys.filter(\n key => difference(new Set(key), notNullColumns).size === 0,\n );\n if (keys.length === 0) {\n // Only include tables with a row key.\n //\n // Note that this will automatically exclude tables that are being\n // backfilled (when includeBackfillingColumns is `false`) since candidate\n // keys only include visible columns.\n lc?.debug?.(\n `not syncing table ${fullTable.name} because it ` +\n (backfilling.size ? 'is being backfilled' : 'has no primary key'),\n );\n return;\n }\n // Pick the \"best\" (i.e. 
shortest) key for default IVM operations.\n const primaryKey = keys.sort(keyCmp)[0];\n\n const tableSpec = {\n ...fullTable,\n columns: Object.fromEntries(visibleColumns),\n // normalize (sort) keys to minimize creating new objects.\n // See row-key.ts: normalizedKeyOrder()\n primaryKey: v.parse(primaryKey.sort(), primaryKeySchema),\n uniqueKeys: uniqueKeys.map(key => v.parse(key.sort(), primaryKeySchema)),\n allPotentialPrimaryKeys: keys.map(key =>\n v.parse(key.sort(), primaryKeySchema),\n ),\n };\n\n tableSpecs.set(tableSpec.name, {\n tableSpec,\n zqlSpec: Object.fromEntries(\n Object.entries(tableSpec.columns).map(([name, {dataType}]) => [\n name,\n mapLiteDataTypeToZqlSchemaValue(dataType),\n ]),\n ),\n });\n });\n return tableSpecs;\n}\n\nexport function mustGetTableSpec(\n tableSpecs: Map<string, LiteAndZqlSpec>,\n tableName: string,\n): LiteAndZqlSpec {\n const tableSpec = tableSpecs.get(tableName);\n if (!tableSpec) {\n throw new Error(\n `table '${tableName}' is not one of: ${[...tableSpecs.keys()]\n .filter(t => !t.includes('.') && !t.startsWith('_litestream_'))\n .sort()}. ` +\n `Check the spelling and ensure that the table has a primary key.`,\n );\n }\n return tableSpec;\n}\n\n// Deterministic comparator for favoring shorter row keys.\nfunction keyCmp(a: string[], b: string[]) {\n if (a.length !== b.length) {\n return a.length - b.length; // Fewer columns are better.\n }\n for (let i = 0; i < a.length; i++) {\n if (a[i] < b[i]) {\n return -1;\n }\n if (a[i] > b[i]) {\n return 1;\n }\n }\n return 0;\n}\n"],"names":["PostgresTypeClass.Enum","PostgresTypeClass.Base","v.parse"],"mappings":";;;;;;;AA2CO,SAAS,WACd,IACA,oBAAoB,MACkB;AACtC,QAAM,UAAU,GACb;AAAA,IACC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAAA,EAgBD,IAAA;AAEH,QAAM,SAA0B,CAAA;AAChC,MAAI;AAEJ,UAAQ,QAAQ,CAAA,QAAO;AACrB,QAAI,IAAI,UAAU,OAAO,MAAM;AAE7B,cAAQ;AAAA,QACN,MAAM,IAAI;AAAA,QACV,SAAS,CAAA;AAAA,QACT,aAAa,CAAA;AAAA,MAAC;AAEhB,aAAO,KAAK,KAAK;AAAA,IACnB;AAGA,QAAI;AACJ,QAAI;AAKJ,UAAM,WAAW,oBACb,oBAAoB,YAAY,EAAE,GAAG,UAAU,IAAI,OAAO,IAAI,IAAI,IAClE;AACJ,QAAI,UAAU;AAEZ,iBAAW,yBAAyB,QAAQ;AAC5C,wBAAkB,SAAS,UACvB,SAAS,SACPA,OACAC,OACF;AAAA,IACN,OAAO;AAEL,iBAAW,IAAI;AACf,wBAAkB,QAAQ,IAAI,IAAI,IAC9B,OAAO,IAAI,IAAI,IACbD,OACAC,OACF;AAAA,IACN;AAEA,UAAM,QAAQ,IAAI,IAAI,IAAI;AAAA,MACxB,KAAK,OAAO,KAAK,MAAM,OAAO,EAAE,SAAS;AAAA,MACzC;AAAA,MACA,wBAAwB;AAAA,MACxB,SAAS,IAAI,YAAY;AAAA,MACzB,MAAM,IAAI;AAAA,MACV;AAAA,IAAA;AAEF,QAAI,UAAU,eAAe;AAC3B,YAAM,YAAY,KAAK,IAAI,IAAI;AAAA,IACjC;AACA,QAAI,IAAI,QAAQ;AACd,YAAM,eAAe,CAAA;AACrB,aAAO,MAAM,WAAW,SAAS,IAAI,QAAQ;AAC3C,cAAM,WAAW,KAAK,EAAE;AAAA,MAC1B;AACA,YAAM,WAAW,IAAI,SAAS,CAAC,IAAI,IAAI;AAAA,IACzC;AAAA,EACF,CAAC;AAED,SAAO;AACT;AAEO,SAAS,YAAY,IAA+B;AACzD,QAAM,UAAU,GACb;AAAA,IACC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAAA,EAcD,IAAA;AAQH,QAAM,MAA8B,CAAA;AACpC,aAAW,EAAC,WAAW,MAAM,WAAW,QAAQ,QAAQ,IAAA,KAAQ,SAAS;AACvE,QAAI,IAAI,GAAG,EAAE,GAAG,SAAS,MAAM;AAE7B,WAAK,IAAI,GAAG,EAAE,CAAC,EAAE,QAAQ,MAAM,IAAI;AAAA,IACrC,OAAO;AACL,UAAI,KAAK;AAAA,QACP;AAAA,QACA;AAAA,QACA,SAAS,EAAC,CAAC,MAAM,GAAG,IAAA;AAAA,QACpB,QAAQ,WAAW;AAAA,MAAA,CACpB;AAAA,IACH;AAAA,EACF;AAEA,SAAO;AACT;AA4BO,SAAS,gBACd,IACA,SACA,MACA,aAA0C,oBAAI,IAAA,GAC9C,YAC6B;AAC7B,SAAO;AAAA,IACL,WAAW,OAAO;AAAA,IAClB,YAAY,OAAO;AAAA,IACnB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAEJ;AAEO,SAAS,6BACd,QACA,SACA,EAAC,0BAAA,GACD,aAA0C,oBAAI,IAAA,GAC9C,YACA,IAC6B;AAC7B,aAAW,MAAA;AACX,cAAY,MAAA;AAEZ,QAAM,yCAAyB,IAAA;AAC/B,aAAW,EAAC,WAAW,aAAY,QAAQ,OAAO,CAAA,QAAO,IAAI,MAAM,GAAG;AACpE,QAAI,CAAC,mBAAmB,IAAI,SA
AS,GAAG;AACtC,yBAAmB,IAAI,WAAW,EAAE;AAAA,IACtC;AACA,uBAAmB,IAAI,SAAS,GAAG,KAAK,OAAO,KAAK,OAAO,CAAC;AAAA,EAC9D;AAEA,SAAO,QAAQ,CAAA,cAAa;AAC1B,gBAAY,IAAI,UAAU,MAAM,SAAS;AAEzC,UAAM,cAAc,IAAI,IAAI,UAAU,WAAW;AAIjD,UAAM,iBAAiB,OAAO,QAAQ,UAAU,OAAO,EAAE;AAAA,MACvD,CAAC,CAAC,KAAK,EAAC,UAAS,MACf,uBAAuB,QAAQ,MAC9B,6BAA6B,CAAC,YAAY,IAAI,GAAG;AAAA,IAAA;AAEtD,UAAM,iBAAiB,IAAI;AAAA,MACzB,eACG;AAAA,QACC,CAAC,CAAC,KAAK,EAAC,UAAS,MACf,CAAC,iBAAiB,QAAQ,KAAK,UAAU,YAAY,SAAS,GAAG;AAAA,MAAA,EAEpE,IAAI,CAAC,CAAC,GAAG,MAAM,GAAG;AAAA,IAAA;AAGvB,UAAM,aAAa,mBAAmB,IAAI,UAAU,IAAI,KAAK,CAAA;AAG7D,UAAM,OAAO,WAAW;AAAA,MACtB,CAAA,QAAO,WAAW,IAAI,IAAI,GAAG,GAAG,cAAc,EAAE,SAAS;AAAA,IAAA;AAE3D,QAAI,KAAK,WAAW,GAAG;AAMrB,UAAI;AAAA,QACF,qBAAqB,UAAU,IAAI,kBAChC,YAAY,OAAO,wBAAwB;AAAA,MAAA;AAEhD;AAAA,IACF;AAEA,UAAM,aAAa,KAAK,KAAK,MAAM,EAAE,CAAC;AAEtC,UAAM,YAAY;AAAA,MAChB,GAAG;AAAA,MACH,SAAS,OAAO,YAAY,cAAc;AAAA;AAAA;AAAA,MAG1C,YAAYC,MAAQ,WAAW,KAAA,GAAQ,gBAAgB;AAAA,MACvD,YAAY,WAAW,IAAI,CAAA,QAAOA,MAAQ,IAAI,QAAQ,gBAAgB,CAAC;AAAA,MACvE,yBAAyB,KAAK;AAAA,QAAI,SAChCA,MAAQ,IAAI,KAAA,GAAQ,gBAAgB;AAAA,MAAA;AAAA,IACtC;AAGF,eAAW,IAAI,UAAU,MAAM;AAAA,MAC7B;AAAA,MACA,SAAS,OAAO;AAAA,QACd,OAAO,QAAQ,UAAU,OAAO,EAAE,IAAI,CAAC,CAAC,MAAM,EAAC,SAAA,CAAS,MAAM;AAAA,UAC5D;AAAA,UACA,gCAAgC,QAAQ;AAAA,QAAA,CACzC;AAAA,MAAA;AAAA,IACH,CACD;AAAA,EACH,CAAC;AACD,SAAO;AACT;AAEO,SAAS,iBACd,YACA,WACgB;AAChB,QAAM,YAAY,WAAW,IAAI,SAAS;AAC1C,MAAI,CAAC,WAAW;AACd,UAAM,IAAI;AAAA,MACR,UAAU,SAAS,oBAAoB,CAAC,GAAG,WAAW,KAAA,CAAM,EACzD,OAAO,CAAA,MAAK,CAAC,EAAE,SAAS,GAAG,KAAK,CAAC,EAAE,WAAW,cAAc,CAAC,EAC7D,KAAA,CAAM;AAAA,IAAA;AAAA,EAGb;AACA,SAAO;AACT;AAGA,SAAS,OAAO,GAAa,GAAa;AACxC,MAAI,EAAE,WAAW,EAAE,QAAQ;AACzB,WAAO,EAAE,SAAS,EAAE;AAAA,EACtB;AACA,WAAS,IAAI,GAAG,IAAI,EAAE,QAAQ,KAAK;AACjC,QAAI,EAAE,CAAC,IAAI,EAAE,CAAC,GAAG;AACf,aAAO;AAAA,IACT;AACA,QAAI,EAAE,CAAC,IAAI,EAAE,CAAC,GAAG;AACf,aAAO;AAAA,IACT;AAAA,EACF;AACA,SAAO;AACT;"}
+ {"version":3,"file":"lite-tables.js","sources":["../../../../../zero-cache/src/db/lite-tables.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {must} from '../../../shared/src/must.ts';\nimport {difference} from '../../../shared/src/set-utils.ts';\nimport * as v from '../../../shared/src/valita.ts';\nimport {primaryKeySchema} from '../../../zero-protocol/src/primary-key.ts';\nimport type {Database} from '../../../zqlite/src/db.ts';\nimport {\n ColumnMetadataStore,\n metadataToLiteTypeString,\n} from '../services/replicator/schema/column-metadata.ts';\nimport {TableMetadataTracker} from '../services/replicator/schema/table-metadata.ts';\nimport {\n isArray,\n isEnum,\n liteTypeToZqlValueType,\n mapLiteDataTypeToZqlSchemaValue,\n nullableUpstream,\n} from '../types/lite.ts';\nimport * as PostgresTypeClass from './postgres-type-class-enum.ts';\nimport type {\n LiteAndZqlSpec,\n LiteIndexSpec,\n LiteTableSpec,\n MutableLiteIndexSpec,\n MutableLiteTableSpec,\n} from './specs.ts';\n\ntype ColumnInfo = {\n table: string;\n name: string;\n type: string;\n notNull: number;\n dflt: string | null;\n keyPos: number;\n};\n\nexport type LiteTableSpecWithReplicationStatus = LiteTableSpec & {\n readonly backfilling?: string[];\n readonly minRowVersion?: string | null;\n};\n\ntype MutableLiteTableSpecWithReplicationStatus = MutableLiteTableSpec & {\n backfilling: string[];\n minRowVersion: string | null;\n};\n\nexport function listTables(\n db: Database,\n useColumnMetadata = true,\n useTableMetadata = true,\n): LiteTableSpecWithReplicationStatus[] {\n const columns = db\n .prepare(\n `\n SELECT\n m.name as \"table\",\n p.name as name,\n p.type as type,\n p.\"notnull\" as \"notNull\",\n p.dflt_value as \"dflt\",\n p.pk as keyPos\n FROM sqlite_master as m\n LEFT JOIN pragma_table_info(m.name) as p\n WHERE m.type = 'table'\n AND m.name NOT LIKE 'sqlite_%'\n AND m.name NOT LIKE '_zero.%'\n AND m.name NOT LIKE '_litestream_%'\n `,\n )\n .all() as ColumnInfo[];\n\n const minRowVersions = useTableMetadata\n ? new TableMetadataTracker(db).getMinRowVersions()\n : new Map();\n const tables: LiteTableSpecWithReplicationStatus[] = [];\n let table: MutableLiteTableSpecWithReplicationStatus | undefined;\n\n columns.forEach(col => {\n if (col.table !== table?.name) {\n // New table\n table = {\n name: col.table,\n columns: {},\n backfilling: [],\n minRowVersion: minRowVersions.get(col.table) ?? null,\n };\n tables.push(table);\n }\n\n // Try to read from metadata table first, fall back to SQLite column type\n let dataType: string;\n let elemPgTypeClass:\n | typeof PostgresTypeClass.Base\n | typeof PostgresTypeClass.Enum\n | null;\n\n const metadata = useColumnMetadata\n ? ColumnMetadataStore.getInstance(db)?.getColumn(col.table, col.name)\n : undefined;\n if (metadata) {\n // Read from metadata table and convert to pipe notation\n dataType = metadataToLiteTypeString(metadata);\n elemPgTypeClass = metadata.isArray\n ? metadata.isEnum\n ? PostgresTypeClass.Enum\n : PostgresTypeClass.Base\n : null;\n } else {\n // Fall back to reading from SQLite column type (pipe notation)\n dataType = col.type;\n elemPgTypeClass = isArray(col.type)\n ? isEnum(col.type)\n ? 
PostgresTypeClass.Enum\n : PostgresTypeClass.Base\n : null;\n }\n\n table.columns[col.name] = {\n pos: Object.keys(table.columns).length + 1,\n dataType,\n characterMaximumLength: null,\n notNull: col.notNull !== 0,\n dflt: col.dflt,\n elemPgTypeClass,\n };\n if (metadata?.isBackfilling) {\n table.backfilling.push(col.name);\n }\n if (col.keyPos) {\n table.primaryKey ??= [];\n while (table.primaryKey.length < col.keyPos) {\n table.primaryKey.push('');\n }\n table.primaryKey[col.keyPos - 1] = col.name;\n }\n });\n\n return tables;\n}\n\nexport function listIndexes(db: Database): LiteIndexSpec[] {\n const indexes = db\n .prepare(\n `SELECT \n idx.name as indexName, \n idx.tbl_name as tableName, \n info.\"unique\" as \"unique\",\n col.name as column,\n CASE WHEN col.desc = 0 THEN 'ASC' ELSE 'DESC' END as dir\n FROM sqlite_master as idx\n JOIN pragma_index_list(idx.tbl_name) AS info ON info.name = idx.name\n JOIN pragma_index_xinfo(idx.name) as col\n WHERE idx.type = 'index' AND \n col.key = 1 AND\n idx.tbl_name NOT LIKE '_zero.%'\n ORDER BY idx.name, col.seqno ASC`,\n )\n .all() as {\n indexName: string;\n tableName: string;\n unique: number;\n column: string;\n dir: 'ASC' | 'DESC';\n }[];\n\n const ret: MutableLiteIndexSpec[] = [];\n for (const {indexName: name, tableName, unique, column, dir} of indexes) {\n if (ret.at(-1)?.name === name) {\n // Aggregate multiple column names into the array.\n must(ret.at(-1)).columns[column] = dir;\n } else {\n ret.push({\n tableName,\n name,\n columns: {[column]: dir},\n unique: unique !== 0,\n });\n }\n }\n\n return ret;\n}\n\nexport type ZqlSpecOptions = {\n /**\n * Controls whether to include backfilling columns in the computed\n * LiteAndZqlSpec. In general, backfilling columns should be include\n * in \"replication\" logic (copying data from upstream to the replica),\n * and excluded from \"sync\" logic (sending data from the replica to\n * clients).\n */\n includeBackfillingColumns: boolean;\n};\n\n/**\n * Computes a TableSpec \"view\" of the replicated data that is\n * suitable for processing / consumption for the client. 
This\n * includes:\n * * excluding tables without a PRIMARY KEY or UNIQUE INDEX\n * * excluding columns with types that are not supported by ZQL\n * * choosing columns to use as the primary key amongst those\n * in unique indexes\n *\n * @param tableSpecs an optional map to reset and populate\n * @param fullTables an optional map to receive the full table specs,\n * which may include tables and columns that are not synced to\n * the client because they lack a primary key or are of unsupported\n * data types.\n */\nexport function computeZqlSpecs(\n lc: LogContext,\n replica: Database,\n opts: ZqlSpecOptions,\n tableSpecs: Map<string, LiteAndZqlSpec> = new Map(),\n fullTables?: Map<string, LiteTableSpec>,\n): Map<string, LiteAndZqlSpec> {\n return computeZqlSpecsFromLiteSpecs(\n listTables(replica),\n listIndexes(replica),\n opts,\n tableSpecs,\n fullTables,\n lc,\n );\n}\n\nexport function computeZqlSpecsFromLiteSpecs(\n tables: LiteTableSpecWithReplicationStatus[],\n indexes: LiteIndexSpec[],\n {includeBackfillingColumns}: ZqlSpecOptions,\n tableSpecs: Map<string, LiteAndZqlSpec> = new Map(),\n fullTables?: Map<string, LiteTableSpec>,\n lc?: LogContext,\n): Map<string, LiteAndZqlSpec> {\n tableSpecs.clear();\n fullTables?.clear();\n\n const uniqueIndexColumns = new Map<string, string[][]>();\n for (const {tableName, columns} of indexes.filter(idx => idx.unique)) {\n if (!uniqueIndexColumns.has(tableName)) {\n uniqueIndexColumns.set(tableName, []);\n }\n uniqueIndexColumns.get(tableName)?.push(Object.keys(columns));\n }\n\n tables.forEach(fullTable => {\n fullTables?.set(fullTable.name, fullTable);\n\n const backfilling = new Set(fullTable.backfilling);\n // Only include columns that:\n // - have a defined ZQL Value\n // - aren't backfilling if `includeBackfillingColumns` is false\n const visibleColumns = Object.entries(fullTable.columns).filter(\n ([col, {dataType}]) =>\n liteTypeToZqlValueType(dataType) &&\n (includeBackfillingColumns || !backfilling.has(col)),\n );\n const notNullColumns = new Set(\n visibleColumns\n .filter(\n ([col, {dataType}]) =>\n !nullableUpstream(dataType) || fullTable.primaryKey?.includes(col),\n )\n .map(([col]) => col),\n );\n\n const uniqueKeys = uniqueIndexColumns.get(fullTable.name) ?? [];\n // Examine all column combinations that can serve as a primary key,\n // i.e. excluding indexes over nullable or unsynced columns.\n const keys = uniqueKeys.filter(\n key => difference(new Set(key), notNullColumns).size === 0,\n );\n if (keys.length === 0) {\n // Only include tables with a row key.\n //\n // Note that this will automatically exclude tables that are being\n // backfilled (when includeBackfillingColumns is `false`) since candidate\n // keys only include visible columns.\n lc?.debug?.(\n `not syncing table ${fullTable.name} because it ` +\n (backfilling.size ? 'is being backfilled' : 'has no primary key'),\n );\n return;\n }\n // Pick the \"best\" (i.e. shortest) key for default IVM operations.\n const primaryKey = keys.sort(keyCmp)[0];\n\n const tableSpec = {\n ...fullTable,\n columns: Object.fromEntries(visibleColumns),\n // normalize (sort) keys to minimize creating new objects.\n // See row-key.ts: normalizedKeyOrder()\n primaryKey: v.parse(primaryKey.sort(), primaryKeySchema),\n uniqueKeys: uniqueKeys.map(key => v.parse(key.sort(), primaryKeySchema)),\n allPotentialPrimaryKeys: keys.map(key =>\n v.parse(key.sort(), primaryKeySchema),\n ),\n minRowVersion: fullTable.minRowVersion ?? 
null,\n };\n\n tableSpecs.set(tableSpec.name, {\n tableSpec,\n zqlSpec: Object.fromEntries(\n Object.entries(tableSpec.columns).map(([name, {dataType}]) => [\n name,\n mapLiteDataTypeToZqlSchemaValue(dataType),\n ]),\n ),\n });\n });\n return tableSpecs;\n}\n\nexport function mustGetTableSpec(\n tableSpecs: Map<string, LiteAndZqlSpec>,\n tableName: string,\n): LiteAndZqlSpec {\n const tableSpec = tableSpecs.get(tableName);\n if (!tableSpec) {\n throw new Error(\n `table '${tableName}' is not one of: ${[...tableSpecs.keys()]\n .filter(t => !t.includes('.') && !t.startsWith('_litestream_'))\n .sort()}. ` +\n `Check the spelling and ensure that the table has a primary key.`,\n );\n }\n return tableSpec;\n}\n\n// Deterministic comparator for favoring shorter row keys.\nfunction keyCmp(a: string[], b: string[]) {\n if (a.length !== b.length) {\n return a.length - b.length; // Fewer columns are better.\n }\n for (let i = 0; i < a.length; i++) {\n if (a[i] < b[i]) {\n return -1;\n }\n if (a[i] > b[i]) {\n return 1;\n }\n }\n return 0;\n}\n"],"names":["PostgresTypeClass.Enum","PostgresTypeClass.Base","v.parse"],"mappings":";;;;;;;;AA8CO,SAAS,WACd,IACA,oBAAoB,MACpB,mBAAmB,MACmB;AACtC,QAAM,UAAU,GACb;AAAA,IACC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAAA,EAgBD,IAAA;AAEH,QAAM,iBAAiB,mBACnB,IAAI,qBAAqB,EAAE,EAAE,kBAAA,IAC7B,oBAAI,IAAA;AACR,QAAM,SAA+C,CAAA;AACrD,MAAI;AAEJ,UAAQ,QAAQ,CAAA,QAAO;AACrB,QAAI,IAAI,UAAU,OAAO,MAAM;AAE7B,cAAQ;AAAA,QACN,MAAM,IAAI;AAAA,QACV,SAAS,CAAA;AAAA,QACT,aAAa,CAAA;AAAA,QACb,eAAe,eAAe,IAAI,IAAI,KAAK,KAAK;AAAA,MAAA;AAElD,aAAO,KAAK,KAAK;AAAA,IACnB;AAGA,QAAI;AACJ,QAAI;AAKJ,UAAM,WAAW,oBACb,oBAAoB,YAAY,EAAE,GAAG,UAAU,IAAI,OAAO,IAAI,IAAI,IAClE;AACJ,QAAI,UAAU;AAEZ,iBAAW,yBAAyB,QAAQ;AAC5C,wBAAkB,SAAS,UACvB,SAAS,SACPA,OACAC,OACF;AAAA,IACN,OAAO;AAEL,iBAAW,IAAI;AACf,wBAAkB,QAAQ,IAAI,IAAI,IAC9B,OAAO,IAAI,IAAI,IACbD,OACAC,OACF;AAAA,IACN;AAEA,UAAM,QAAQ,IAAI,IAAI,IAAI;AAAA,MACxB,KAAK,OAAO,KAAK,MAAM,OAAO,EAAE,SAAS;AAAA,MACzC;AAAA,MACA,wBAAwB;AAAA,MACxB,SAAS,IAAI,YAAY;AAAA,MACzB,MAAM,IAAI;AAAA,MACV;AAAA,IAAA;AAEF,QAAI,UAAU,eAAe;AAC3B,YAAM,YAAY,KAAK,IAAI,IAAI;AAAA,IACjC;AACA,QAAI,IAAI,QAAQ;AACd,YAAM,eAAe,CAAA;AACrB,aAAO,MAAM,WAAW,SAAS,IAAI,QAAQ;AAC3C,cAAM,WAAW,KAAK,EAAE;AAAA,MAC1B;AACA,YAAM,WAAW,IAAI,SAAS,CAAC,IAAI,IAAI;AAAA,IACzC;AAAA,EACF,CAAC;AAED,SAAO;AACT;AAEO,SAAS,YAAY,IAA+B;AACzD,QAAM,UAAU,GACb;AAAA,IACC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAAA,EAcD,IAAA;AAQH,QAAM,MAA8B,CAAA;AACpC,aAAW,EAAC,WAAW,MAAM,WAAW,QAAQ,QAAQ,IAAA,KAAQ,SAAS;AACvE,QAAI,IAAI,GAAG,EAAE,GAAG,SAAS,MAAM;AAE7B,WAAK,IAAI,GAAG,EAAE,CAAC,EAAE,QAAQ,MAAM,IAAI;AAAA,IACrC,OAAO;AACL,UAAI,KAAK;AAAA,QACP;AAAA,QACA;AAAA,QACA,SAAS,EAAC,CAAC,MAAM,GAAG,IAAA;AAAA,QACpB,QAAQ,WAAW;AAAA,MAAA,CACpB;AAAA,IACH;AAAA,EACF;AAEA,SAAO;AACT;AA4BO,SAAS,gBACd,IACA,SACA,MACA,aAA0C,oBAAI,IAAA,GAC9C,YAC6B;AAC7B,SAAO;AAAA,IACL,WAAW,OAAO;AAAA,IAClB,YAAY,OAAO;AAAA,IACnB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAEJ;AAEO,SAAS,6BACd,QACA,SACA,EAAC,0BAAA,GACD,aAA0C,oBAAI,IAAA,GAC9C,YACA,IAC6B;AAC7B,aAAW,MAAA;AACX,cAAY,MAAA;AAEZ,QAAM,yCAAyB,IAAA;AAC/B,aAAW,EAAC,WAAW,aAAY,QAAQ,OAAO,CAAA,QAAO,IAAI,MAAM,GAAG;AACpE,QAAI,CAAC,mBAAmB,IAAI,SAAS,GAAG;AACtC,yBAAmB,IAAI,WAAW,EAAE;AAAA,IACtC;AACA,uBAAmB,IAAI,SAAS,GAAG,KAAK,OAAO,KAAK,OAAO,CAAC;AAAA,EAC9D;AAEA,SAAO,QAAQ,CAAA,cAAa;AAC1B,gBAAY,IAAI,UAAU,MAAM,SAAS;AAEzC,UAAM,cAAc,IAAI,IAAI,UAAU,WAAW;AAIjD,UAAM,iBAAiB,OAAO,QAAQ,UAAU,OAAO,EAAE;AAAA,MACvD,CAAC,CAAC,KAAK,EAAC,UAAS,MACf,uBAAuB,QAAQ,MAC9B,6BAA6B,CAAC,YAAY,IAAI,GAAG;AAAA,IAAA;AAEtD,UAAM,iBAAiB,IAAI;AAAA,M
ACzB,eACG;AAAA,QACC,CAAC,CAAC,KAAK,EAAC,UAAS,MACf,CAAC,iBAAiB,QAAQ,KAAK,UAAU,YAAY,SAAS,GAAG;AAAA,MAAA,EAEpE,IAAI,CAAC,CAAC,GAAG,MAAM,GAAG;AAAA,IAAA;AAGvB,UAAM,aAAa,mBAAmB,IAAI,UAAU,IAAI,KAAK,CAAA;AAG7D,UAAM,OAAO,WAAW;AAAA,MACtB,CAAA,QAAO,WAAW,IAAI,IAAI,GAAG,GAAG,cAAc,EAAE,SAAS;AAAA,IAAA;AAE3D,QAAI,KAAK,WAAW,GAAG;AAMrB,UAAI;AAAA,QACF,qBAAqB,UAAU,IAAI,kBAChC,YAAY,OAAO,wBAAwB;AAAA,MAAA;AAEhD;AAAA,IACF;AAEA,UAAM,aAAa,KAAK,KAAK,MAAM,EAAE,CAAC;AAEtC,UAAM,YAAY;AAAA,MAChB,GAAG;AAAA,MACH,SAAS,OAAO,YAAY,cAAc;AAAA;AAAA;AAAA,MAG1C,YAAYC,MAAQ,WAAW,KAAA,GAAQ,gBAAgB;AAAA,MACvD,YAAY,WAAW,IAAI,CAAA,QAAOA,MAAQ,IAAI,QAAQ,gBAAgB,CAAC;AAAA,MACvE,yBAAyB,KAAK;AAAA,QAAI,SAChCA,MAAQ,IAAI,KAAA,GAAQ,gBAAgB;AAAA,MAAA;AAAA,MAEtC,eAAe,UAAU,iBAAiB;AAAA,IAAA;AAG5C,eAAW,IAAI,UAAU,MAAM;AAAA,MAC7B;AAAA,MACA,SAAS,OAAO;AAAA,QACd,OAAO,QAAQ,UAAU,OAAO,EAAE,IAAI,CAAC,CAAC,MAAM,EAAC,SAAA,CAAS,MAAM;AAAA,UAC5D;AAAA,UACA,gCAAgC,QAAQ;AAAA,QAAA,CACzC;AAAA,MAAA;AAAA,IACH,CACD;AAAA,EACH,CAAC;AACD,SAAO;AACT;AAEO,SAAS,iBACd,YACA,WACgB;AAChB,QAAM,YAAY,WAAW,IAAI,SAAS;AAC1C,MAAI,CAAC,WAAW;AACd,UAAM,IAAI;AAAA,MACR,UAAU,SAAS,oBAAoB,CAAC,GAAG,WAAW,KAAA,CAAM,EACzD,OAAO,CAAA,MAAK,CAAC,EAAE,SAAS,GAAG,KAAK,CAAC,EAAE,WAAW,cAAc,CAAC,EAC7D,KAAA,CAAM;AAAA,IAAA;AAAA,EAGb;AACA,SAAO;AACT;AAGA,SAAS,OAAO,GAAa,GAAa;AACxC,MAAI,EAAE,WAAW,EAAE,QAAQ;AACzB,WAAO,EAAE,SAAS,EAAE;AAAA,EACtB;AACA,WAAS,IAAI,GAAG,IAAI,EAAE,QAAQ,KAAK;AACjC,QAAI,EAAE,CAAC,IAAI,EAAE,CAAC,GAAG;AACf,aAAO;AAAA,IACT;AACA,QAAI,EAAE,CAAC,IAAI,EAAE,CAAC,GAAG;AACf,aAAO;AAAA,IACT;AAAA,EACF;AACA,SAAO;AACT;"}
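In the new `sourcesContent` above, `listTables` gains a third parameter, `useTableMetadata` (default `true`), and stamps each returned spec with a `minRowVersion` looked up via `TableMetadataTracker.getMinRowVersions()`. A minimal usage sketch, assuming access to zero-cache's internal `Database` handle (these are internal modules, not public API; the import paths mirror the ones in the embedded source and may differ from a real call site):

```ts
import type {Database} from '../../../zqlite/src/db.ts';
import {listTables} from '../db/lite-tables.ts';

// Sketch only: listTables is zero-cache-internal, shown for illustration.
function logVersionFloors(db: Database): void {
  // The new third argument enables the table-metadata lookup (default true).
  for (const table of listTables(db, true, true)) {
    // minRowVersion is null when no version floor is recorded for the table.
    console.log(table.name, table.minRowVersion ?? '(no version floor)');
  }
}
```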
@@ -75,7 +75,7 @@ export declare const publishedTableSpec: v.ObjectType<Omit<Omit<{
  }, undefined>;
  export type MutableLiteTableSpec = v.Infer<typeof liteTableSpec>;
  export type LiteTableSpec = Readonly<MutableLiteTableSpec>;
- export type LiteTableSpecWithKeys = Omit<LiteTableSpec, 'primaryKey'> & {
+ export type LiteTableSpecWithKeysAndVersion = Omit<LiteTableSpec, 'primaryKey'> & {
  /**
  * All keys associated with a unique index. Includes indexes with
  * nullable columns.
@@ -92,9 +92,15 @@ export type LiteTableSpecWithKeys = Omit<LiteTableSpec, 'primaryKey'> & {
  * columns, i.e. suitable as a primary key.
  */
  allPotentialPrimaryKeys: PrimaryKey[];
+ /**
+ * The minimum `_0_version` value for every row in the table. If this is
+ * present, `_0_version` value in the row itself should only be used if
+ * it is greater (i.e. later) than the `minRowVersion`.
+ */
+ minRowVersion: string | null;
  };
  export type LiteAndZqlSpec = {
- tableSpec: LiteTableSpecWithKeys;
+ tableSpec: LiteTableSpecWithKeysAndVersion;
  zqlSpec: Record<string, SchemaValue>;
  };
  export type TableSpec = Readonly<v.Infer<typeof tableSpec>>;
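The doc comment on the new `minRowVersion` field pins down its contract: a row's own `_0_version` only applies when it is later than the table-wide floor. A minimal sketch of that rule; `effectiveRowVersion` is a hypothetical helper (not part of the package), and it assumes `_0_version` watermarks compare lexicographically, which the comment's "greater (i.e. later)" wording suggests but does not state:

```ts
// Hypothetical helper illustrating the documented minRowVersion semantics.
function effectiveRowVersion(
  rowVersion: string, // the row's own _0_version
  minRowVersion: string | null, // table-wide floor; null if none recorded
): string {
  // Per the doc comment, the row's version should only be used if it is
  // greater (later) than minRowVersion; otherwise fall back to the floor.
  return minRowVersion !== null && rowVersion <= minRowVersion
    ? minRowVersion
    : rowVersion;
}

// e.g. effectiveRowVersion('05', '07') === '07'
//      effectiveRowVersion('09', '07') === '09'
```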
@@ -1 +1 @@
- {"version":3,"file":"specs.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/db/specs.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,6BAA6B,CAAC;AAC9D,OAAO,KAAK,CAAC,MAAM,+BAA+B,CAAC;AACnD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,2CAA2C,CAAC;AAC1E,OAAO,KAAK,EAAC,WAAW,EAAC,MAAM,0CAA0C,CAAC;AAI1E,eAAO,MAAM,iBAAiB,iDAQ7B,CAAC;AASF,eAAO,MAAM,UAAU;;;;;;;;aAarB,CAAC;AAEH,MAAM,MAAM,UAAU,GAAG,QAAQ,CAAC,CAAC,CAAC,KAAK,CAAC,OAAO,UAAU,CAAC,CAAC,CAAC;AAM9D,eAAO,MAAM,aAAa;;;;;;;;;;;;aAIxB,CAAC;AAEH,eAAO,MAAM,SAAS;;;;;;;;;;;;;;aAEpB,CAAC;AAEH,eAAO,MAAM,kBAAkB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;aAQ7B,CAAC;AAEH,MAAM,MAAM,oBAAoB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,aAAa,CAAC,CAAC;AAEjE,MAAM,MAAM,aAAa,GAAG,QAAQ,CAAC,oBAAoB,CAAC,CAAC;AAE3D,MAAM,MAAM,qBAAqB,GAAG,IAAI,CAAC,aAAa,EAAE,YAAY,CAAC,GAAG;IACtE;;;OAGG;IACH,UAAU,EAAE,UAAU,EAAE,CAAC;IAEzB;;;;OAIG;IACH,UAAU,EAAE,UAAU,CAAC;IAEvB;;;OAGG;IACH,uBAAuB,EAAE,UAAU,EAAE,CAAC;CACvC,CAAC;AAEF,MAAM,MAAM,cAAc,GAAG;IAC3B,SAAS,EAAE,qBAAqB,CAAC;IACjC,OAAO,EAAE,MAAM,CAAC,MAAM,EAAE,WAAW,CAAC,CAAC;CACtC,CAAC;AAEF,MAAM,MAAM,SAAS,GAAG,QAAQ,CAAC,CAAC,CAAC,KAAK,CAAC,OAAO,SAAS,CAAC,CAAC,CAAC;AAE5D,MAAM,MAAM,kBAAkB,GAAG,QAAQ,CAAC,CAAC,CAAC,KAAK,CAAC,OAAO,kBAAkB,CAAC,CAAC,CAAC;AAE9E,eAAO,MAAM,eAAe,wBAAgC,CAAC;AAE7D,eAAO,MAAM,aAAa;;;;;aAKxB,CAAC;AAEH,MAAM,MAAM,oBAAoB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,aAAa,CAAC,CAAC;AAEjE,MAAM,MAAM,aAAa,GAAG,QAAQ,CAAC,oBAAoB,CAAC,CAAC;AAE3D,eAAO,MAAM,SAAS;;;;;;;aAEpB,CAAC;AAEH,MAAM,MAAM,SAAS,GAAG,YAAY,CAAC,CAAC,CAAC,KAAK,CAAC,OAAO,SAAS,CAAC,CAAC,CAAC;AAEhE,eAAO,MAAM,kBAAkB;;;;;;;;;;;aAI7B,CAAC;AAEH,MAAM,MAAM,kBAAkB,GAAG,YAAY,CAC3C,CAAC,CAAC,KAAK,CAAC,OAAO,kBAAkB,CAAC,CACnC,CAAC"}
+ {"version":3,"file":"specs.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/db/specs.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,6BAA6B,CAAC;AAC9D,OAAO,KAAK,CAAC,MAAM,+BAA+B,CAAC;AACnD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,2CAA2C,CAAC;AAC1E,OAAO,KAAK,EAAC,WAAW,EAAC,MAAM,0CAA0C,CAAC;AAI1E,eAAO,MAAM,iBAAiB,iDAQ7B,CAAC;AASF,eAAO,MAAM,UAAU;;;;;;;;aAarB,CAAC;AAEH,MAAM,MAAM,UAAU,GAAG,QAAQ,CAAC,CAAC,CAAC,KAAK,CAAC,OAAO,UAAU,CAAC,CAAC,CAAC;AAM9D,eAAO,MAAM,aAAa;;;;;;;;;;;;aAIxB,CAAC;AAEH,eAAO,MAAM,SAAS;;;;;;;;;;;;;;aAEpB,CAAC;AAEH,eAAO,MAAM,kBAAkB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;aAQ7B,CAAC;AAEH,MAAM,MAAM,oBAAoB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,aAAa,CAAC,CAAC;AAEjE,MAAM,MAAM,aAAa,GAAG,QAAQ,CAAC,oBAAoB,CAAC,CAAC;AAE3D,MAAM,MAAM,+BAA+B,GAAG,IAAI,CAChD,aAAa,EACb,YAAY,CACb,GAAG;IACF;;;OAGG;IACH,UAAU,EAAE,UAAU,EAAE,CAAC;IAEzB;;;;OAIG;IACH,UAAU,EAAE,UAAU,CAAC;IAEvB;;;OAGG;IACH,uBAAuB,EAAE,UAAU,EAAE,CAAC;IAEtC;;;;OAIG;IACH,aAAa,EAAE,MAAM,GAAG,IAAI,CAAC;CAC9B,CAAC;AAEF,MAAM,MAAM,cAAc,GAAG;IAC3B,SAAS,EAAE,+BAA+B,CAAC;IAC3C,OAAO,EAAE,MAAM,CAAC,MAAM,EAAE,WAAW,CAAC,CAAC;CACtC,CAAC;AAEF,MAAM,MAAM,SAAS,GAAG,QAAQ,CAAC,CAAC,CAAC,KAAK,CAAC,OAAO,SAAS,CAAC,CAAC,CAAC;AAE5D,MAAM,MAAM,kBAAkB,GAAG,QAAQ,CAAC,CAAC,CAAC,KAAK,CAAC,OAAO,kBAAkB,CAAC,CAAC,CAAC;AAE9E,eAAO,MAAM,eAAe,wBAAgC,CAAC;AAE7D,eAAO,MAAM,aAAa;;;;;aAKxB,CAAC;AAEH,MAAM,MAAM,oBAAoB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,aAAa,CAAC,CAAC;AAEjE,MAAM,MAAM,aAAa,GAAG,QAAQ,CAAC,oBAAoB,CAAC,CAAC;AAE3D,eAAO,MAAM,SAAS;;;;;;;aAEpB,CAAC;AAEH,MAAM,MAAM,SAAS,GAAG,YAAY,CAAC,CAAC,CAAC,KAAK,CAAC,OAAO,SAAS,CAAC,CAAC,CAAC;AAEhE,eAAO,MAAM,kBAAkB;;;;;;;;;;;aAI7B,CAAC;AAEH,MAAM,MAAM,kBAAkB,GAAG,YAAY,CAC3C,CAAC,CAAC,KAAK,CAAC,OAAO,kBAAkB,CAAC,CACnC,CAAC"}
@@ -1 +1 @@
- {"version":3,"file":"specs.js","sources":["../../../../../zero-cache/src/db/specs.ts"],"sourcesContent":["import type {DeepReadonly} from '../../../shared/src/json.ts';\nimport * as v from '../../../shared/src/valita.ts';\nimport type {PrimaryKey} from '../../../zero-protocol/src/primary-key.ts';\nimport type {SchemaValue} from '../../../zero-schema/src/table-schema.ts';\nimport * as PostgresReplicaIdentity from './postgres-replica-identity-enum.ts';\nimport * as PostgresTypeClass from './postgres-type-class-enum.ts';\n\nexport const pgTypeClassSchema = v.literalUnion(\n PostgresTypeClass.Base,\n PostgresTypeClass.Composite,\n PostgresTypeClass.Domain,\n PostgresTypeClass.Enum,\n PostgresTypeClass.Pseudo,\n PostgresTypeClass.Range,\n PostgresTypeClass.Multirange,\n);\n\nconst pgReplicaIdentitySchema = v.literalUnion(\n PostgresReplicaIdentity.Default,\n PostgresReplicaIdentity.Nothing,\n PostgresReplicaIdentity.Full,\n PostgresReplicaIdentity.Index,\n);\n\nexport const columnSpec = v.object({\n pos: v.number(),\n dataType: v.string(),\n pgTypeClass: pgTypeClassSchema.optional(),\n\n // If the column is an array, this will be the type of the\n // elements in the array. If the column is not an array,\n // this will be null.\n elemPgTypeClass: pgTypeClassSchema.nullable().optional(),\n\n characterMaximumLength: v.number().nullable().optional(),\n notNull: v.boolean().nullable().optional(),\n dflt: v.string().nullable().optional(),\n});\n\nexport type ColumnSpec = Readonly<v.Infer<typeof columnSpec>>;\n\nconst publishedColumnSpec = columnSpec.extend({\n typeOID: v.number(),\n});\n\nexport const liteTableSpec = v.object({\n name: v.string(),\n columns: v.record(columnSpec),\n primaryKey: v.array(v.string()).optional(),\n});\n\nexport const tableSpec = liteTableSpec.extend({\n schema: v.string(),\n});\n\nexport const publishedTableSpec = tableSpec.extend({\n oid: v.number(),\n // Always present for new instances (e.g. from DDL triggers), but\n // may from `initialSchema` object stored in the `replicas` table.\n schemaOID: v.number().optional(),\n columns: v.record(publishedColumnSpec),\n replicaIdentity: pgReplicaIdentitySchema.optional(),\n publications: v.record(v.object({rowFilter: v.string().nullable()})),\n});\n\nexport type MutableLiteTableSpec = v.Infer<typeof liteTableSpec>;\n\nexport type LiteTableSpec = Readonly<MutableLiteTableSpec>;\n\nexport type LiteTableSpecWithKeys = Omit<LiteTableSpec, 'primaryKey'> & {\n /**\n * All keys associated with a unique index. Includes indexes with\n * nullable columns.\n */\n uniqueKeys: PrimaryKey[];\n\n /**\n * The key selected to act as the \"primary key\". Primary keys\n * are not explicitly set on the replica, but an appropriate\n * unique index is required.\n */\n primaryKey: PrimaryKey; // note: required\n\n /**\n * All keys associated with a unique index over non-null\n * columns, i.e. 
suitable as a primary key.\n */\n allPotentialPrimaryKeys: PrimaryKey[];\n};\n\nexport type LiteAndZqlSpec = {\n tableSpec: LiteTableSpecWithKeys;\n zqlSpec: Record<string, SchemaValue>;\n};\n\nexport type TableSpec = Readonly<v.Infer<typeof tableSpec>>;\n\nexport type PublishedTableSpec = Readonly<v.Infer<typeof publishedTableSpec>>;\n\nexport const directionSchema = v.literalUnion('ASC', 'DESC');\n\nexport const liteIndexSpec = v.object({\n name: v.string(),\n tableName: v.string(),\n unique: v.boolean(),\n columns: v.record(directionSchema),\n});\n\nexport type MutableLiteIndexSpec = v.Infer<typeof liteIndexSpec>;\n\nexport type LiteIndexSpec = Readonly<MutableLiteIndexSpec>;\n\nexport const indexSpec = liteIndexSpec.extend({\n schema: v.string(),\n});\n\nexport type IndexSpec = DeepReadonly<v.Infer<typeof indexSpec>>;\n\nexport const publishedIndexSpec = indexSpec.extend({\n isReplicaIdentity: v.boolean().optional(),\n isPrimaryKey: v.boolean().optional(),\n isImmediate: v.boolean().optional(),\n});\n\nexport type PublishedIndexSpec = DeepReadonly<\n v.Infer<typeof publishedIndexSpec>\n>;\n"],"names":["v.literalUnion","PostgresTypeClass.Base","PostgresTypeClass.Composite","PostgresTypeClass.Domain","PostgresTypeClass.Enum","PostgresTypeClass.Pseudo","PostgresTypeClass.Range","PostgresTypeClass.Multirange","PostgresReplicaIdentity.Default","PostgresReplicaIdentity.Nothing","PostgresReplicaIdentity.Full","PostgresReplicaIdentity.Index","v.object","v.number","v.string","v.boolean","v.record","v.array"],"mappings":";;;;AAOO,MAAM,oBAAoBA;AAAAA,EAC/BC;AAAAA,EACAC;AAAAA,EACAC;AAAAA,EACAC;AAAAA,EACAC;AAAAA,EACAC;AAAAA,EACAC;AACF;AAEA,MAAM,0BAA0BP;AAAAA,EAC9BQ;AAAAA,EACAC;AAAAA,EACAC;AAAAA,EACAC;AACF;AAEO,MAAM,aAAaC,OAAS;AAAA,EACjC,KAAKC,OAAE;AAAA,EACP,UAAUC,OAAE;AAAA,EACZ,aAAa,kBAAkB,SAAA;AAAA;AAAA;AAAA;AAAA,EAK/B,iBAAiB,kBAAkB,SAAA,EAAW,SAAA;AAAA,EAE9C,wBAAwBD,OAAE,EAAS,SAAA,EAAW,SAAA;AAAA,EAC9C,SAASE,QAAE,EAAU,SAAA,EAAW,SAAA;AAAA,EAChC,MAAMD,OAAE,EAAS,SAAA,EAAW,SAAA;AAC9B,CAAC;AAID,MAAM,sBAAsB,WAAW,OAAO;AAAA,EAC5C,SAASD,OAAE;AACb,CAAC;AAEM,MAAM,gBAAgBD,OAAS;AAAA,EACpC,MAAME,OAAE;AAAA,EACR,SAASE,OAAS,UAAU;AAAA,EAC5B,YAAYC,MAAQH,OAAE,CAAQ,EAAE,SAAA;AAClC,CAAC;AAEM,MAAM,YAAY,cAAc,OAAO;AAAA,EAC5C,QAAQA,OAAE;AACZ,CAAC;AAEM,MAAM,qBAAqB,UAAU,OAAO;AAAA,EACjD,KAAKD,OAAE;AAAA;AAAA;AAAA,EAGP,WAAWA,OAAE,EAAS,SAAA;AAAA,EACtB,SAASG,OAAS,mBAAmB;AAAA,EACrC,iBAAiB,wBAAwB,SAAA;AAAA,EACzC,cAAcA,OAASJ,OAAS,EAAC,WAAWE,OAAE,EAAS,SAAA,GAAW,CAAC;AACrE,CAAC;AAoCM,MAAM,kBAAkBd,aAAe,OAAO,MAAM;AAEpD,MAAM,gBAAgBY,OAAS;AAAA,EACpC,MAAME,OAAE;AAAA,EACR,WAAWA,OAAE;AAAA,EACb,QAAQC,QAAE;AAAA,EACV,SAASC,OAAS,eAAe;AACnC,CAAC;AAMM,MAAM,YAAY,cAAc,OAAO;AAAA,EAC5C,QAAQF,OAAE;AACZ,CAAC;AAIM,MAAM,qBAAqB,UAAU,OAAO;AAAA,EACjD,mBAAmBC,QAAE,EAAU,SAAA;AAAA,EAC/B,cAAcA,QAAE,EAAU,SAAA;AAAA,EAC1B,aAAaA,QAAE,EAAU,SAAA;AAC3B,CAAC;"}
+ {"version":3,"file":"specs.js","sources":["../../../../../zero-cache/src/db/specs.ts"],"sourcesContent":["import type {DeepReadonly} from '../../../shared/src/json.ts';\nimport * as v from '../../../shared/src/valita.ts';\nimport type {PrimaryKey} from '../../../zero-protocol/src/primary-key.ts';\nimport type {SchemaValue} from '../../../zero-schema/src/table-schema.ts';\nimport * as PostgresReplicaIdentity from './postgres-replica-identity-enum.ts';\nimport * as PostgresTypeClass from './postgres-type-class-enum.ts';\n\nexport const pgTypeClassSchema = v.literalUnion(\n PostgresTypeClass.Base,\n PostgresTypeClass.Composite,\n PostgresTypeClass.Domain,\n PostgresTypeClass.Enum,\n PostgresTypeClass.Pseudo,\n PostgresTypeClass.Range,\n PostgresTypeClass.Multirange,\n);\n\nconst pgReplicaIdentitySchema = v.literalUnion(\n PostgresReplicaIdentity.Default,\n PostgresReplicaIdentity.Nothing,\n PostgresReplicaIdentity.Full,\n PostgresReplicaIdentity.Index,\n);\n\nexport const columnSpec = v.object({\n pos: v.number(),\n dataType: v.string(),\n pgTypeClass: pgTypeClassSchema.optional(),\n\n // If the column is an array, this will be the type of the\n // elements in the array. If the column is not an array,\n // this will be null.\n elemPgTypeClass: pgTypeClassSchema.nullable().optional(),\n\n characterMaximumLength: v.number().nullable().optional(),\n notNull: v.boolean().nullable().optional(),\n dflt: v.string().nullable().optional(),\n});\n\nexport type ColumnSpec = Readonly<v.Infer<typeof columnSpec>>;\n\nconst publishedColumnSpec = columnSpec.extend({\n typeOID: v.number(),\n});\n\nexport const liteTableSpec = v.object({\n name: v.string(),\n columns: v.record(columnSpec),\n primaryKey: v.array(v.string()).optional(),\n});\n\nexport const tableSpec = liteTableSpec.extend({\n schema: v.string(),\n});\n\nexport const publishedTableSpec = tableSpec.extend({\n oid: v.number(),\n // Always present for new instances (e.g. from DDL triggers), but\n // may from `initialSchema` object stored in the `replicas` table.\n schemaOID: v.number().optional(),\n columns: v.record(publishedColumnSpec),\n replicaIdentity: pgReplicaIdentitySchema.optional(),\n publications: v.record(v.object({rowFilter: v.string().nullable()})),\n});\n\nexport type MutableLiteTableSpec = v.Infer<typeof liteTableSpec>;\n\nexport type LiteTableSpec = Readonly<MutableLiteTableSpec>;\n\nexport type LiteTableSpecWithKeysAndVersion = Omit<\n LiteTableSpec,\n 'primaryKey'\n> & {\n /**\n * All keys associated with a unique index. Includes indexes with\n * nullable columns.\n */\n uniqueKeys: PrimaryKey[];\n\n /**\n * The key selected to act as the \"primary key\". Primary keys\n * are not explicitly set on the replica, but an appropriate\n * unique index is required.\n */\n primaryKey: PrimaryKey; // note: required\n\n /**\n * All keys associated with a unique index over non-null\n * columns, i.e. suitable as a primary key.\n */\n allPotentialPrimaryKeys: PrimaryKey[];\n\n /**\n * The minimum `_0_version` value for every row in the table. If this is\n * present, `_0_version` value in the row itself should only be used if\n * it is greater (i.e. 
later) than the `minRowVersion`.\n */\n minRowVersion: string | null;\n};\n\nexport type LiteAndZqlSpec = {\n tableSpec: LiteTableSpecWithKeysAndVersion;\n zqlSpec: Record<string, SchemaValue>;\n};\n\nexport type TableSpec = Readonly<v.Infer<typeof tableSpec>>;\n\nexport type PublishedTableSpec = Readonly<v.Infer<typeof publishedTableSpec>>;\n\nexport const directionSchema = v.literalUnion('ASC', 'DESC');\n\nexport const liteIndexSpec = v.object({\n name: v.string(),\n tableName: v.string(),\n unique: v.boolean(),\n columns: v.record(directionSchema),\n});\n\nexport type MutableLiteIndexSpec = v.Infer<typeof liteIndexSpec>;\n\nexport type LiteIndexSpec = Readonly<MutableLiteIndexSpec>;\n\nexport const indexSpec = liteIndexSpec.extend({\n schema: v.string(),\n});\n\nexport type IndexSpec = DeepReadonly<v.Infer<typeof indexSpec>>;\n\nexport const publishedIndexSpec = indexSpec.extend({\n isReplicaIdentity: v.boolean().optional(),\n isPrimaryKey: v.boolean().optional(),\n isImmediate: v.boolean().optional(),\n});\n\nexport type PublishedIndexSpec = DeepReadonly<\n v.Infer<typeof publishedIndexSpec>\n>;\n"],"names":["v.literalUnion","PostgresTypeClass.Base","PostgresTypeClass.Composite","PostgresTypeClass.Domain","PostgresTypeClass.Enum","PostgresTypeClass.Pseudo","PostgresTypeClass.Range","PostgresTypeClass.Multirange","PostgresReplicaIdentity.Default","PostgresReplicaIdentity.Nothing","PostgresReplicaIdentity.Full","PostgresReplicaIdentity.Index","v.object","v.number","v.string","v.boolean","v.record","v.array"],"mappings":";;;;AAOO,MAAM,oBAAoBA;AAAAA,EAC/BC;AAAAA,EACAC;AAAAA,EACAC;AAAAA,EACAC;AAAAA,EACAC;AAAAA,EACAC;AAAAA,EACAC;AACF;AAEA,MAAM,0BAA0BP;AAAAA,EAC9BQ;AAAAA,EACAC;AAAAA,EACAC;AAAAA,EACAC;AACF;AAEO,MAAM,aAAaC,OAAS;AAAA,EACjC,KAAKC,OAAE;AAAA,EACP,UAAUC,OAAE;AAAA,EACZ,aAAa,kBAAkB,SAAA;AAAA;AAAA;AAAA;AAAA,EAK/B,iBAAiB,kBAAkB,SAAA,EAAW,SAAA;AAAA,EAE9C,wBAAwBD,OAAE,EAAS,SAAA,EAAW,SAAA;AAAA,EAC9C,SAASE,QAAE,EAAU,SAAA,EAAW,SAAA;AAAA,EAChC,MAAMD,OAAE,EAAS,SAAA,EAAW,SAAA;AAC9B,CAAC;AAID,MAAM,sBAAsB,WAAW,OAAO;AAAA,EAC5C,SAASD,OAAE;AACb,CAAC;AAEM,MAAM,gBAAgBD,OAAS;AAAA,EACpC,MAAME,OAAE;AAAA,EACR,SAASE,OAAS,UAAU;AAAA,EAC5B,YAAYC,MAAQH,OAAE,CAAQ,EAAE,SAAA;AAClC,CAAC;AAEM,MAAM,YAAY,cAAc,OAAO;AAAA,EAC5C,QAAQA,OAAE;AACZ,CAAC;AAEM,MAAM,qBAAqB,UAAU,OAAO;AAAA,EACjD,KAAKD,OAAE;AAAA;AAAA;AAAA,EAGP,WAAWA,OAAE,EAAS,SAAA;AAAA,EACtB,SAASG,OAAS,mBAAmB;AAAA,EACrC,iBAAiB,wBAAwB,SAAA;AAAA,EACzC,cAAcA,OAASJ,OAAS,EAAC,WAAWE,OAAE,EAAS,SAAA,GAAW,CAAC;AACrE,CAAC;AA8CM,MAAM,kBAAkBd,aAAe,OAAO,MAAM;AAEpD,MAAM,gBAAgBY,OAAS;AAAA,EACpC,MAAME,OAAE;AAAA,EACR,WAAWA,OAAE;AAAA,EACb,QAAQC,QAAE;AAAA,EACV,SAASC,OAAS,eAAe;AACnC,CAAC;AAMM,MAAM,YAAY,cAAc,OAAO;AAAA,EAC5C,QAAQF,OAAE;AACZ,CAAC;AAIM,MAAM,qBAAqB,UAAU,OAAO;AAAA,EACjD,mBAAmBC,QAAE,EAAU,SAAA;AAAA,EAC/B,cAAcA,QAAE,EAAU,SAAA;AAAA,EAC1B,aAAaA,QAAE,EAAU,SAAA;AAC3B,CAAC;"}
@@ -1 +1 @@
- {"version":3,"file":"change-streamer.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/server/change-streamer.ts"],"names":[],"mappings":"AAmBA,OAAO,EAGL,KAAK,MAAM,EACZ,MAAM,uBAAuB,CAAC;AAK/B,wBAA8B,SAAS,CACrC,MAAM,EAAE,MAAM,EACd,GAAG,EAAE,MAAM,CAAC,UAAU,EACtB,GAAG,IAAI,EAAE,MAAM,EAAE,GAChB,OAAO,CAAC,IAAI,CAAC,CAuIf"}
+ {"version":3,"file":"change-streamer.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/server/change-streamer.ts"],"names":[],"mappings":"AAmBA,OAAO,EAGL,KAAK,MAAM,EACZ,MAAM,uBAAuB,CAAC;AAK/B,wBAA8B,SAAS,CACrC,MAAM,EAAE,MAAM,EACd,GAAG,EAAE,MAAM,CAAC,UAAU,EACtB,GAAG,IAAI,EAAE,MAAM,EAAE,GAChB,OAAO,CAAC,IAAI,CAAC,CAyIf"}
@@ -31,7 +31,8 @@ async function runWorker(parent, env, ...args) {
  address,
  protocol,
  startupDelayMs,
- backPressureLimitHeapProportion
+ backPressureLimitHeapProportion,
+ flowControlConsensusPaddingSeconds
  },
  upstream,
  change,
@@ -83,6 +84,7 @@ async function runWorker(parent, env, ...args) {
  subscriptionState,
  autoReset ?? false,
  backPressureLimitHeapProportion,
+ flowControlConsensusPaddingSeconds,
  setTimeout
  );
  break;
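Both hunks above thread a single new option, `flowControlConsensusPaddingSeconds`, from the `changeStreamer` section of the normalized config into `initializeStreamer`, alongside the existing `backPressureLimitHeapProportion`. A sketch of the pattern with simplified, assumed shapes (the option name comes from the diff; its units are seconds per the name, but its exact semantics are not documented here):

```ts
// Simplified shapes for illustration; not the package's actual types.
type ChangeStreamerConfig = {
  port: number;
  backPressureLimitHeapProportion: number;
  flowControlConsensusPaddingSeconds: number; // new in 0.26.1
};

// Stand-in for the real initializer, which takes many more arguments.
declare function initializeStreamer(
  backPressureLimitHeapProportion: number,
  flowControlConsensusPaddingSeconds: number,
): Promise<void>;

async function startStreamer(config: {changeStreamer: ChangeStreamerConfig}) {
  // Destructure both knobs from the config section, as the worker now does,
  // and forward them to the streamer's initializer.
  const {backPressureLimitHeapProportion, flowControlConsensusPaddingSeconds} =
    config.changeStreamer;
  await initializeStreamer(
    backPressureLimitHeapProportion,
    flowControlConsensusPaddingSeconds,
  );
}
```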
@@ -1 +1 @@
- {"version":3,"file":"change-streamer.js","sources":["../../../../../zero-cache/src/server/change-streamer.ts"],"sourcesContent":["import {assert} from '../../../shared/src/asserts.ts';\nimport {must} from '../../../shared/src/must.ts';\nimport {DatabaseInitError} from '../../../zqlite/src/db.ts';\nimport {getServerContext} from '../config/server-context.ts';\nimport {getNormalizedZeroConfig} from '../config/zero-config.ts';\nimport {deleteLiteDB} from '../db/delete-lite-db.ts';\nimport {warmupConnections} from '../db/warmup.ts';\nimport {initEventSink, publishCriticalEvent} from '../observability/events.ts';\nimport {initializeCustomChangeSource} from '../services/change-source/custom/change-source.ts';\nimport {initializePostgresChangeSource} from '../services/change-source/pg/change-source.ts';\nimport {BackupMonitor} from '../services/change-streamer/backup-monitor.ts';\nimport {ChangeStreamerHttpServer} from '../services/change-streamer/change-streamer-http.ts';\nimport {initializeStreamer} from '../services/change-streamer/change-streamer-service.ts';\nimport type {ChangeStreamerService} from '../services/change-streamer/change-streamer.ts';\nimport {ReplicaMonitor} from '../services/change-streamer/replica-monitor.ts';\nimport {AutoResetSignal} from '../services/change-streamer/schema/tables.ts';\nimport {exitAfter, runUntilKilled} from '../services/life-cycle.ts';\nimport {replicationStatusError} from '../services/replicator/replication-status.ts';\nimport {pgClient} from '../types/pg.ts';\nimport {\n parentWorker,\n singleProcessMode,\n type Worker,\n} from '../types/processes.ts';\nimport {getShardConfig} from '../types/shards.ts';\nimport {createLogContext} from './logging.ts';\nimport {startOtelAuto} from './otel-start.ts';\n\nexport default async function runWorker(\n parent: Worker,\n env: NodeJS.ProcessEnv,\n ...args: string[]\n): Promise<void> {\n assert(args.length > 0, `parent startMs not specified`);\n const parentStartMs = parseInt(args[0]);\n\n const config = getNormalizedZeroConfig({env, argv: args.slice(1)});\n const {\n taskID,\n changeStreamer: {\n port,\n address,\n protocol,\n startupDelayMs,\n backPressureLimitHeapProportion,\n },\n upstream,\n change,\n replica,\n initialSync,\n litestream,\n } = config;\n\n startOtelAuto(createLogContext(config, {worker: 'change-streamer'}, false));\n const lc = createLogContext(config, {worker: 'change-streamer'}, true);\n initEventSink(lc, config);\n\n // Kick off DB connection warmup in the background.\n const changeDB = pgClient(\n lc,\n change.db,\n {\n max: change.maxConns,\n connection: {['application_name']: 'zero-change-streamer'},\n },\n {sendStringAsJson: true},\n );\n void warmupConnections(lc, changeDB, 'change');\n\n const {autoReset} = config;\n const shard = getShardConfig(config);\n\n let changeStreamer: ChangeStreamerService | undefined;\n\n const context = getServerContext(config);\n\n for (const first of [true, false]) {\n try {\n // Note: This performs initial sync of the replica if necessary.\n const {changeSource, subscriptionState} =\n upstream.type === 'pg'\n ? await initializePostgresChangeSource(\n lc,\n upstream.db,\n shard,\n replica.file,\n initialSync,\n context,\n )\n : await initializeCustomChangeSource(\n lc,\n upstream.db,\n shard,\n replica.file,\n context,\n );\n\n changeStreamer = await initializeStreamer(\n lc,\n shard,\n taskID,\n address,\n protocol,\n changeDB,\n changeSource,\n subscriptionState,\n autoReset ?? 
false,\n backPressureLimitHeapProportion,\n setTimeout,\n );\n break;\n } catch (e) {\n if (first && e instanceof AutoResetSignal) {\n lc.warn?.(`resetting replica ${replica.file}`, e);\n // TODO: Make deleteLiteDB work with litestream. It will probably have to be\n // a semantic wipe instead of a file delete.\n deleteLiteDB(replica.file);\n continue; // execute again with a fresh initial-sync\n }\n await publishCriticalEvent(\n lc,\n replicationStatusError(lc, 'Initializing', e),\n );\n if (e instanceof DatabaseInitError) {\n throw new Error(\n `Cannot open ZERO_REPLICA_FILE at \"${replica.file}\". Please check that the path is valid.`,\n {cause: e},\n );\n }\n throw e;\n }\n }\n // impossible: upstream must have advanced in order for replication to be stuck.\n assert(changeStreamer, `resetting replica did not advance replicaVersion`);\n\n const {backupURL, port: metricsPort} = litestream;\n const monitor = backupURL\n ? new BackupMonitor(\n lc,\n backupURL,\n `http://localhost:${metricsPort}/metrics`,\n changeStreamer,\n // The time between when the zero-cache was started to when the\n // change-streamer is ready to start serves as the initial delay for\n // watermark cleanup (as it either includes a similar replica\n // restoration/preparation step, or an initial-sync, which\n // generally takes longer).\n //\n // Consider: Also account for permanent volumes?\n Date.now() - parentStartMs,\n )\n : new ReplicaMonitor(lc, replica.file, changeStreamer);\n\n const changeStreamerWebServer = new ChangeStreamerHttpServer(\n lc,\n config,\n {port, startupDelayMs},\n parent,\n changeStreamer,\n monitor instanceof BackupMonitor ? monitor : null,\n );\n\n parent.send(['ready', {ready: true}]);\n\n // Note: The changeStreamer itself is not started here; it is started by the\n // changeStreamerWebServer.\n return runUntilKilled(lc, parent, changeStreamerWebServer, monitor);\n}\n\n// fork()\nif (!singleProcessMode()) {\n void exitAfter(() =>\n runWorker(must(parentWorker), process.env, ...process.argv.slice(2)),\n 
);\n}\n"],"names":[],"mappings":";;;;;;;;;;;;;;;;;;;;;;AA4BA,eAA8B,UAC5B,QACA,QACG,MACY;AACf,SAAO,KAAK,SAAS,GAAG,8BAA8B;AACtD,QAAM,gBAAgB,SAAS,KAAK,CAAC,CAAC;AAEtC,QAAM,SAAS,wBAAwB,EAAC,KAAK,MAAM,KAAK,MAAM,CAAC,GAAE;AACjE,QAAM;AAAA,IACJ;AAAA,IACA,gBAAgB;AAAA,MACd;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,IAEF;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EAAA,IACE;AAEJ,gBAAc,iBAAiB,QAAQ,EAAC,QAAQ,kBAAA,GAAoB,KAAK,CAAC;AAC1E,QAAM,KAAK,iBAAiB,QAAQ,EAAC,QAAQ,kBAAA,GAAoB,IAAI;AACrE,gBAAc,IAAI,MAAM;AAGxB,QAAM,WAAW;AAAA,IACf;AAAA,IACA,OAAO;AAAA,IACP;AAAA,MACE,KAAK,OAAO;AAAA,MACZ,YAAY,EAAC,CAAC,kBAAkB,GAAG,uBAAA;AAAA,IAAsB;AAAA,IAE3D,EAAC,kBAAkB,KAAA;AAAA,EAAI;AAEzB,OAAK,kBAAkB,IAAI,UAAU,QAAQ;AAE7C,QAAM,EAAC,cAAa;AACpB,QAAM,QAAQ,eAAe,MAAM;AAEnC,MAAI;AAEJ,QAAM,UAAU,iBAAiB,MAAM;AAEvC,aAAW,SAAS,CAAC,MAAM,KAAK,GAAG;AACjC,QAAI;AAEF,YAAM,EAAC,cAAc,kBAAA,IACnB,SAAS,SAAS,OACd,MAAM;AAAA,QACJ;AAAA,QACA,SAAS;AAAA,QACT;AAAA,QACA,QAAQ;AAAA,QACR;AAAA,QACA;AAAA,MAAA,IAEF,MAAM;AAAA,QACJ;AAAA,QACA,SAAS;AAAA,QACT;AAAA,QACA,QAAQ;AAAA,QACR;AAAA,MAAA;AAGR,uBAAiB,MAAM;AAAA,QACrB;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA,aAAa;AAAA,QACb;AAAA,QACA;AAAA,MAAA;AAEF;AAAA,IACF,SAAS,GAAG;AACV,UAAI,SAAS,aAAa,iBAAiB;AACzC,WAAG,OAAO,qBAAqB,QAAQ,IAAI,IAAI,CAAC;AAGhD,qBAAa,QAAQ,IAAI;AACzB;AAAA,MACF;AACA,YAAM;AAAA,QACJ;AAAA,QACA,uBAAuB,IAAI,gBAAgB,CAAC;AAAA,MAAA;AAE9C,UAAI,aAAa,mBAAmB;AAClC,cAAM,IAAI;AAAA,UACR,qCAAqC,QAAQ,IAAI;AAAA,UACjD,EAAC,OAAO,EAAA;AAAA,QAAC;AAAA,MAEb;AACA,YAAM;AAAA,IACR;AAAA,EACF;AAEA,SAAO,gBAAgB,kDAAkD;AAEzE,QAAM,EAAC,WAAW,MAAM,YAAA,IAAe;AACvC,QAAM,UAAU,YACZ,IAAI;AAAA,IACF;AAAA,IACA;AAAA,IACA,oBAAoB,WAAW;AAAA,IAC/B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAQA,KAAK,QAAQ;AAAA,EAAA,IAEf,IAAI,eAAe,IAAI,QAAQ,MAAM,cAAc;AAEvD,QAAM,0BAA0B,IAAI;AAAA,IAClC;AAAA,IACA;AAAA,IACA,EAAC,MAAM,eAAA;AAAA,IACP;AAAA,IACA;AAAA,IACA,mBAAmB,gBAAgB,UAAU;AAAA,EAAA;AAG/C,SAAO,KAAK,CAAC,SAAS,EAAC,OAAO,KAAA,CAAK,CAAC;AAIpC,SAAO,eAAe,IAAI,QAAQ,yBAAyB,OAAO;AACpE;AAGA,IAAI,CAAC,qBAAqB;AACxB,OAAK;AAAA,IAAU,MACb,UAAU,KAAK,YAAY,GAAG,QAAQ,KAAK,GAAG,QAAQ,KAAK,MAAM,CAAC,CAAC;AAAA,EAAA;AAEvE;"}
+ {"version":3,"file":"change-streamer.js","sources":["../../../../../zero-cache/src/server/change-streamer.ts"],"sourcesContent":["import {assert} from '../../../shared/src/asserts.ts';\nimport {must} from '../../../shared/src/must.ts';\nimport {DatabaseInitError} from '../../../zqlite/src/db.ts';\nimport {getServerContext} from '../config/server-context.ts';\nimport {getNormalizedZeroConfig} from '../config/zero-config.ts';\nimport {deleteLiteDB} from '../db/delete-lite-db.ts';\nimport {warmupConnections} from '../db/warmup.ts';\nimport {initEventSink, publishCriticalEvent} from '../observability/events.ts';\nimport {initializeCustomChangeSource} from '../services/change-source/custom/change-source.ts';\nimport {initializePostgresChangeSource} from '../services/change-source/pg/change-source.ts';\nimport {BackupMonitor} from '../services/change-streamer/backup-monitor.ts';\nimport {ChangeStreamerHttpServer} from '../services/change-streamer/change-streamer-http.ts';\nimport {initializeStreamer} from '../services/change-streamer/change-streamer-service.ts';\nimport type {ChangeStreamerService} from '../services/change-streamer/change-streamer.ts';\nimport {ReplicaMonitor} from '../services/change-streamer/replica-monitor.ts';\nimport {AutoResetSignal} from '../services/change-streamer/schema/tables.ts';\nimport {exitAfter, runUntilKilled} from '../services/life-cycle.ts';\nimport {replicationStatusError} from '../services/replicator/replication-status.ts';\nimport {pgClient} from '../types/pg.ts';\nimport {\n parentWorker,\n singleProcessMode,\n type Worker,\n} from '../types/processes.ts';\nimport {getShardConfig} from '../types/shards.ts';\nimport {createLogContext} from './logging.ts';\nimport {startOtelAuto} from './otel-start.ts';\n\nexport default async function runWorker(\n parent: Worker,\n env: NodeJS.ProcessEnv,\n ...args: string[]\n): Promise<void> {\n assert(args.length > 0, `parent startMs not specified`);\n const parentStartMs = parseInt(args[0]);\n\n const config = getNormalizedZeroConfig({env, argv: args.slice(1)});\n const {\n taskID,\n changeStreamer: {\n port,\n address,\n protocol,\n startupDelayMs,\n backPressureLimitHeapProportion,\n flowControlConsensusPaddingSeconds,\n },\n upstream,\n change,\n replica,\n initialSync,\n litestream,\n } = config;\n\n startOtelAuto(createLogContext(config, {worker: 'change-streamer'}, false));\n const lc = createLogContext(config, {worker: 'change-streamer'}, true);\n initEventSink(lc, config);\n\n // Kick off DB connection warmup in the background.\n const changeDB = pgClient(\n lc,\n change.db,\n {\n max: change.maxConns,\n connection: {['application_name']: 'zero-change-streamer'},\n },\n {sendStringAsJson: true},\n );\n void warmupConnections(lc, changeDB, 'change');\n\n const {autoReset} = config;\n const shard = getShardConfig(config);\n\n let changeStreamer: ChangeStreamerService | undefined;\n\n const context = getServerContext(config);\n\n for (const first of [true, false]) {\n try {\n // Note: This performs initial sync of the replica if necessary.\n const {changeSource, subscriptionState} =\n upstream.type === 'pg'\n ? await initializePostgresChangeSource(\n lc,\n upstream.db,\n shard,\n replica.file,\n initialSync,\n context,\n )\n : await initializeCustomChangeSource(\n lc,\n upstream.db,\n shard,\n replica.file,\n context,\n );\n\n changeStreamer = await initializeStreamer(\n lc,\n shard,\n taskID,\n address,\n protocol,\n changeDB,\n changeSource,\n subscriptionState,\n autoReset ?? 
false,\n backPressureLimitHeapProportion,\n flowControlConsensusPaddingSeconds,\n setTimeout,\n );\n break;\n } catch (e) {\n if (first && e instanceof AutoResetSignal) {\n lc.warn?.(`resetting replica ${replica.file}`, e);\n // TODO: Make deleteLiteDB work with litestream. It will probably have to be\n // a semantic wipe instead of a file delete.\n deleteLiteDB(replica.file);\n continue; // execute again with a fresh initial-sync\n }\n await publishCriticalEvent(\n lc,\n replicationStatusError(lc, 'Initializing', e),\n );\n if (e instanceof DatabaseInitError) {\n throw new Error(\n `Cannot open ZERO_REPLICA_FILE at \"${replica.file}\". Please check that the path is valid.`,\n {cause: e},\n );\n }\n throw e;\n }\n }\n // impossible: upstream must have advanced in order for replication to be stuck.\n assert(changeStreamer, `resetting replica did not advance replicaVersion`);\n\n const {backupURL, port: metricsPort} = litestream;\n const monitor = backupURL\n ? new BackupMonitor(\n lc,\n backupURL,\n `http://localhost:${metricsPort}/metrics`,\n changeStreamer,\n // The time between when the zero-cache was started to when the\n // change-streamer is ready to start serves as the initial delay for\n // watermark cleanup (as it either includes a similar replica\n // restoration/preparation step, or an initial-sync, which\n // generally takes longer).\n //\n // Consider: Also account for permanent volumes?\n Date.now() - parentStartMs,\n )\n : new ReplicaMonitor(lc, replica.file, changeStreamer);\n\n const changeStreamerWebServer = new ChangeStreamerHttpServer(\n lc,\n config,\n {port, startupDelayMs},\n parent,\n changeStreamer,\n monitor instanceof BackupMonitor ? monitor : null,\n );\n\n parent.send(['ready', {ready: true}]);\n\n // Note: The changeStreamer itself is not started here; it is started by the\n // changeStreamerWebServer.\n return runUntilKilled(lc, parent, changeStreamerWebServer, monitor);\n}\n\n// fork()\nif (!singleProcessMode()) {\n void exitAfter(() =>\n runWorker(must(parentWorker), process.env, ...process.argv.slice(2)),\n 
);\n}\n"],"names":[],"mappings":";;;;;;;;;;;;;;;;;;;;;;AA4BA,eAA8B,UAC5B,QACA,QACG,MACY;AACf,SAAO,KAAK,SAAS,GAAG,8BAA8B;AACtD,QAAM,gBAAgB,SAAS,KAAK,CAAC,CAAC;AAEtC,QAAM,SAAS,wBAAwB,EAAC,KAAK,MAAM,KAAK,MAAM,CAAC,GAAE;AACjE,QAAM;AAAA,IACJ;AAAA,IACA,gBAAgB;AAAA,MACd;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,IAEF;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EAAA,IACE;AAEJ,gBAAc,iBAAiB,QAAQ,EAAC,QAAQ,kBAAA,GAAoB,KAAK,CAAC;AAC1E,QAAM,KAAK,iBAAiB,QAAQ,EAAC,QAAQ,kBAAA,GAAoB,IAAI;AACrE,gBAAc,IAAI,MAAM;AAGxB,QAAM,WAAW;AAAA,IACf;AAAA,IACA,OAAO;AAAA,IACP;AAAA,MACE,KAAK,OAAO;AAAA,MACZ,YAAY,EAAC,CAAC,kBAAkB,GAAG,uBAAA;AAAA,IAAsB;AAAA,IAE3D,EAAC,kBAAkB,KAAA;AAAA,EAAI;AAEzB,OAAK,kBAAkB,IAAI,UAAU,QAAQ;AAE7C,QAAM,EAAC,cAAa;AACpB,QAAM,QAAQ,eAAe,MAAM;AAEnC,MAAI;AAEJ,QAAM,UAAU,iBAAiB,MAAM;AAEvC,aAAW,SAAS,CAAC,MAAM,KAAK,GAAG;AACjC,QAAI;AAEF,YAAM,EAAC,cAAc,kBAAA,IACnB,SAAS,SAAS,OACd,MAAM;AAAA,QACJ;AAAA,QACA,SAAS;AAAA,QACT;AAAA,QACA,QAAQ;AAAA,QACR;AAAA,QACA;AAAA,MAAA,IAEF,MAAM;AAAA,QACJ;AAAA,QACA,SAAS;AAAA,QACT;AAAA,QACA,QAAQ;AAAA,QACR;AAAA,MAAA;AAGR,uBAAiB,MAAM;AAAA,QACrB;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA,aAAa;AAAA,QACb;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAEF;AAAA,IACF,SAAS,GAAG;AACV,UAAI,SAAS,aAAa,iBAAiB;AACzC,WAAG,OAAO,qBAAqB,QAAQ,IAAI,IAAI,CAAC;AAGhD,qBAAa,QAAQ,IAAI;AACzB;AAAA,MACF;AACA,YAAM;AAAA,QACJ;AAAA,QACA,uBAAuB,IAAI,gBAAgB,CAAC;AAAA,MAAA;AAE9C,UAAI,aAAa,mBAAmB;AAClC,cAAM,IAAI;AAAA,UACR,qCAAqC,QAAQ,IAAI;AAAA,UACjD,EAAC,OAAO,EAAA;AAAA,QAAC;AAAA,MAEb;AACA,YAAM;AAAA,IACR;AAAA,EACF;AAEA,SAAO,gBAAgB,kDAAkD;AAEzE,QAAM,EAAC,WAAW,MAAM,YAAA,IAAe;AACvC,QAAM,UAAU,YACZ,IAAI;AAAA,IACF;AAAA,IACA;AAAA,IACA,oBAAoB,WAAW;AAAA,IAC/B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAQA,KAAK,QAAQ;AAAA,EAAA,IAEf,IAAI,eAAe,IAAI,QAAQ,MAAM,cAAc;AAEvD,QAAM,0BAA0B,IAAI;AAAA,IAClC;AAAA,IACA;AAAA,IACA,EAAC,MAAM,eAAA;AAAA,IACP;AAAA,IACA;AAAA,IACA,mBAAmB,gBAAgB,UAAU;AAAA,EAAA;AAG/C,SAAO,KAAK,CAAC,SAAS,EAAC,OAAO,KAAA,CAAK,CAAC;AAIpC,SAAO,eAAe,IAAI,QAAQ,yBAAyB,OAAO;AACpE;AAGA,IAAI,CAAC,qBAAqB;AACxB,OAAK;AAAA,IAAU,MACb,UAAU,KAAK,YAAY,GAAG,QAAQ,KAAK,GAAG,QAAQ,KAAK,MAAM,CAAC,CAAC;AAAA,EAAA;AAEvE;"}
@@ -83,6 +83,7 @@ async function analyzeQuery(lc, config, clientSchema, ast, syncedRows = true, ve
  syncedRows,
  vendedRows,
  auth,
+ db,
  tableSpecs,
  permissions,
  costModel,
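The one-line change above threads the replica's SQLite handle into the options bag handed to runAst, so downstream analysis (e.g. the SQLite cost model and EXPLAIN output) can reuse the connection that analyzeQuery already opened. A minimal sketch of the idea — the Db and AnalyzeOpts shapes are illustrative, not the package's actual types:

    // Sketch only: an options bag that now carries the database handle so
    // downstream helpers can run EXPLAIN-style queries on the same connection
    // instead of opening their own.
    interface Db {
      prepare(sql: string): {get(): unknown};
    }

    interface AnalyzeOpts {
      syncedRows: boolean;
      vendedRows: boolean;
      db: Db; // the field added in this release's analyze path
    }

    function explainOnSameConnection(opts: AnalyzeOpts, sql: string): unknown {
      // Because `db` travels with the options, no second connection is needed.
      return opts.db.prepare(`EXPLAIN QUERY PLAN ${sql}`).get();
    }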
@@ -1 +1 @@
- {"version":3,"file":"analyze.js","sources":["../../../../../zero-cache/src/services/analyze.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport type {AnalyzeQueryResult} from '../../../zero-protocol/src/analyze-query-result.ts';\nimport type {AST} from '../../../zero-protocol/src/ast.ts';\nimport type {ClientSchema} from '../../../zero-protocol/src/client-schema.ts';\nimport type {PermissionsConfig} from '../../../zero-schema/src/compiled-permissions.ts';\nimport {Debug} from '../../../zql/src/builder/debug-delegate.ts';\nimport {MemoryStorage} from '../../../zql/src/ivm/memory-storage.ts';\nimport {\n AccumulatorDebugger,\n serializePlanDebugEvents,\n} from '../../../zql/src/planner/planner-debug.ts';\nimport {Database} from '../../../zqlite/src/db.ts';\nimport {explainQueries} from '../../../zqlite/src/explain-queries.ts';\nimport {createSQLiteCostModel} from '../../../zqlite/src/sqlite-cost-model.ts';\nimport {TableSource} from '../../../zqlite/src/table-source.ts';\nimport type {JWTAuth} from '../auth/auth.ts';\nimport type {NormalizedZeroConfig} from '../config/normalize.ts';\nimport {computeZqlSpecs, mustGetTableSpec} from '../db/lite-tables.ts';\nimport type {LiteAndZqlSpec, LiteTableSpec} from '../db/specs.ts';\nimport {runAst} from './run-ast.ts';\nimport {TimeSliceTimer} from './view-syncer/view-syncer.ts';\n\nconst TIME_SLICE_LAP_THRESHOLD_MS = 200;\n\nexport async function analyzeQuery(\n lc: LogContext,\n config: NormalizedZeroConfig,\n clientSchema: ClientSchema,\n ast: AST,\n syncedRows = true,\n vendedRows = false,\n permissions?: PermissionsConfig,\n auth?: JWTAuth,\n joinPlans = false,\n): Promise<AnalyzeQueryResult> {\n using db = new Database(lc, config.replica.file);\n const fullTables = new Map<string, LiteTableSpec>();\n const tableSpecs = new Map<string, LiteAndZqlSpec>();\n const tables = new Map<string, TableSource>();\n\n computeZqlSpecs(\n lc,\n db,\n {includeBackfillingColumns: false},\n tableSpecs,\n fullTables,\n );\n\n const planDebugger = joinPlans ? new AccumulatorDebugger() : undefined;\n const costModel = joinPlans\n ? createSQLiteCostModel(db, tableSpecs)\n : undefined;\n const timer = await new TimeSliceTimer(lc).start();\n const shouldYield = () => timer.elapsedLap() > TIME_SLICE_LAP_THRESHOLD_MS;\n const yieldProcess = () => timer.yieldProcess();\n const result = await runAst(\n lc,\n clientSchema,\n ast,\n true,\n {\n applyPermissions: permissions !== undefined,\n syncedRows,\n vendedRows,\n auth,\n db,\n tableSpecs,\n permissions,\n costModel,\n planDebugger,\n host: {\n debug: new Debug(),\n getSource(tableName: string) {\n let source = tables.get(tableName);\n if (source) {\n return source;\n }\n\n const tableSpec = mustGetTableSpec(tableSpecs, tableName);\n const {primaryKey} = tableSpec.tableSpec;\n\n source = new TableSource(\n lc,\n config.log,\n db,\n tableName,\n tableSpec.zqlSpec,\n primaryKey,\n shouldYield,\n );\n tables.set(tableName, source);\n return source;\n },\n createStorage() {\n return new MemoryStorage();\n },\n decorateSourceInput: input => input,\n decorateInput: input => input,\n addEdge() {},\n decorateFilterInput: input => input,\n },\n },\n yieldProcess,\n );\n\n result.sqlitePlans = explainQueries(result.readRowCountsByQuery ?? 
{}, db);\n\n if (planDebugger) {\n result.joinPlans = serializePlanDebugEvents(planDebugger.events);\n }\n\n return result;\n}\n"],"names":[],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAsBA,MAAM,8BAA8B;AAEpC,eAAsB,aACpB,IACA,QACA,cACA,KACA,aAAa,MACb,aAAa,OACb,aACA,MACA,YAAY,OACiB;AAC7B;AAAA;AAAA,UAAM,KAAK,oBAAI,SAAS,IAAI,OAAO,QAAQ,IAAI;AAC/C,UAAM,iCAAiB,IAAA;AACvB,UAAM,iCAAiB,IAAA;AACvB,UAAM,6BAAa,IAAA;AAEnB;AAAA,MACE;AAAA,MACA;AAAA,MACA,EAAC,2BAA2B,MAAA;AAAA,MAC5B;AAAA,MACA;AAAA,IAAA;AAGF,UAAM,eAAe,YAAY,IAAI,oBAAA,IAAwB;AAC7D,UAAM,YAAY,YACd,sBAAsB,IAAI,UAAU,IACpC;AACJ,UAAM,QAAQ,MAAM,IAAI,eAAe,EAAE,EAAE,MAAA;AAC3C,UAAM,cAAc,MAAM,MAAM,WAAA,IAAe;AAC/C,UAAM,eAAe,MAAM,MAAM,aAAA;AACjC,UAAM,SAAS,MAAM;AAAA,MACnB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,QACE,kBAAkB,gBAAgB;AAAA,QAClC;AAAA,QACA;AAAA,QACA;AAAA,QAEA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA,MAAM;AAAA,UACJ,OAAO,IAAI,MAAA;AAAA,UACX,UAAU,WAAmB;AAC3B,gBAAI,SAAS,OAAO,IAAI,SAAS;AACjC,gBAAI,QAAQ;AACV,qBAAO;AAAA,YACT;AAEA,kBAAM,YAAY,iBAAiB,YAAY,SAAS;AACxD,kBAAM,EAAC,eAAc,UAAU;AAE/B,qBAAS,IAAI;AAAA,cACX;AAAA,cACA,OAAO;AAAA,cACP;AAAA,cACA;AAAA,cACA,UAAU;AAAA,cACV;AAAA,cACA;AAAA,YAAA;AAEF,mBAAO,IAAI,WAAW,MAAM;AAC5B,mBAAO;AAAA,UACT;AAAA,UACA,gBAAgB;AACd,mBAAO,IAAI,cAAA;AAAA,UACb;AAAA,UACA,qBAAqB,CAAA,UAAS;AAAA,UAC9B,eAAe,CAAA,UAAS;AAAA,UACxB,UAAU;AAAA,UAAC;AAAA,UACX,qBAAqB,CAAA,UAAS;AAAA,QAAA;AAAA,MAChC;AAAA,MAEF;AAAA,IAAA;AAGF,WAAO,cAAc,eAAe,OAAO,wBAAwB,CAAA,GAAI,EAAE;AAEzE,QAAI,cAAc;AAChB,aAAO,YAAY,yBAAyB,aAAa,MAAM;AAAA,IACjE;AAEA,WAAO;AAAA,WA5EP;AAAA;AAAA;AAAA;AAAA;AA6EF;"}
+ {"version":3,"file":"analyze.js","sources":["../../../../../zero-cache/src/services/analyze.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport type {AnalyzeQueryResult} from '../../../zero-protocol/src/analyze-query-result.ts';\nimport type {AST} from '../../../zero-protocol/src/ast.ts';\nimport type {ClientSchema} from '../../../zero-protocol/src/client-schema.ts';\nimport type {PermissionsConfig} from '../../../zero-schema/src/compiled-permissions.ts';\nimport {Debug} from '../../../zql/src/builder/debug-delegate.ts';\nimport {MemoryStorage} from '../../../zql/src/ivm/memory-storage.ts';\nimport {\n AccumulatorDebugger,\n serializePlanDebugEvents,\n} from '../../../zql/src/planner/planner-debug.ts';\nimport {Database} from '../../../zqlite/src/db.ts';\nimport {explainQueries} from '../../../zqlite/src/explain-queries.ts';\nimport {createSQLiteCostModel} from '../../../zqlite/src/sqlite-cost-model.ts';\nimport {TableSource} from '../../../zqlite/src/table-source.ts';\nimport type {JWTAuth} from '../auth/auth.ts';\nimport type {NormalizedZeroConfig} from '../config/normalize.ts';\nimport {computeZqlSpecs, mustGetTableSpec} from '../db/lite-tables.ts';\nimport type {LiteAndZqlSpec, LiteTableSpec} from '../db/specs.ts';\nimport {runAst} from './run-ast.ts';\nimport {TimeSliceTimer} from './view-syncer/view-syncer.ts';\n\nconst TIME_SLICE_LAP_THRESHOLD_MS = 200;\n\nexport async function analyzeQuery(\n lc: LogContext,\n config: NormalizedZeroConfig,\n clientSchema: ClientSchema,\n ast: AST,\n syncedRows = true,\n vendedRows = false,\n permissions?: PermissionsConfig,\n auth?: JWTAuth,\n joinPlans = false,\n): Promise<AnalyzeQueryResult> {\n using db = new Database(lc, config.replica.file);\n const fullTables = new Map<string, LiteTableSpec>();\n const tableSpecs = new Map<string, LiteAndZqlSpec>();\n const tables = new Map<string, TableSource>();\n\n computeZqlSpecs(\n lc,\n db,\n {includeBackfillingColumns: false},\n tableSpecs,\n fullTables,\n );\n\n const planDebugger = joinPlans ? new AccumulatorDebugger() : undefined;\n const costModel = joinPlans\n ? createSQLiteCostModel(db, tableSpecs)\n : undefined;\n const timer = await new TimeSliceTimer(lc).start();\n const shouldYield = () => timer.elapsedLap() > TIME_SLICE_LAP_THRESHOLD_MS;\n const yieldProcess = () => timer.yieldProcess();\n const result = await runAst(\n lc,\n clientSchema,\n ast,\n true,\n {\n applyPermissions: permissions !== undefined,\n syncedRows,\n vendedRows,\n auth,\n db,\n tableSpecs,\n permissions,\n costModel,\n planDebugger,\n host: {\n debug: new Debug(),\n getSource(tableName: string) {\n let source = tables.get(tableName);\n if (source) {\n return source;\n }\n\n const tableSpec = mustGetTableSpec(tableSpecs, tableName);\n const {primaryKey} = tableSpec.tableSpec;\n\n source = new TableSource(\n lc,\n config.log,\n db,\n tableName,\n tableSpec.zqlSpec,\n primaryKey,\n shouldYield,\n );\n tables.set(tableName, source);\n return source;\n },\n createStorage() {\n return new MemoryStorage();\n },\n decorateSourceInput: input => input,\n decorateInput: input => input,\n addEdge() {},\n decorateFilterInput: input => input,\n },\n },\n yieldProcess,\n );\n\n result.sqlitePlans = explainQueries(result.readRowCountsByQuery ?? 
{}, db);\n\n if (planDebugger) {\n result.joinPlans = serializePlanDebugEvents(planDebugger.events);\n }\n\n return result;\n}\n"],"names":[],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAsBA,MAAM,8BAA8B;AAEpC,eAAsB,aACpB,IACA,QACA,cACA,KACA,aAAa,MACb,aAAa,OACb,aACA,MACA,YAAY,OACiB;AAC7B;AAAA;AAAA,UAAM,KAAK,oBAAI,SAAS,IAAI,OAAO,QAAQ,IAAI;AAC/C,UAAM,iCAAiB,IAAA;AACvB,UAAM,iCAAiB,IAAA;AACvB,UAAM,6BAAa,IAAA;AAEnB;AAAA,MACE;AAAA,MACA;AAAA,MACA,EAAC,2BAA2B,MAAA;AAAA,MAC5B;AAAA,MACA;AAAA,IAAA;AAGF,UAAM,eAAe,YAAY,IAAI,oBAAA,IAAwB;AAC7D,UAAM,YAAY,YACd,sBAAsB,IAAI,UAAU,IACpC;AACJ,UAAM,QAAQ,MAAM,IAAI,eAAe,EAAE,EAAE,MAAA;AAC3C,UAAM,cAAc,MAAM,MAAM,WAAA,IAAe;AAC/C,UAAM,eAAe,MAAM,MAAM,aAAA;AACjC,UAAM,SAAS,MAAM;AAAA,MACnB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,QACE,kBAAkB,gBAAgB;AAAA,QAClC;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA,MAAM;AAAA,UACJ,OAAO,IAAI,MAAA;AAAA,UACX,UAAU,WAAmB;AAC3B,gBAAI,SAAS,OAAO,IAAI,SAAS;AACjC,gBAAI,QAAQ;AACV,qBAAO;AAAA,YACT;AAEA,kBAAM,YAAY,iBAAiB,YAAY,SAAS;AACxD,kBAAM,EAAC,eAAc,UAAU;AAE/B,qBAAS,IAAI;AAAA,cACX;AAAA,cACA,OAAO;AAAA,cACP;AAAA,cACA;AAAA,cACA,UAAU;AAAA,cACV;AAAA,cACA;AAAA,YAAA;AAEF,mBAAO,IAAI,WAAW,MAAM;AAC5B,mBAAO;AAAA,UACT;AAAA,UACA,gBAAgB;AACd,mBAAO,IAAI,cAAA;AAAA,UACb;AAAA,UACA,qBAAqB,CAAA,UAAS;AAAA,UAC9B,eAAe,CAAA,UAAS;AAAA,UACxB,UAAU;AAAA,UAAC;AAAA,UACX,qBAAqB,CAAA,UAAS;AAAA,QAAA;AAAA,MAChC;AAAA,MAEF;AAAA,IAAA;AAGF,WAAO,cAAc,eAAe,OAAO,wBAAwB,CAAA,GAAI,EAAE;AAEzE,QAAI,cAAc;AAChB,aAAO,YAAY,yBAAyB,aAAa,MAAM;AAAA,IACjE;AAEA,WAAO;AAAA,WA5EP;AAAA;AAAA;AAAA;AAAA;AA6EF;"}
@@ -5,5 +5,7 @@ export declare function initReplica(log: LogContext, debugName: string, dbPath:
  export declare function upgradeReplica(log: LogContext, debugName: string, dbPath: string): Promise<void>;
  export declare const CREATE_V6_COLUMN_METADATA_TABLE = "\n CREATE TABLE \"_zero.column_metadata\" (\n table_name TEXT NOT NULL,\n column_name TEXT NOT NULL,\n upstream_type TEXT NOT NULL,\n is_not_null INTEGER NOT NULL,\n is_enum INTEGER NOT NULL,\n is_array INTEGER NOT NULL,\n character_max_length INTEGER,\n PRIMARY KEY (table_name, column_name)\n );\n";
  export declare const CREATE_V7_CHANGE_LOG = "\n CREATE TABLE \"_zero.changeLog2\" (\n \"stateVersion\" TEXT NOT NULL,\n \"pos\" INT NOT NULL,\n \"table\" TEXT NOT NULL,\n \"rowKey\" TEXT NOT NULL,\n \"op\" TEXT NOT NULL,\n PRIMARY KEY(\"stateVersion\", \"pos\"),\n UNIQUE(\"table\", \"rowKey\")\n );\n";
+ export declare const CREATE_V9_TABLE_METADATA_TABLE = "\n CREATE TABLE \"_zero.tableMetadata\" (\n \"schema\" TEXT NOT NULL,\n \"table\" TEXT NOT NULL,\n \"metadata\" TEXT NOT NULL,\n PRIMARY KEY (\"schema\", \"table\")\n );\n";
  export declare const schemaVersionMigrationMap: IncrementalMigrationMap;
+ export declare const CURRENT_SCHEMA_VERSION: number;
  //# sourceMappingURL=replica-schema.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"replica-schema.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/change-source/common/replica-schema.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,iCAAiC,CAAC;AAE9D,OAAO,EAEL,KAAK,uBAAuB,EAE7B,MAAM,+BAA+B,CAAC;AASvC,wBAAsB,WAAW,CAC/B,GAAG,EAAE,UAAU,EACf,SAAS,EAAE,MAAM,EACjB,MAAM,EAAE,MAAM,EACd,WAAW,EAAE,CAAC,EAAE,EAAE,UAAU,EAAE,EAAE,EAAE,QAAQ,KAAK,OAAO,CAAC,IAAI,CAAC,GAC3D,OAAO,CAAC,IAAI,CAAC,CAoBf;AAED,wBAAsB,cAAc,CAClC,GAAG,EAAE,UAAU,EACf,SAAS,EAAE,MAAM,EACjB,MAAM,EAAE,MAAM,iBAgBf;AAED,eAAO,MAAM,+BAA+B,0UAW3C,CAAC;AAEF,eAAO,MAAM,oBAAoB,8XAUhC,CAAC;AAEF,eAAO,MAAM,yBAAyB,EAAE,uBA4EvC,CAAC"}
+ {"version":3,"file":"replica-schema.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/change-source/common/replica-schema.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,iCAAiC,CAAC;AAE9D,OAAO,EAEL,KAAK,uBAAuB,EAE7B,MAAM,+BAA+B,CAAC;AAQvC,wBAAsB,WAAW,CAC/B,GAAG,EAAE,UAAU,EACf,SAAS,EAAE,MAAM,EACjB,MAAM,EAAE,MAAM,EACd,WAAW,EAAE,CAAC,EAAE,EAAE,UAAU,EAAE,EAAE,EAAE,QAAQ,KAAK,OAAO,CAAC,IAAI,CAAC,GAC3D,OAAO,CAAC,IAAI,CAAC,CAoBf;AAED,wBAAsB,cAAc,CAClC,GAAG,EAAE,UAAU,EACf,SAAS,EAAE,MAAM,EACjB,MAAM,EAAE,MAAM,iBAgBf;AAED,eAAO,MAAM,+BAA+B,0UAW3C,CAAC;AAEF,eAAO,MAAM,oBAAoB,8XAUhC,CAAC;AAEF,eAAO,MAAM,8BAA8B,sMAO1C,CAAC;AAEF,eAAO,MAAM,yBAAyB,EAAE,uBAgHvC,CAAC;AAGF,eAAO,MAAM,sBAAsB,QAEwB,CAAC"}
@@ -4,7 +4,6 @@ import { runSchemaMigrations } from "../../../db/migration-lite.js";
  import { AutoResetSignal } from "../../change-streamer/schema/tables.js";
  import { populateFromExistingTables } from "../../replicator/schema/column-metadata.js";
  import { recordEvent, CREATE_RUNTIME_EVENTS_TABLE } from "../../replicator/schema/replication-state.js";
- import { CREATE_TABLE_METADATA_TABLE } from "../../replicator/schema/table-metadata.js";
  async function initReplica(log, debugName, dbPath, initialSync) {
  const setupMigration = {
  migrateSchema: (log2, tx) => initialSync(log2, tx),
@@ -70,6 +69,17 @@ const CREATE_V7_CHANGE_LOG = (
  );
  `
  );
+ const CREATE_V9_TABLE_METADATA_TABLE = (
+ /*sql*/
+ `
+ CREATE TABLE "_zero.tableMetadata" (
+ "schema" TEXT NOT NULL,
+ "table" TEXT NOT NULL,
+ "metadata" TEXT NOT NULL,
+ PRIMARY KEY ("schema", "table")
+ );
+ `
+ );
  const schemaVersionMigrationMap = {
  // There's no incremental migration from v1. Just reset the replica.
  4: {
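The likely reason for replacing the shared CREATE_TABLE_METADATA_TABLE import with this frozen CREATE_V9_TABLE_METADATA_TABLE constant: the new v11/v12 migrations further down alter "_zero.tableMetadata", so if the v9 step kept referencing the live DDL, a replica replaying the chain from an older version would create the table in its newest shape and the subsequent ALTER TABLE statements would fail. A minimal sketch of the frozen-DDL-per-version pattern — names and shapes here are illustrative, not the package's API:

    // Each migration step carries a copy of the DDL exactly as it looked at
    // that version, so replaying the chain always produces the same
    // intermediate schemas regardless of how the table later evolves.
    type Db = {exec(sql: string): void};

    const CREATE_V9 = `CREATE TABLE "t" ("metadata" TEXT NOT NULL)`;

    const migrations: Record<number, (db: Db) => void> = {
      // v9 creates the table as it existed at v9...
      9: db => db.exec(CREATE_V9),
      // ...so that v11's ALTER still applies when the chain is replayed.
      11: db => db.exec(`ALTER TABLE "t" ADD COLUMN "minRowVersion" TEXT`),
    };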
@@ -108,7 +118,7 @@ const schemaVersionMigrationMap = {
  /*sql*/
  `DELETE FROM "_zero.column_metadata"`
  );
- const tables = listTables(db, false);
+ const tables = listTables(db, false, false);
  populateFromExistingTables(db, tables);
  }
  },
@@ -121,7 +131,7 @@ const schemaVersionMigrationMap = {
  ADD COLUMN "backfillingColumnVersions" TEXT DEFAULT '{}';
  ALTER TABLE "_zero.column_metadata"
  ADD COLUMN backfill TEXT;
- ` + CREATE_TABLE_METADATA_TABLE
+ ` + CREATE_V9_TABLE_METADATA_TABLE
  );
  }
  },
@@ -135,11 +145,54 @@ const schemaVersionMigrationMap = {
  `
  );
  }
+ },
+ 11: {
+ migrateSchema: (_, db) => {
+ db.exec(
+ /*sql*/
+ `
+ ALTER TABLE "_zero.tableMetadata"
+ ADD COLUMN "minRowVersion" TEXT NOT NULL DEFAULT '00';
+
+ -- Removing the NOT NULL constraint from "metadata" requires copying
+ -- the column. We piggyback the rename to "upstreamMetadata" here.
+ ALTER TABLE "_zero.tableMetadata"
+ ADD COLUMN "upstreamMetadata" TEXT;
+ UPDATE "_zero.tableMetadata" SET "upstreamMetadata" = "metadata";
+ ALTER TABLE "_zero.tableMetadata" DROP "metadata";
+ `
+ );
+ }
+ },
+ 12: {
+ migrateSchema: (_, db) => {
+ db.exec(
+ /*sql*/
+ `
+ ALTER TABLE "_zero.tableMetadata"
+ ADD COLUMN "metadata" TEXT;
+ `
+ );
+ },
+ migrateData: (_, db) => {
+ db.exec(
+ /*sql*/
+ `
+ UPDATE "_zero.tableMetadata"
+ SET "upstreamMetadata" = COALESCE("metadata", "upstreamMetadata"),
+ "metadata" = NULL;
+ `
+ );
+ }
  }
  };
+ Object.keys(
+ schemaVersionMigrationMap
+ ).reduce((prev, curr) => Math.max(prev, parseInt(curr)), 0);
  export {
  CREATE_V6_COLUMN_METADATA_TABLE,
  CREATE_V7_CHANGE_LOG,
+ CREATE_V9_TABLE_METADATA_TABLE,
  initReplica,
  schemaVersionMigrationMap,
  upgradeReplica
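The bare Object.keys(...).reduce(...) expression in the compiled output looks unbound because the bundler separated the declaration from the expression; the .d.ts hunk above and the embedded sourcesContent show it is exported as CURRENT_SCHEMA_VERSION ("Referenced in tests" per the source comment). A self-contained sketch of the derivation, which with this release's migration keys evaluates to 12:

    // The exported constant is simply the largest numeric key of the
    // migration map. With keys {4,5,6,7,8,9,10,11,12} in this release,
    // CURRENT_SCHEMA_VERSION is 12.
    const schemaVersionMigrationMap: Record<number, unknown> = {
      4: {}, 5: {}, 6: {}, 7: {}, 8: {}, 9: {}, 10: {}, 11: {}, 12: {},
    };

    const CURRENT_SCHEMA_VERSION = Object.keys(schemaVersionMigrationMap)
      .reduce((prev, curr) => Math.max(prev, parseInt(curr)), 0); // => 12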
@@ -1 +1 @@
- {"version":3,"file":"replica-schema.js","sources":["../../../../../../../zero-cache/src/services/change-source/common/replica-schema.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {SqliteError} from '@rocicorp/zero-sqlite3';\nimport type {Database} from '../../../../../zqlite/src/db.ts';\nimport {listTables} from '../../../db/lite-tables.ts';\nimport {\n runSchemaMigrations,\n type IncrementalMigrationMap,\n type Migration,\n} from '../../../db/migration-lite.ts';\nimport {AutoResetSignal} from '../../change-streamer/schema/tables.ts';\nimport {populateFromExistingTables} from '../../replicator/schema/column-metadata.ts';\nimport {\n CREATE_RUNTIME_EVENTS_TABLE,\n recordEvent,\n} from '../../replicator/schema/replication-state.ts';\nimport {CREATE_TABLE_METADATA_TABLE} from '../../replicator/schema/table-metadata.ts';\n\nexport async function initReplica(\n log: LogContext,\n debugName: string,\n dbPath: string,\n initialSync: (lc: LogContext, tx: Database) => Promise<void>,\n): Promise<void> {\n const setupMigration: Migration = {\n migrateSchema: (log, tx) => initialSync(log, tx),\n minSafeVersion: 1,\n };\n\n try {\n await runSchemaMigrations(\n log,\n debugName,\n dbPath,\n setupMigration,\n schemaVersionMigrationMap,\n );\n } catch (e) {\n if (e instanceof SqliteError && e.code === 'SQLITE_CORRUPT') {\n throw new AutoResetSignal(e.message);\n }\n throw e;\n }\n}\n\nexport async function upgradeReplica(\n log: LogContext,\n debugName: string,\n dbPath: string,\n) {\n await runSchemaMigrations(\n log,\n debugName,\n dbPath,\n // setupMigration should never be invoked\n {\n migrateSchema: () => {\n throw new Error(\n 'This should only be called for already synced replicas',\n );\n },\n },\n schemaVersionMigrationMap,\n );\n}\n\nexport const CREATE_V6_COLUMN_METADATA_TABLE = /*sql*/ `\n CREATE TABLE \"_zero.column_metadata\" (\n table_name TEXT NOT NULL,\n column_name TEXT NOT NULL,\n upstream_type TEXT NOT NULL,\n is_not_null INTEGER NOT NULL,\n is_enum INTEGER NOT NULL,\n is_array INTEGER NOT NULL,\n character_max_length INTEGER,\n PRIMARY KEY (table_name, column_name)\n );\n`;\n\nexport const CREATE_V7_CHANGE_LOG = /*sql*/ `\n CREATE TABLE \"_zero.changeLog2\" (\n \"stateVersion\" TEXT NOT NULL,\n \"pos\" INT NOT NULL,\n \"table\" TEXT NOT NULL,\n \"rowKey\" TEXT NOT NULL,\n \"op\" TEXT NOT NULL,\n PRIMARY KEY(\"stateVersion\", \"pos\"),\n UNIQUE(\"table\", \"rowKey\")\n );\n`;\n\nexport const schemaVersionMigrationMap: IncrementalMigrationMap = {\n // There's no incremental migration from v1. Just reset the replica.\n 4: {\n migrateSchema: () => {\n throw new AutoResetSignal('upgrading replica to new schema');\n },\n minSafeVersion: 3,\n },\n\n 5: {\n migrateSchema: (_, db) => {\n db.exec(CREATE_RUNTIME_EVENTS_TABLE);\n },\n migrateData: (_, db) => {\n recordEvent(db, 'upgrade');\n },\n },\n\n // Revised in the migration to v8 because the v6 code was incomplete.\n 6: {},\n\n 7: {\n migrateSchema: (_, db) => {\n // Note: The original \"changeLog\" table is kept so that the replica file\n // is compatible with older zero-caches. 
However, it is truncated for\n // space savings (since historic changes were never read).\n db.exec(`DELETE FROM \"_zero.changeLog\"`);\n // First version of changeLog2\n db.exec(CREATE_V7_CHANGE_LOG);\n },\n },\n\n 8: {\n migrateSchema: (_, db) => {\n const tableExists = db\n .prepare(\n `SELECT 1 FROM sqlite_master WHERE type = 'table' AND name = '_zero.column_metadata'`,\n )\n .get();\n\n if (!tableExists) {\n db.exec(CREATE_V6_COLUMN_METADATA_TABLE);\n }\n },\n migrateData: (_, db) => {\n // Re-populate the ColumnMetadataStore; the original migration\n // at v6 was incomplete, as covered replicas migrated from earlier\n // versions but did not initialize the table for new replicas.\n db.exec(/*sql*/ `DELETE FROM \"_zero.column_metadata\"`);\n\n const tables = listTables(db, false);\n populateFromExistingTables(db, tables);\n },\n },\n\n 9: {\n migrateSchema: (_, db) => {\n db.exec(\n /*sql*/ `\n ALTER TABLE \"_zero.changeLog2\" \n ADD COLUMN \"backfillingColumnVersions\" TEXT DEFAULT '{}';\n ALTER TABLE \"_zero.column_metadata\"\n ADD COLUMN backfill TEXT;\n ` + CREATE_TABLE_METADATA_TABLE,\n );\n },\n },\n\n 10: {\n migrateSchema: (_, db) => {\n db.exec(/*sql*/ `\n ALTER TABLE \"_zero.replicationConfig\" \n ADD COLUMN \"initialSyncContext\" TEXT DEFAULT '{}';\n `);\n },\n },\n};\n"],"names":["log"],"mappings":";;;;;;;AAiBA,eAAsB,YACpB,KACA,WACA,QACA,aACe;AACf,QAAM,iBAA4B;AAAA,IAChC,eAAe,CAACA,MAAK,OAAO,YAAYA,MAAK,EAAE;AAAA,IAC/C,gBAAgB;AAAA,EAAA;AAGlB,MAAI;AACF,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EAEJ,SAAS,GAAG;AACV,QAAI,aAAa,eAAe,EAAE,SAAS,kBAAkB;AAC3D,YAAM,IAAI,gBAAgB,EAAE,OAAO;AAAA,IACrC;AACA,UAAM;AAAA,EACR;AACF;AAEA,eAAsB,eACpB,KACA,WACA,QACA;AACA,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA;AAAA,IAEA;AAAA,MACE,eAAe,MAAM;AACnB,cAAM,IAAI;AAAA,UACR;AAAA,QAAA;AAAA,MAEJ;AAAA,IAAA;AAAA,IAEF;AAAA,EAAA;AAEJ;AAEO,MAAM;AAAA;AAAA,EAA0C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAahD,MAAM;AAAA;AAAA,EAA+B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAYrC,MAAM,4BAAqD;AAAA;AAAA,EAEhE,GAAG;AAAA,IACD,eAAe,MAAM;AACnB,YAAM,IAAI,gBAAgB,iCAAiC;AAAA,IAC7D;AAAA,IACA,gBAAgB;AAAA,EAAA;AAAA,EAGlB,GAAG;AAAA,IACD,eAAe,CAAC,GAAG,OAAO;AACxB,SAAG,KAAK,2BAA2B;AAAA,IACrC;AAAA,IACA,aAAa,CAAC,GAAG,OAAO;AACtB,kBAAY,IAAI,SAAS;AAAA,IAC3B;AAAA,EAAA;AAAA;AAAA,EAIF,GAAG,CAAA;AAAA,EAEH,GAAG;AAAA,IACD,eAAe,CAAC,GAAG,OAAO;AAIxB,SAAG,KAAK,+BAA+B;AAEvC,SAAG,KAAK,oBAAoB;AAAA,IAC9B;AAAA,EAAA;AAAA,EAGF,GAAG;AAAA,IACD,eAAe,CAAC,GAAG,OAAO;AACxB,YAAM,cAAc,GACjB;AAAA,QACC;AAAA,MAAA,EAED,IAAA;AAEH,UAAI,CAAC,aAAa;AAChB,WAAG,KAAK,+BAA+B;AAAA,MACzC;AAAA,IACF;AAAA,IACA,aAAa,CAAC,GAAG,OAAO;AAItB,SAAG;AAAA;AAAA,QAAa;AAAA,MAAA;AAEhB,YAAM,SAAS,WAAW,IAAI,KAAK;AACnC,iCAA2B,IAAI,MAAM;AAAA,IACvC;AAAA,EAAA;AAAA,EAGF,GAAG;AAAA,IACD,eAAe,CAAC,GAAG,OAAO;AACxB,SAAG;AAAA;AAAA,QACO;AAAA;AAAA;AAAA;AAAA;AAAA,UAKN;AAAA,MAAA;AAAA,IAEN;AAAA,EAAA;AAAA,EAGF,IAAI;AAAA,IACF,eAAe,CAAC,GAAG,OAAO;AACxB,SAAG;AAAA;AAAA,QAAa;AAAA;AAAA;AAAA;AAAA,MAAA;AAAA,IAIlB;AAAA,EAAA;AAEJ;"}
+ {"version":3,"file":"replica-schema.js","sources":["../../../../../../../zero-cache/src/services/change-source/common/replica-schema.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {SqliteError} from '@rocicorp/zero-sqlite3';\nimport type {Database} from '../../../../../zqlite/src/db.ts';\nimport {listTables} from '../../../db/lite-tables.ts';\nimport {\n runSchemaMigrations,\n type IncrementalMigrationMap,\n type Migration,\n} from '../../../db/migration-lite.ts';\nimport {AutoResetSignal} from '../../change-streamer/schema/tables.ts';\nimport {populateFromExistingTables} from '../../replicator/schema/column-metadata.ts';\nimport {\n CREATE_RUNTIME_EVENTS_TABLE,\n recordEvent,\n} from '../../replicator/schema/replication-state.ts';\n\nexport async function initReplica(\n log: LogContext,\n debugName: string,\n dbPath: string,\n initialSync: (lc: LogContext, tx: Database) => Promise<void>,\n): Promise<void> {\n const setupMigration: Migration = {\n migrateSchema: (log, tx) => initialSync(log, tx),\n minSafeVersion: 1,\n };\n\n try {\n await runSchemaMigrations(\n log,\n debugName,\n dbPath,\n setupMigration,\n schemaVersionMigrationMap,\n );\n } catch (e) {\n if (e instanceof SqliteError && e.code === 'SQLITE_CORRUPT') {\n throw new AutoResetSignal(e.message);\n }\n throw e;\n }\n}\n\nexport async function upgradeReplica(\n log: LogContext,\n debugName: string,\n dbPath: string,\n) {\n await runSchemaMigrations(\n log,\n debugName,\n dbPath,\n // setupMigration should never be invoked\n {\n migrateSchema: () => {\n throw new Error(\n 'This should only be called for already synced replicas',\n );\n },\n },\n schemaVersionMigrationMap,\n );\n}\n\nexport const CREATE_V6_COLUMN_METADATA_TABLE = /*sql*/ `\n CREATE TABLE \"_zero.column_metadata\" (\n table_name TEXT NOT NULL,\n column_name TEXT NOT NULL,\n upstream_type TEXT NOT NULL,\n is_not_null INTEGER NOT NULL,\n is_enum INTEGER NOT NULL,\n is_array INTEGER NOT NULL,\n character_max_length INTEGER,\n PRIMARY KEY (table_name, column_name)\n );\n`;\n\nexport const CREATE_V7_CHANGE_LOG = /*sql*/ `\n CREATE TABLE \"_zero.changeLog2\" (\n \"stateVersion\" TEXT NOT NULL,\n \"pos\" INT NOT NULL,\n \"table\" TEXT NOT NULL,\n \"rowKey\" TEXT NOT NULL,\n \"op\" TEXT NOT NULL,\n PRIMARY KEY(\"stateVersion\", \"pos\"),\n UNIQUE(\"table\", \"rowKey\")\n );\n`;\n\nexport const CREATE_V9_TABLE_METADATA_TABLE = /*sql*/ `\n CREATE TABLE \"_zero.tableMetadata\" (\n \"schema\" TEXT NOT NULL,\n \"table\" TEXT NOT NULL,\n \"metadata\" TEXT NOT NULL,\n PRIMARY KEY (\"schema\", \"table\")\n );\n`;\n\nexport const schemaVersionMigrationMap: IncrementalMigrationMap = {\n // There's no incremental migration from v1. Just reset the replica.\n 4: {\n migrateSchema: () => {\n throw new AutoResetSignal('upgrading replica to new schema');\n },\n minSafeVersion: 3,\n },\n\n 5: {\n migrateSchema: (_, db) => {\n db.exec(CREATE_RUNTIME_EVENTS_TABLE);\n },\n migrateData: (_, db) => {\n recordEvent(db, 'upgrade');\n },\n },\n\n // Revised in the migration to v8 because the v6 code was incomplete.\n 6: {},\n\n 7: {\n migrateSchema: (_, db) => {\n // Note: The original \"changeLog\" table is kept so that the replica file\n // is compatible with older zero-caches. 
However, it is truncated for\n // space savings (since historic changes were never read).\n db.exec(`DELETE FROM \"_zero.changeLog\"`);\n // First version of changeLog2\n db.exec(CREATE_V7_CHANGE_LOG);\n },\n },\n\n 8: {\n migrateSchema: (_, db) => {\n const tableExists = db\n .prepare(\n `SELECT 1 FROM sqlite_master WHERE type = 'table' AND name = '_zero.column_metadata'`,\n )\n .get();\n\n if (!tableExists) {\n db.exec(CREATE_V6_COLUMN_METADATA_TABLE);\n }\n },\n migrateData: (_, db) => {\n // Re-populate the ColumnMetadataStore; the original migration\n // at v6 was incomplete, as covered replicas migrated from earlier\n // versions but did not initialize the table for new replicas.\n db.exec(/*sql*/ `DELETE FROM \"_zero.column_metadata\"`);\n\n const tables = listTables(db, false, false);\n populateFromExistingTables(db, tables);\n },\n },\n\n 9: {\n migrateSchema: (_, db) => {\n db.exec(\n /*sql*/ `\n ALTER TABLE \"_zero.changeLog2\" \n ADD COLUMN \"backfillingColumnVersions\" TEXT DEFAULT '{}';\n ALTER TABLE \"_zero.column_metadata\"\n ADD COLUMN backfill TEXT;\n ` + CREATE_V9_TABLE_METADATA_TABLE,\n );\n },\n },\n\n 10: {\n migrateSchema: (_, db) => {\n db.exec(/*sql*/ `\n ALTER TABLE \"_zero.replicationConfig\" \n ADD COLUMN \"initialSyncContext\" TEXT DEFAULT '{}';\n `);\n },\n },\n\n 11: {\n migrateSchema: (_, db) => {\n db.exec(/*sql*/ `\n ALTER TABLE \"_zero.tableMetadata\"\n ADD COLUMN \"minRowVersion\" TEXT NOT NULL DEFAULT '00';\n\n -- Removing the NOT NULL constraint from \"metadata\" requires copying\n -- the column. We piggyback the rename to \"upstreamMetadata\" here.\n ALTER TABLE \"_zero.tableMetadata\"\n ADD COLUMN \"upstreamMetadata\" TEXT;\n UPDATE \"_zero.tableMetadata\" SET \"upstreamMetadata\" = \"metadata\";\n ALTER TABLE \"_zero.tableMetadata\" DROP \"metadata\";\n `);\n },\n },\n\n 12: {\n migrateSchema: (_, db) => {\n // Bring back the \"metadata\" column removed in v11, but as a NULL-able column.\n // It is needed for backwards compatibility.\n db.exec(/*sql*/ `\n ALTER TABLE \"_zero.tableMetadata\"\n ADD COLUMN \"metadata\" TEXT;\n `);\n },\n\n migrateData: (_, db) => {\n // For rollback then roll forward, re-copy anything written to metadata.\n db.exec(/*sql*/ `\n UPDATE \"_zero.tableMetadata\" \n SET \"upstreamMetadata\" = COALESCE(\"metadata\", \"upstreamMetadata\"),\n \"metadata\" = NULL;\n `);\n },\n },\n};\n\n// Referenced in tests.\nexport const CURRENT_SCHEMA_VERSION = Object.keys(\n schemaVersionMigrationMap,\n).reduce((prev, curr) => Math.max(prev, parseInt(curr)), 
0);\n"],"names":["log"],"mappings":";;;;;;AAgBA,eAAsB,YACpB,KACA,WACA,QACA,aACe;AACf,QAAM,iBAA4B;AAAA,IAChC,eAAe,CAACA,MAAK,OAAO,YAAYA,MAAK,EAAE;AAAA,IAC/C,gBAAgB;AAAA,EAAA;AAGlB,MAAI;AACF,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EAEJ,SAAS,GAAG;AACV,QAAI,aAAa,eAAe,EAAE,SAAS,kBAAkB;AAC3D,YAAM,IAAI,gBAAgB,EAAE,OAAO;AAAA,IACrC;AACA,UAAM;AAAA,EACR;AACF;AAEA,eAAsB,eACpB,KACA,WACA,QACA;AACA,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA;AAAA,IAEA;AAAA,MACE,eAAe,MAAM;AACnB,cAAM,IAAI;AAAA,UACR;AAAA,QAAA;AAAA,MAEJ;AAAA,IAAA;AAAA,IAEF;AAAA,EAAA;AAEJ;AAEO,MAAM;AAAA;AAAA,EAA0C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAahD,MAAM;AAAA;AAAA,EAA+B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAYrC,MAAM;AAAA;AAAA,EAAyC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAS/C,MAAM,4BAAqD;AAAA;AAAA,EAEhE,GAAG;AAAA,IACD,eAAe,MAAM;AACnB,YAAM,IAAI,gBAAgB,iCAAiC;AAAA,IAC7D;AAAA,IACA,gBAAgB;AAAA,EAAA;AAAA,EAGlB,GAAG;AAAA,IACD,eAAe,CAAC,GAAG,OAAO;AACxB,SAAG,KAAK,2BAA2B;AAAA,IACrC;AAAA,IACA,aAAa,CAAC,GAAG,OAAO;AACtB,kBAAY,IAAI,SAAS;AAAA,IAC3B;AAAA,EAAA;AAAA;AAAA,EAIF,GAAG,CAAA;AAAA,EAEH,GAAG;AAAA,IACD,eAAe,CAAC,GAAG,OAAO;AAIxB,SAAG,KAAK,+BAA+B;AAEvC,SAAG,KAAK,oBAAoB;AAAA,IAC9B;AAAA,EAAA;AAAA,EAGF,GAAG;AAAA,IACD,eAAe,CAAC,GAAG,OAAO;AACxB,YAAM,cAAc,GACjB;AAAA,QACC;AAAA,MAAA,EAED,IAAA;AAEH,UAAI,CAAC,aAAa;AAChB,WAAG,KAAK,+BAA+B;AAAA,MACzC;AAAA,IACF;AAAA,IACA,aAAa,CAAC,GAAG,OAAO;AAItB,SAAG;AAAA;AAAA,QAAa;AAAA,MAAA;AAEhB,YAAM,SAAS,WAAW,IAAI,OAAO,KAAK;AAC1C,iCAA2B,IAAI,MAAM;AAAA,IACvC;AAAA,EAAA;AAAA,EAGF,GAAG;AAAA,IACD,eAAe,CAAC,GAAG,OAAO;AACxB,SAAG;AAAA;AAAA,QACO;AAAA;AAAA;AAAA;AAAA;AAAA,UAKN;AAAA,MAAA;AAAA,IAEN;AAAA,EAAA;AAAA,EAGF,IAAI;AAAA,IACF,eAAe,CAAC,GAAG,OAAO;AACxB,SAAG;AAAA;AAAA,QAAa;AAAA;AAAA;AAAA;AAAA,MAAA;AAAA,IAIlB;AAAA,EAAA;AAAA,EAGF,IAAI;AAAA,IACF,eAAe,CAAC,GAAG,OAAO;AACxB,SAAG;AAAA;AAAA,QAAa;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAAA;AAAA,IAWlB;AAAA,EAAA;AAAA,EAGF,IAAI;AAAA,IACF,eAAe,CAAC,GAAG,OAAO;AAGxB,SAAG;AAAA;AAAA,QAAa;AAAA;AAAA;AAAA;AAAA,MAAA;AAAA,IAIlB;AAAA,IAEA,aAAa,CAAC,GAAG,OAAO;AAEtB,SAAG;AAAA;AAAA,QAAa;AAAA;AAAA;AAAA;AAAA;AAAA,MAAA;AAAA,IAKlB;AAAA,EAAA;AAEJ;AAGsC,OAAO;AAAA,EAC3C;AACF,EAAE,OAAO,CAAC,MAAM,SAAS,KAAK,IAAI,MAAM,SAAS,IAAI,CAAC,GAAG,CAAC;"}
@@ -1 +1 @@
- {"version":3,"file":"backfill-stream.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/change-source/pg/backfill-stream.ts"],"names":[],"mappings":"AAIA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAWjD,OAAO,KAAK,EACV,iBAAiB,EACjB,eAAe,EAGf,eAAe,EAChB,MAAM,wBAAwB,CAAC;AAYhC,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,mBAAmB,CAAC;AAI/C,KAAK,aAAa,GAAG;IACnB;;;OAGG;IACH,mBAAmB,CAAC,EAAE,MAAM,CAAC;CAC9B,CAAC;AAOF;;;;;GAKG;AACH,wBAAuB,cAAc,CACnC,EAAE,EAAE,UAAU,EACd,WAAW,EAAE,MAAM,EACnB,EAAC,IAAI,EAAE,YAAY,EAAC,EAAE,IAAI,CAAC,OAAO,EAAE,MAAM,GAAG,cAAc,CAAC,EAC5D,EAAE,EAAE,eAAe,EACnB,IAAI,GAAE,aAAkB,GACvB,cAAc,CAAC,eAAe,GAAG,iBAAiB,CAAC,CAyDrD"}
+ {"version":3,"file":"backfill-stream.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/change-source/pg/backfill-stream.ts"],"names":[],"mappings":"AAIA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAWjD,OAAO,KAAK,EACV,iBAAiB,EACjB,eAAe,EAGf,eAAe,EAChB,MAAM,wBAAwB,CAAC;AAYhC,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,mBAAmB,CAAC;AAI/C,KAAK,aAAa,GAAG;IACnB;;;OAGG;IACH,mBAAmB,CAAC,EAAE,MAAM,CAAC;CAC9B,CAAC;AAOF;;;;;GAKG;AACH,wBAAuB,cAAc,CACnC,EAAE,EAAE,UAAU,EACd,WAAW,EAAE,MAAM,EACnB,EAAC,IAAI,EAAE,YAAY,EAAC,EAAE,IAAI,CAAC,OAAO,EAAE,MAAM,GAAG,cAAc,CAAC,EAC5D,EAAE,EAAE,eAAe,EACnB,IAAI,GAAE,aAAkB,GACvB,cAAc,CAAC,eAAe,GAAG,iBAAiB,CAAC,CA6DrD"}
@@ -5,7 +5,7 @@ import { parse } from "../../../../../shared/src/valita.js";
  import { READONLY } from "../../../db/mode-enum.js";
  import { TsvParser } from "../../../db/pg-copy.js";
  import { getTypeParsers } from "../../../db/pg-type-parser.js";
- import { TransactionPool } from "../../../db/transaction-pool.js";
+ import { importSnapshot, TransactionPool } from "../../../db/transaction-pool.js";
  import { pgClient } from "../../../types/pg.js";
  import { SchemaIncompatibilityError } from "../common/backfill-manager.js";
  import { tableMetadataSchema, columnMetadataSchema } from "./backfill-metadata.js";
@@ -21,9 +21,15 @@ async function* streamBackfill(lc, upstreamURI, { slot, publications }, bf, opts
  ["max_lifetime"]: 120 * 60
  // set a long (2h) limit for COPY streaming
  });
- const tx = new TransactionPool(lc, READONLY).run(db);
+ let tx;
+ let watermark;
  try {
- const watermark = await setSnapshot(lc, upstreamURI, tx, slot);
+ ({ tx, watermark } = await createSnapshotTransaction(
+ lc,
+ upstreamURI,
+ db,
+ slot
+ ));
  const { tableSpec, backfill } = await validateSchema(
  tx,
  publications,
@@ -47,9 +53,7 @@ async function* streamBackfill(lc, upstreamURI, { slot, publications }, bf, opts
  }
  throw e;
  } finally {
- tx.setDone();
- void tx.done().catch(() => {
- });
+ tx?.setDone();
  void db.end().catch((e) => lc.warn?.(`error closing backfill connection`, e));
  }
  }
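Because the TransactionPool is no longer created before the try block, the finally clause switches to optional chaining: tx is only assigned once createSnapshotTransaction succeeds, so cleanup must tolerate an unassigned handle. A reduced model of the pattern — acquire and Tx are illustrative names, not the package's API:

    // Resource creation can itself throw, so the handle is nullable and
    // the finally block guards with `?.`.
    type Tx = {setDone(): void};
    function acquire(): Tx {
      return {setDone() {}};
    }

    let tx: Tx | undefined;
    try {
      tx = acquire(); // may throw before tx is ever assigned
      // ... stream rows through tx ...
    } finally {
      tx?.setDone(); // no-op if acquisition failed; avoids a TypeError
    }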
@@ -117,7 +121,7 @@ async function* stream(lc, tx, backfill, { select, getTotalRows, getTotalBytes }
  `Finished streaming ${status.rows} rows, ${totalMsgs} msgs, ${totalBytes} bytes (${elapsed} ms)`
  );
  }
- async function setSnapshot(lc, upstreamURI, tx, slotNamePrefix) {
+ async function createSnapshotTransaction(lc, upstreamURI, db, slotNamePrefix) {
  const replicationSession = pgClient(lc, upstreamURI, {
  ["fetch_types"]: false,
  // Necessary for the streaming protocol
@@ -127,13 +131,13 @@ async function setSnapshot(lc, upstreamURI, tx, slotNamePrefix) {
  const tempSlot = `${slotNamePrefix}_bf_${Date.now()}`;
  try {
  const { snapshot_name: snapshot, consistent_point: lsn } = await createReplicationSlot(lc, replicationSession, tempSlot);
- await tx.processReadTask(
- (sql) => sql.unsafe(`SET TRANSACTION SNAPSHOT '${snapshot}'`)
- );
+ const { init, imported } = importSnapshot(snapshot);
+ const tx = new TransactionPool(lc, READONLY, init).run(db);
+ await imported;
  await replicationSession.unsafe(`DROP_REPLICATION_SLOT "${tempSlot}"`);
  const watermark = toStateVersionString(lsn);
  lc.info?.(`Opened snapshot transaction at LSN ${lsn} (${watermark})`);
- return watermark;
+ return { tx, watermark };
  } catch (e) {
  await replicationSession.unsafe(
  /*sql*/
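The snapshot hand-off now goes through importSnapshot(snapshot): its init task is run by the new TransactionPool(lc, READONLY, init) when the pool starts, and its imported promise resolves once the snapshot has been adopted, replacing the ad-hoc SET TRANSACTION SNAPSHOT issued through processReadTask. The underlying Postgres mechanics, sketched with a generic client — query is an illustrative interface, not the package's postgres.js API:

    // Any session can adopt an exported snapshot name, provided it does so
    // as the first statement of a REPEATABLE READ (or stricter) transaction.
    async function openAtSnapshot(
      client: {query(sql: string): Promise<unknown>}, // hypothetical client
      snapshotName: string, // e.g. snapshot_name from CREATE_REPLICATION_SLOT
    ): Promise<void> {
      await client.query(`BEGIN ISOLATION LEVEL REPEATABLE READ`);
      // Must precede any other query in the transaction, or Postgres rejects it.
      await client.query(`SET TRANSACTION SNAPSHOT '${snapshotName}'`);
      // Reads now see exactly the state at the slot's consistent_point LSN.
    }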
@@ -1 +1 @@
- {"version":3,"file":"backfill-stream.js","sources":["../../../../../../../zero-cache/src/services/change-source/pg/backfill-stream.ts"],"sourcesContent":["import {\n PG_UNDEFINED_COLUMN,\n PG_UNDEFINED_TABLE,\n} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport postgres from 'postgres';\nimport {equals} from '../../../../../shared/src/set-utils.ts';\nimport * as v from '../../../../../shared/src/valita.ts';\nimport {READONLY} from '../../../db/mode-enum.ts';\nimport {TsvParser} from '../../../db/pg-copy.ts';\nimport {getTypeParsers, type TypeParser} from '../../../db/pg-type-parser.ts';\nimport type {PublishedTableSpec} from '../../../db/specs.ts';\nimport {TransactionPool} from '../../../db/transaction-pool.ts';\nimport {pgClient} from '../../../types/pg.ts';\nimport {SchemaIncompatibilityError} from '../common/backfill-manager.ts';\nimport type {\n BackfillCompleted,\n BackfillRequest,\n DownloadStatus,\n JSONValue,\n MessageBackfill,\n} from '../protocol/current.ts';\nimport {\n columnMetadataSchema,\n tableMetadataSchema,\n} from './backfill-metadata.ts';\nimport {\n createReplicationSlot,\n makeDownloadStatements,\n type DownloadStatements,\n} from './initial-sync.ts';\nimport {toStateVersionString} from './lsn.ts';\nimport {getPublicationInfo} from './schema/published.ts';\nimport type {Replica} from './schema/shard.ts';\n\ntype BackfillParams = Omit<BackfillCompleted, 'tag'>;\n\ntype StreamOptions = {\n /**\n * The number of bytes at which to flush a batch of rows in a\n * backfill message. Defaults to Node's getDefaultHighWatermark().\n */\n flushThresholdBytes?: number;\n};\n\n// The size of chunks that Postgres sends on COPY stream.\n// This happens to match NodeJS's getDefaultHighWatermark()\n// (for Node v20+).\nconst POSTGRES_COPY_CHUNK_SIZE = 64 * 1024;\n\n/**\n * Streams a series of `backfill` messages (ending with `backfill-complete`)\n * at a set watermark (i.e. LSN). 
The data is retrieved via a COPY stream\n * made at a transaction snapshot corresponding to specific LSN, obtained by\n * creating a short-lived replication slot.\n */\nexport async function* streamBackfill(\n lc: LogContext,\n upstreamURI: string,\n {slot, publications}: Pick<Replica, 'slot' | 'publications'>,\n bf: BackfillRequest,\n opts: StreamOptions = {},\n): AsyncGenerator<MessageBackfill | BackfillCompleted> {\n lc = lc\n .withContext('component', 'backfill')\n .withContext('table', bf.table.name);\n\n const {flushThresholdBytes = POSTGRES_COPY_CHUNK_SIZE} = opts;\n const db = pgClient(lc, upstreamURI, {\n connection: {['application_name']: 'backfill-stream'},\n ['max_lifetime']: 120 * 60, // set a long (2h) limit for COPY streaming\n });\n const tx = new TransactionPool(lc, READONLY).run(db);\n try {\n const watermark = await setSnapshot(lc, upstreamURI, tx, slot);\n const {tableSpec, backfill} = await validateSchema(\n tx,\n publications,\n bf,\n watermark,\n );\n const types = await getTypeParsers(db, {returnJsonAsString: true});\n\n // Note: validateSchema ensures that the rowKey and columns are disjoint\n const {relation, columns} = backfill;\n const cols = [...relation.rowKey.columns, ...columns];\n\n yield* stream(\n lc,\n tx,\n backfill,\n makeDownloadStatements(tableSpec, cols),\n cols.map(col => types.getTypeParser(tableSpec.columns[col].typeOID)),\n flushThresholdBytes,\n );\n } catch (e) {\n // Although we make the best effort to validate the schema at the\n // transaction snapshot, certain forms of `ALTER TABLE` are not\n // MVCC safe and not \"frozen\" in the snapshot:\n //\n // https://www.postgresql.org/docs/current/mvcc-caveats.html\n //\n // Handle these errors as schema incompatibility errors rather than\n // unknown runtime errors.\n if (\n e instanceof postgres.PostgresError &&\n (e.code === PG_UNDEFINED_TABLE || e.code === PG_UNDEFINED_COLUMN)\n ) {\n throw new SchemaIncompatibilityError(bf, String(e), {cause: e});\n }\n throw e;\n } finally {\n tx.setDone();\n // errors are already thrown and handled from processReadTask()\n void tx.done().catch(() => {});\n // Workaround postgres.js hanging at the end of some COPY commands:\n // https://github.com/porsager/postgres/issues/499\n void db.end().catch(e => lc.warn?.(`error closing backfill connection`, e));\n }\n}\n\nasync function* stream(\n lc: LogContext,\n tx: TransactionPool,\n backfill: BackfillParams,\n {select, getTotalRows, getTotalBytes}: DownloadStatements,\n colParsers: TypeParser[],\n flushThresholdBytes: number,\n): AsyncGenerator<MessageBackfill | BackfillCompleted> {\n const start = performance.now();\n const [rows, bytes] = await tx.processReadTask(sql =>\n Promise.all([\n sql.unsafe<{totalRows: bigint}[]>(getTotalRows),\n sql.unsafe<{totalBytes: bigint}[]>(getTotalBytes),\n ]),\n );\n const status: DownloadStatus = {\n rows: 0,\n totalRows: Number(rows[0].totalRows),\n totalBytes: Number(bytes[0].totalBytes),\n };\n\n let elapsed = (performance.now() - start).toFixed(3);\n lc.info?.(`Computed total rows and bytes for: ${select} (${elapsed} ms)`, {\n status,\n });\n const copyStream = await tx.processReadTask(sql =>\n sql.unsafe(`COPY (${select}) TO STDOUT`).readable(),\n );\n\n const tsvParser = new TsvParser();\n let totalBytes = 0;\n let totalMsgs = 0;\n let rowValues: JSONValue[][] = [];\n let bufferedBytes = 0;\n\n const logFlushed = () => {\n lc.debug?.(\n `Flushed ${rowValues.length} rows, ${bufferedBytes} bytes ` +\n `(total: rows=${status.rows}, msgs=${totalMsgs}, 
bytes=${totalBytes})`,\n );\n };\n\n // Tracks the row being parsed.\n let row: JSONValue[] = Array.from({length: colParsers.length});\n let col = 0;\n\n for await (const data of copyStream) {\n const chunk = data as Buffer;\n for (const text of tsvParser.parse(chunk)) {\n row[col] = text === null ? null : (colParsers[col](text) as JSONValue);\n\n if (++col === colParsers.length) {\n rowValues.push(row);\n status.rows++;\n row = Array.from({length: colParsers.length});\n col = 0;\n }\n }\n bufferedBytes += chunk.byteLength;\n totalBytes += chunk.byteLength;\n\n if (bufferedBytes >= flushThresholdBytes) {\n yield {tag: 'backfill', ...backfill, rowValues, status};\n totalMsgs++;\n logFlushed();\n rowValues = [];\n bufferedBytes = 0;\n }\n }\n\n // Flush the last batch of rows.\n if (rowValues.length > 0) {\n yield {tag: 'backfill', ...backfill, rowValues, status};\n totalMsgs++;\n logFlushed();\n }\n\n yield {tag: 'backfill-completed', ...backfill, status};\n elapsed = (performance.now() - start).toFixed(3);\n lc.info?.(\n `Finished streaming ${status.rows} rows, ${totalMsgs} msgs, ${totalBytes} bytes ` +\n `(${elapsed} ms)`,\n );\n}\n\n/**\n * Creates (and drops) a replication slot in order to obtain a snapshot\n * that corresponds with a specific LSN. Sets the snapshot on the\n * TransactionPool and returns the watermark corresponding to the LSN.\n *\n * (Note that PG's other LSN-related functions are not scoped to a\n * transaction; this is the only way to get set a transaction at a specific\n * LSN.)\n */\nasync function setSnapshot(\n lc: LogContext,\n upstreamURI: string,\n tx: TransactionPool,\n slotNamePrefix: string,\n) {\n const replicationSession = pgClient(lc, upstreamURI, {\n ['fetch_types']: false, // Necessary for the streaming protocol\n connection: {replication: 'database'}, // https://www.postgresql.org/docs/current/protocol-replication.html\n });\n const tempSlot = `${slotNamePrefix}_bf_${Date.now()}`;\n try {\n const {snapshot_name: snapshot, consistent_point: lsn} =\n await createReplicationSlot(lc, replicationSession, tempSlot);\n\n await tx.processReadTask(sql =>\n sql.unsafe(`SET TRANSACTION SNAPSHOT '${snapshot}'`),\n );\n // Once the snapshot has been set, the replication session and slot can\n // be closed / dropped.\n await replicationSession.unsafe(`DROP_REPLICATION_SLOT \"${tempSlot}\"`);\n\n const watermark = toStateVersionString(lsn);\n lc.info?.(`Opened snapshot transaction at LSN ${lsn} (${watermark})`);\n return watermark;\n } catch (e) {\n // In the event of a failure, clean up the replication slot if created.\n await replicationSession.unsafe(\n /*sql*/\n `SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots\n WHERE slot_name = '${tempSlot}'`,\n );\n lc.error?.(`Failed to create backfill snapshot`, e);\n throw e;\n } finally {\n await replicationSession.end();\n }\n}\n\nfunction validateSchema(\n tx: TransactionPool,\n publications: string[],\n bf: BackfillRequest,\n watermark: string,\n): Promise<{\n tableSpec: PublishedTableSpec;\n backfill: BackfillParams;\n}> {\n return tx.processReadTask(async sql => {\n const {tables} = await getPublicationInfo(sql, publications);\n const spec = tables.find(\n spec => spec.schema === bf.table.schema && spec.name === bf.table.name,\n );\n if (!spec) {\n throw new SchemaIncompatibilityError(\n bf,\n `Table has been renamed or dropped`,\n );\n }\n const tableMeta = v.parse(bf.table.metadata, tableMetadataSchema);\n if (spec.schemaOID !== tableMeta.schemaOID) {\n throw new SchemaIncompatibilityError(\n 
bf,\n `Schema no longer corresponds to the original schema`,\n );\n }\n if (spec.oid !== tableMeta.relationOID) {\n throw new SchemaIncompatibilityError(\n bf,\n `Table no longer corresponds to the original table`,\n );\n }\n if (\n !equals(\n new Set(Object.keys(tableMeta.rowKey)),\n new Set(spec.replicaIdentityColumns),\n )\n ) {\n throw new SchemaIncompatibilityError(\n bf,\n 'Row key (e.g. PRIMARY KEY or INDEX) has changed',\n );\n }\n const allCols = [\n ...Object.entries(tableMeta.rowKey),\n ...Object.entries(bf.columns),\n ];\n for (const [col, val] of allCols) {\n const colSpec = spec.columns[col];\n if (!colSpec) {\n throw new SchemaIncompatibilityError(\n bf,\n `Column ${col} has been renamed or dropped`,\n );\n }\n const colMeta = v.parse(val, columnMetadataSchema);\n if (colMeta.attNum !== colSpec.pos) {\n throw new SchemaIncompatibilityError(\n bf,\n `Column ${col} no longer corresponds to the original column`,\n );\n }\n }\n const backfill: BackfillParams = {\n relation: {\n schema: bf.table.schema,\n name: bf.table.name,\n rowKey: {columns: Object.keys(tableMeta.rowKey)},\n },\n columns: Object.keys(bf.columns).filter(\n col => !(col in tableMeta.rowKey),\n ),\n watermark,\n };\n return {tableSpec: spec, backfill};\n });\n}\n"],"names":["spec","v.parse"],"mappings":";;;;;;;;;;;;;;AAgDA,MAAM,2BAA2B,KAAK;AAQtC,gBAAuB,eACrB,IACA,aACA,EAAC,MAAM,gBACP,IACA,OAAsB,IAC+B;AACrD,OAAK,GACF,YAAY,aAAa,UAAU,EACnC,YAAY,SAAS,GAAG,MAAM,IAAI;AAErC,QAAM,EAAC,sBAAsB,yBAAA,IAA4B;AACzD,QAAM,KAAK,SAAS,IAAI,aAAa;AAAA,IACnC,YAAY,EAAC,CAAC,kBAAkB,GAAG,kBAAA;AAAA,IACnC,CAAC,cAAc,GAAG,MAAM;AAAA;AAAA,EAAA,CACzB;AACD,QAAM,KAAK,IAAI,gBAAgB,IAAI,QAAQ,EAAE,IAAI,EAAE;AACnD,MAAI;AACF,UAAM,YAAY,MAAM,YAAY,IAAI,aAAa,IAAI,IAAI;AAC7D,UAAM,EAAC,WAAW,SAAA,IAAY,MAAM;AAAA,MAClC;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAEF,UAAM,QAAQ,MAAM,eAAe,IAAI,EAAC,oBAAoB,MAAK;AAGjE,UAAM,EAAC,UAAU,QAAA,IAAW;AAC5B,UAAM,OAAO,CAAC,GAAG,SAAS,OAAO,SAAS,GAAG,OAAO;AAEpD,WAAO;AAAA,MACL;AAAA,MACA;AAAA,MACA;AAAA,MACA,uBAAuB,WAAW,IAAI;AAAA,MACtC,KAAK,IAAI,CAAA,QAAO,MAAM,cAAc,UAAU,QAAQ,GAAG,EAAE,OAAO,CAAC;AAAA,MACnE;AAAA,IAAA;AAAA,EAEJ,SAAS,GAAG;AASV,QACE,aAAa,SAAS,kBACrB,EAAE,SAAS,sBAAsB,EAAE,SAAS,sBAC7C;AACA,YAAM,IAAI,2BAA2B,IAAI,OAAO,CAAC,GAAG,EAAC,OAAO,GAAE;AAAA,IAChE;AACA,UAAM;AAAA,EACR,UAAA;AACE,OAAG,QAAA;AAEH,SAAK,GAAG,OAAO,MAAM,MAAM;AAAA,IAAC,CAAC;AAG7B,SAAK,GAAG,MAAM,MAAM,OAAK,GAAG,OAAO,qCAAqC,CAAC,CAAC;AAAA,EAC5E;AACF;AAEA,gBAAgB,OACd,IACA,IACA,UACA,EAAC,QAAQ,cAAc,cAAA,GACvB,YACA,qBACqD;AACrD,QAAM,QAAQ,YAAY,IAAA;AAC1B,QAAM,CAAC,MAAM,KAAK,IAAI,MAAM,GAAG;AAAA,IAAgB,CAAA,QAC7C,QAAQ,IAAI;AAAA,MACV,IAAI,OAA8B,YAAY;AAAA,MAC9C,IAAI,OAA+B,aAAa;AAAA,IAAA,CACjD;AAAA,EAAA;AAEH,QAAM,SAAyB;AAAA,IAC7B,MAAM;AAAA,IACN,WAAW,OAAO,KAAK,CAAC,EAAE,SAAS;AAAA,IACnC,YAAY,OAAO,MAAM,CAAC,EAAE,UAAU;AAAA,EAAA;AAGxC,MAAI,WAAW,YAAY,IAAA,IAAQ,OAAO,QAAQ,CAAC;AACnD,KAAG,OAAO,sCAAsC,MAAM,KAAK,OAAO,QAAQ;AAAA,IACxE;AAAA,EAAA,CACD;AACD,QAAM,aAAa,MAAM,GAAG;AAAA,IAAgB,SAC1C,IAAI,OAAO,SAAS,MAAM,aAAa,EAAE,SAAA;AAAA,EAAS;AAGpD,QAAM,YAAY,IAAI,UAAA;AACtB,MAAI,aAAa;AACjB,MAAI,YAAY;AAChB,MAAI,YAA2B,CAAA;AAC/B,MAAI,gBAAgB;AAEpB,QAAM,aAAa,MAAM;AACvB,OAAG;AAAA,MACD,WAAW,UAAU,MAAM,UAAU,aAAa,uBAChC,OAAO,IAAI,UAAU,SAAS,WAAW,UAAU;AAAA,IAAA;AAAA,EAEzE;AAGA,MAAI,MAAmB,MAAM,KAAK,EAAC,QAAQ,WAAW,QAAO;AAC7D,MAAI,MAAM;AAEV,mBAAiB,QAAQ,YAAY;AACnC,UAAM,QAAQ;AACd,eAAW,QAAQ,UAAU,MAAM,KAAK,GAAG;AACzC,UAAI,GAAG,IAAI,SAAS,OAAO,OAAQ,WAAW,GAAG,EAAE,IAAI;AAEvD,UAAI,EAAE,QAAQ,WAAW,QAAQ;AAC/B,kBAAU,KAAK,GAAG;AAClB,eAAO;AACP,cAAM,MAAM,KAAK,EAAC,QAAQ,WAAW,QAAO;AAC5C,cAAM;AAAA,MACR;AAAA,IACF;AACA,qBAAiB,MAAM
;AACvB,kBAAc,MAAM;AAEpB,QAAI,iBAAiB,qBAAqB;AACxC,YAAM,EAAC,KAAK,YAAY,GAAG,UAAU,WAAW,OAAA;AAChD;AACA,iBAAA;AACA,kBAAY,CAAA;AACZ,sBAAgB;AAAA,IAClB;AAAA,EACF;AAGA,MAAI,UAAU,SAAS,GAAG;AACxB,UAAM,EAAC,KAAK,YAAY,GAAG,UAAU,WAAW,OAAA;AAChD;AACA,eAAA;AAAA,EACF;AAEA,QAAM,EAAC,KAAK,sBAAsB,GAAG,UAAU,OAAA;AAC/C,aAAW,YAAY,IAAA,IAAQ,OAAO,QAAQ,CAAC;AAC/C,KAAG;AAAA,IACD,sBAAsB,OAAO,IAAI,UAAU,SAAS,UAAU,UAAU,WAClE,OAAO;AAAA,EAAA;AAEjB;AAWA,eAAe,YACb,IACA,aACA,IACA,gBACA;AACA,QAAM,qBAAqB,SAAS,IAAI,aAAa;AAAA,IACnD,CAAC,aAAa,GAAG;AAAA;AAAA,IACjB,YAAY,EAAC,aAAa,WAAA;AAAA;AAAA,EAAU,CACrC;AACD,QAAM,WAAW,GAAG,cAAc,OAAO,KAAK,KAAK;AACnD,MAAI;AACF,UAAM,EAAC,eAAe,UAAU,kBAAkB,QAChD,MAAM,sBAAsB,IAAI,oBAAoB,QAAQ;AAE9D,UAAM,GAAG;AAAA,MAAgB,CAAA,QACvB,IAAI,OAAO,6BAA6B,QAAQ,GAAG;AAAA,IAAA;AAIrD,UAAM,mBAAmB,OAAO,0BAA0B,QAAQ,GAAG;AAErE,UAAM,YAAY,qBAAqB,GAAG;AAC1C,OAAG,OAAO,sCAAsC,GAAG,KAAK,SAAS,GAAG;AACpE,WAAO;AAAA,EACT,SAAS,GAAG;AAEV,UAAM,mBAAmB;AAAA;AAAA,MAEvB;AAAA,8BACwB,QAAQ;AAAA,IAAA;AAElC,OAAG,QAAQ,sCAAsC,CAAC;AAClD,UAAM;AAAA,EACR,UAAA;AACE,UAAM,mBAAmB,IAAA;AAAA,EAC3B;AACF;AAEA,SAAS,eACP,IACA,cACA,IACA,WAIC;AACD,SAAO,GAAG,gBAAgB,OAAM,QAAO;AACrC,UAAM,EAAC,OAAA,IAAU,MAAM,mBAAmB,KAAK,YAAY;AAC3D,UAAM,OAAO,OAAO;AAAA,MAClB,CAAAA,UAAQA,MAAK,WAAW,GAAG,MAAM,UAAUA,MAAK,SAAS,GAAG,MAAM;AAAA,IAAA;AAEpE,QAAI,CAAC,MAAM;AACT,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,MAAA;AAAA,IAEJ;AACA,UAAM,YAAYC,MAAQ,GAAG,MAAM,UAAU,mBAAmB;AAChE,QAAI,KAAK,cAAc,UAAU,WAAW;AAC1C,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,MAAA;AAAA,IAEJ;AACA,QAAI,KAAK,QAAQ,UAAU,aAAa;AACtC,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,MAAA;AAAA,IAEJ;AACA,QACE,CAAC;AAAA,MACC,IAAI,IAAI,OAAO,KAAK,UAAU,MAAM,CAAC;AAAA,MACrC,IAAI,IAAI,KAAK,sBAAsB;AAAA,IAAA,GAErC;AACA,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,MAAA;AAAA,IAEJ;AACA,UAAM,UAAU;AAAA,MACd,GAAG,OAAO,QAAQ,UAAU,MAAM;AAAA,MAClC,GAAG,OAAO,QAAQ,GAAG,OAAO;AAAA,IAAA;AAE9B,eAAW,CAAC,KAAK,GAAG,KAAK,SAAS;AAChC,YAAM,UAAU,KAAK,QAAQ,GAAG;AAChC,UAAI,CAAC,SAAS;AACZ,cAAM,IAAI;AAAA,UACR;AAAA,UACA,UAAU,GAAG;AAAA,QAAA;AAAA,MAEjB;AACA,YAAM,UAAUA,MAAQ,KAAK,oBAAoB;AACjD,UAAI,QAAQ,WAAW,QAAQ,KAAK;AAClC,cAAM,IAAI;AAAA,UACR;AAAA,UACA,UAAU,GAAG;AAAA,QAAA;AAAA,MAEjB;AAAA,IACF;AACA,UAAM,WAA2B;AAAA,MAC/B,UAAU;AAAA,QACR,QAAQ,GAAG,MAAM;AAAA,QACjB,MAAM,GAAG,MAAM;AAAA,QACf,QAAQ,EAAC,SAAS,OAAO,KAAK,UAAU,MAAM,EAAA;AAAA,MAAC;AAAA,MAEjD,SAAS,OAAO,KAAK,GAAG,OAAO,EAAE;AAAA,QAC/B,CAAA,QAAO,EAAE,OAAO,UAAU;AAAA,MAAA;AAAA,MAE5B;AAAA,IAAA;AAEF,WAAO,EAAC,WAAW,MAAM,SAAA;AAAA,EAC3B,CAAC;AACH;"}
+ {"version":3,"file":"backfill-stream.js","sources":["../../../../../../../zero-cache/src/services/change-source/pg/backfill-stream.ts"],"sourcesContent":["import {\n PG_UNDEFINED_COLUMN,\n PG_UNDEFINED_TABLE,\n} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport postgres from 'postgres';\nimport {equals} from '../../../../../shared/src/set-utils.ts';\nimport * as v from '../../../../../shared/src/valita.ts';\nimport {READONLY} from '../../../db/mode-enum.ts';\nimport {TsvParser} from '../../../db/pg-copy.ts';\nimport {getTypeParsers, type TypeParser} from '../../../db/pg-type-parser.ts';\nimport type {PublishedTableSpec} from '../../../db/specs.ts';\nimport {importSnapshot, TransactionPool} from '../../../db/transaction-pool.ts';\nimport {pgClient, type PostgresDB} from '../../../types/pg.ts';\nimport {SchemaIncompatibilityError} from '../common/backfill-manager.ts';\nimport type {\n BackfillCompleted,\n BackfillRequest,\n DownloadStatus,\n JSONValue,\n MessageBackfill,\n} from '../protocol/current.ts';\nimport {\n columnMetadataSchema,\n tableMetadataSchema,\n} from './backfill-metadata.ts';\nimport {\n createReplicationSlot,\n makeDownloadStatements,\n type DownloadStatements,\n} from './initial-sync.ts';\nimport {toStateVersionString} from './lsn.ts';\nimport {getPublicationInfo} from './schema/published.ts';\nimport type {Replica} from './schema/shard.ts';\n\ntype BackfillParams = Omit<BackfillCompleted, 'tag'>;\n\ntype StreamOptions = {\n /**\n * The number of bytes at which to flush a batch of rows in a\n * backfill message. Defaults to Node's getDefaultHighWatermark().\n */\n flushThresholdBytes?: number;\n};\n\n// The size of chunks that Postgres sends on COPY stream.\n// This happens to match NodeJS's getDefaultHighWatermark()\n// (for Node v20+).\nconst POSTGRES_COPY_CHUNK_SIZE = 64 * 1024;\n\n/**\n * Streams a series of `backfill` messages (ending with `backfill-complete`)\n * at a set watermark (i.e. LSN). 
The data is retrieved via a COPY stream\n * made at a transaction snapshot corresponding to specific LSN, obtained by\n * creating a short-lived replication slot.\n */\nexport async function* streamBackfill(\n lc: LogContext,\n upstreamURI: string,\n {slot, publications}: Pick<Replica, 'slot' | 'publications'>,\n bf: BackfillRequest,\n opts: StreamOptions = {},\n): AsyncGenerator<MessageBackfill | BackfillCompleted> {\n lc = lc\n .withContext('component', 'backfill')\n .withContext('table', bf.table.name);\n\n const {flushThresholdBytes = POSTGRES_COPY_CHUNK_SIZE} = opts;\n const db = pgClient(lc, upstreamURI, {\n connection: {['application_name']: 'backfill-stream'},\n ['max_lifetime']: 120 * 60, // set a long (2h) limit for COPY streaming\n });\n let tx: TransactionPool | undefined;\n let watermark: string;\n try {\n ({tx, watermark} = await createSnapshotTransaction(\n lc,\n upstreamURI,\n db,\n slot,\n ));\n const {tableSpec, backfill} = await validateSchema(\n tx,\n publications,\n bf,\n watermark,\n );\n const types = await getTypeParsers(db, {returnJsonAsString: true});\n\n // Note: validateSchema ensures that the rowKey and columns are disjoint\n const {relation, columns} = backfill;\n const cols = [...relation.rowKey.columns, ...columns];\n\n yield* stream(\n lc,\n tx,\n backfill,\n makeDownloadStatements(tableSpec, cols),\n cols.map(col => types.getTypeParser(tableSpec.columns[col].typeOID)),\n flushThresholdBytes,\n );\n } catch (e) {\n // Although we make the best effort to validate the schema at the\n // transaction snapshot, certain forms of `ALTER TABLE` are not\n // MVCC safe and not \"frozen\" in the snapshot:\n //\n // https://www.postgresql.org/docs/current/mvcc-caveats.html\n //\n // Handle these errors as schema incompatibility errors rather than\n // unknown runtime errors.\n if (\n e instanceof postgres.PostgresError &&\n (e.code === PG_UNDEFINED_TABLE || e.code === PG_UNDEFINED_COLUMN)\n ) {\n throw new SchemaIncompatibilityError(bf, String(e), {cause: e});\n }\n throw e;\n } finally {\n tx?.setDone();\n // Workaround postgres.js hanging at the end of some COPY commands:\n // https://github.com/porsager/postgres/issues/499\n void db.end().catch(e => lc.warn?.(`error closing backfill connection`, e));\n }\n}\n\nasync function* stream(\n lc: LogContext,\n tx: TransactionPool,\n backfill: BackfillParams,\n {select, getTotalRows, getTotalBytes}: DownloadStatements,\n colParsers: TypeParser[],\n flushThresholdBytes: number,\n): AsyncGenerator<MessageBackfill | BackfillCompleted> {\n const start = performance.now();\n const [rows, bytes] = await tx.processReadTask(sql =>\n Promise.all([\n sql.unsafe<{totalRows: bigint}[]>(getTotalRows),\n sql.unsafe<{totalBytes: bigint}[]>(getTotalBytes),\n ]),\n );\n const status: DownloadStatus = {\n rows: 0,\n totalRows: Number(rows[0].totalRows),\n totalBytes: Number(bytes[0].totalBytes),\n };\n\n let elapsed = (performance.now() - start).toFixed(3);\n lc.info?.(`Computed total rows and bytes for: ${select} (${elapsed} ms)`, {\n status,\n });\n const copyStream = await tx.processReadTask(sql =>\n sql.unsafe(`COPY (${select}) TO STDOUT`).readable(),\n );\n\n const tsvParser = new TsvParser();\n let totalBytes = 0;\n let totalMsgs = 0;\n let rowValues: JSONValue[][] = [];\n let bufferedBytes = 0;\n\n const logFlushed = () => {\n lc.debug?.(\n `Flushed ${rowValues.length} rows, ${bufferedBytes} bytes ` +\n `(total: rows=${status.rows}, msgs=${totalMsgs}, bytes=${totalBytes})`,\n );\n };\n\n // Tracks the row being parsed.\n let row: 
JSONValue[] = Array.from({length: colParsers.length});\n let col = 0;\n\n for await (const data of copyStream) {\n const chunk = data as Buffer;\n for (const text of tsvParser.parse(chunk)) {\n row[col] = text === null ? null : (colParsers[col](text) as JSONValue);\n\n if (++col === colParsers.length) {\n rowValues.push(row);\n status.rows++;\n row = Array.from({length: colParsers.length});\n col = 0;\n }\n }\n bufferedBytes += chunk.byteLength;\n totalBytes += chunk.byteLength;\n\n if (bufferedBytes >= flushThresholdBytes) {\n yield {tag: 'backfill', ...backfill, rowValues, status};\n totalMsgs++;\n logFlushed();\n rowValues = [];\n bufferedBytes = 0;\n }\n }\n\n // Flush the last batch of rows.\n if (rowValues.length > 0) {\n yield {tag: 'backfill', ...backfill, rowValues, status};\n totalMsgs++;\n logFlushed();\n }\n\n yield {tag: 'backfill-completed', ...backfill, status};\n elapsed = (performance.now() - start).toFixed(3);\n lc.info?.(\n `Finished streaming ${status.rows} rows, ${totalMsgs} msgs, ${totalBytes} bytes ` +\n `(${elapsed} ms)`,\n );\n}\n\n/**\n * Creates (and drops) a replication slot in order to obtain a snapshot\n * that corresponds with a specific LSN. Sets the snapshot on the\n * TransactionPool and returns the watermark corresponding to the LSN.\n *\n * (Note that PG's other LSN-related functions are not scoped to a\n * transaction; this is the only way to get set a transaction at a specific\n * LSN.)\n */\nasync function createSnapshotTransaction(\n lc: LogContext,\n upstreamURI: string,\n db: PostgresDB,\n slotNamePrefix: string,\n) {\n const replicationSession = pgClient(lc, upstreamURI, {\n ['fetch_types']: false, // Necessary for the streaming protocol\n connection: {replication: 'database'}, // https://www.postgresql.org/docs/current/protocol-replication.html\n });\n const tempSlot = `${slotNamePrefix}_bf_${Date.now()}`;\n try {\n const {snapshot_name: snapshot, consistent_point: lsn} =\n await createReplicationSlot(lc, replicationSession, tempSlot);\n\n const {init, imported} = importSnapshot(snapshot);\n const tx = new TransactionPool(lc, READONLY, init).run(db);\n await imported;\n await replicationSession.unsafe(`DROP_REPLICATION_SLOT \"${tempSlot}\"`);\n\n const watermark = toStateVersionString(lsn);\n lc.info?.(`Opened snapshot transaction at LSN ${lsn} (${watermark})`);\n return {tx, watermark};\n } catch (e) {\n // In the event of a failure, clean up the replication slot if created.\n await replicationSession.unsafe(\n /*sql*/\n `SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots\n WHERE slot_name = '${tempSlot}'`,\n );\n lc.error?.(`Failed to create backfill snapshot`, e);\n throw e;\n } finally {\n await replicationSession.end();\n }\n}\n\nfunction validateSchema(\n tx: TransactionPool,\n publications: string[],\n bf: BackfillRequest,\n watermark: string,\n): Promise<{\n tableSpec: PublishedTableSpec;\n backfill: BackfillParams;\n}> {\n return tx.processReadTask(async sql => {\n const {tables} = await getPublicationInfo(sql, publications);\n const spec = tables.find(\n spec => spec.schema === bf.table.schema && spec.name === bf.table.name,\n );\n if (!spec) {\n throw new SchemaIncompatibilityError(\n bf,\n `Table has been renamed or dropped`,\n );\n }\n const tableMeta = v.parse(bf.table.metadata, tableMetadataSchema);\n if (spec.schemaOID !== tableMeta.schemaOID) {\n throw new SchemaIncompatibilityError(\n bf,\n `Schema no longer corresponds to the original schema`,\n );\n }\n if (spec.oid !== tableMeta.relationOID) {\n throw new 
SchemaIncompatibilityError(\n bf,\n `Table no longer corresponds to the original table`,\n );\n }\n if (\n !equals(\n new Set(Object.keys(tableMeta.rowKey)),\n new Set(spec.replicaIdentityColumns),\n )\n ) {\n throw new SchemaIncompatibilityError(\n bf,\n 'Row key (e.g. PRIMARY KEY or INDEX) has changed',\n );\n }\n const allCols = [\n ...Object.entries(tableMeta.rowKey),\n ...Object.entries(bf.columns),\n ];\n for (const [col, val] of allCols) {\n const colSpec = spec.columns[col];\n if (!colSpec) {\n throw new SchemaIncompatibilityError(\n bf,\n `Column ${col} has been renamed or dropped`,\n );\n }\n const colMeta = v.parse(val, columnMetadataSchema);\n if (colMeta.attNum !== colSpec.pos) {\n throw new SchemaIncompatibilityError(\n bf,\n `Column ${col} no longer corresponds to the original column`,\n );\n }\n }\n const backfill: BackfillParams = {\n relation: {\n schema: bf.table.schema,\n name: bf.table.name,\n rowKey: {columns: Object.keys(tableMeta.rowKey)},\n },\n columns: Object.keys(bf.columns).filter(\n col => !(col in tableMeta.rowKey),\n ),\n watermark,\n };\n return {tableSpec: spec, backfill};\n });\n}\n"],"names":["spec","v.parse"],"mappings":";;;;;;;;;;;;;;AAgDA,MAAM,2BAA2B,KAAK;AAQtC,gBAAuB,eACrB,IACA,aACA,EAAC,MAAM,gBACP,IACA,OAAsB,IAC+B;AACrD,OAAK,GACF,YAAY,aAAa,UAAU,EACnC,YAAY,SAAS,GAAG,MAAM,IAAI;AAErC,QAAM,EAAC,sBAAsB,yBAAA,IAA4B;AACzD,QAAM,KAAK,SAAS,IAAI,aAAa;AAAA,IACnC,YAAY,EAAC,CAAC,kBAAkB,GAAG,kBAAA;AAAA,IACnC,CAAC,cAAc,GAAG,MAAM;AAAA;AAAA,EAAA,CACzB;AACD,MAAI;AACJ,MAAI;AACJ,MAAI;AACF,KAAC,EAAC,IAAI,UAAA,IAAa,MAAM;AAAA,MACvB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAEF,UAAM,EAAC,WAAW,SAAA,IAAY,MAAM;AAAA,MAClC;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAEF,UAAM,QAAQ,MAAM,eAAe,IAAI,EAAC,oBAAoB,MAAK;AAGjE,UAAM,EAAC,UAAU,QAAA,IAAW;AAC5B,UAAM,OAAO,CAAC,GAAG,SAAS,OAAO,SAAS,GAAG,OAAO;AAEpD,WAAO;AAAA,MACL;AAAA,MACA;AAAA,MACA;AAAA,MACA,uBAAuB,WAAW,IAAI;AAAA,MACtC,KAAK,IAAI,CAAA,QAAO,MAAM,cAAc,UAAU,QAAQ,GAAG,EAAE,OAAO,CAAC;AAAA,MACnE;AAAA,IAAA;AAAA,EAEJ,SAAS,GAAG;AASV,QACE,aAAa,SAAS,kBACrB,EAAE,SAAS,sBAAsB,EAAE,SAAS,sBAC7C;AACA,YAAM,IAAI,2BAA2B,IAAI,OAAO,CAAC,GAAG,EAAC,OAAO,GAAE;AAAA,IAChE;AACA,UAAM;AAAA,EACR,UAAA;AACE,QAAI,QAAA;AAGJ,SAAK,GAAG,MAAM,MAAM,OAAK,GAAG,OAAO,qCAAqC,CAAC,CAAC;AAAA,EAC5E;AACF;AAEA,gBAAgB,OACd,IACA,IACA,UACA,EAAC,QAAQ,cAAc,cAAA,GACvB,YACA,qBACqD;AACrD,QAAM,QAAQ,YAAY,IAAA;AAC1B,QAAM,CAAC,MAAM,KAAK,IAAI,MAAM,GAAG;AAAA,IAAgB,CAAA,QAC7C,QAAQ,IAAI;AAAA,MACV,IAAI,OAA8B,YAAY;AAAA,MAC9C,IAAI,OAA+B,aAAa;AAAA,IAAA,CACjD;AAAA,EAAA;AAEH,QAAM,SAAyB;AAAA,IAC7B,MAAM;AAAA,IACN,WAAW,OAAO,KAAK,CAAC,EAAE,SAAS;AAAA,IACnC,YAAY,OAAO,MAAM,CAAC,EAAE,UAAU;AAAA,EAAA;AAGxC,MAAI,WAAW,YAAY,IAAA,IAAQ,OAAO,QAAQ,CAAC;AACnD,KAAG,OAAO,sCAAsC,MAAM,KAAK,OAAO,QAAQ;AAAA,IACxE;AAAA,EAAA,CACD;AACD,QAAM,aAAa,MAAM,GAAG;AAAA,IAAgB,SAC1C,IAAI,OAAO,SAAS,MAAM,aAAa,EAAE,SAAA;AAAA,EAAS;AAGpD,QAAM,YAAY,IAAI,UAAA;AACtB,MAAI,aAAa;AACjB,MAAI,YAAY;AAChB,MAAI,YAA2B,CAAA;AAC/B,MAAI,gBAAgB;AAEpB,QAAM,aAAa,MAAM;AACvB,OAAG;AAAA,MACD,WAAW,UAAU,MAAM,UAAU,aAAa,uBAChC,OAAO,IAAI,UAAU,SAAS,WAAW,UAAU;AAAA,IAAA;AAAA,EAEzE;AAGA,MAAI,MAAmB,MAAM,KAAK,EAAC,QAAQ,WAAW,QAAO;AAC7D,MAAI,MAAM;AAEV,mBAAiB,QAAQ,YAAY;AACnC,UAAM,QAAQ;AACd,eAAW,QAAQ,UAAU,MAAM,KAAK,GAAG;AACzC,UAAI,GAAG,IAAI,SAAS,OAAO,OAAQ,WAAW,GAAG,EAAE,IAAI;AAEvD,UAAI,EAAE,QAAQ,WAAW,QAAQ;AAC/B,kBAAU,KAAK,GAAG;AAClB,eAAO;AACP,cAAM,MAAM,KAAK,EAAC,QAAQ,WAAW,QAAO;AAC5C,cAAM;AAAA,MACR;AAAA,IACF;AACA,qBAAiB,MAAM;AACvB,kBAAc,MAAM;AAEpB,QAAI,iBAAiB,qBAAqB;AACxC,YAAM,EAAC,KAAK,YAAY,GAAG,UAAU,WAAW,OAAA;AAChD;AACA,iBAAA;AACA,kBAAY,CAAA;AACZ,sBAAgB;AAAA,IAClB;AAAA,EACF;AAGA,MAAI,
UAAU,SAAS,GAAG;AACxB,UAAM,EAAC,KAAK,YAAY,GAAG,UAAU,WAAW,OAAA;AAChD;AACA,eAAA;AAAA,EACF;AAEA,QAAM,EAAC,KAAK,sBAAsB,GAAG,UAAU,OAAA;AAC/C,aAAW,YAAY,IAAA,IAAQ,OAAO,QAAQ,CAAC;AAC/C,KAAG;AAAA,IACD,sBAAsB,OAAO,IAAI,UAAU,SAAS,UAAU,UAAU,WAClE,OAAO;AAAA,EAAA;AAEjB;AAWA,eAAe,0BACb,IACA,aACA,IACA,gBACA;AACA,QAAM,qBAAqB,SAAS,IAAI,aAAa;AAAA,IACnD,CAAC,aAAa,GAAG;AAAA;AAAA,IACjB,YAAY,EAAC,aAAa,WAAA;AAAA;AAAA,EAAU,CACrC;AACD,QAAM,WAAW,GAAG,cAAc,OAAO,KAAK,KAAK;AACnD,MAAI;AACF,UAAM,EAAC,eAAe,UAAU,kBAAkB,QAChD,MAAM,sBAAsB,IAAI,oBAAoB,QAAQ;AAE9D,UAAM,EAAC,MAAM,aAAY,eAAe,QAAQ;AAChD,UAAM,KAAK,IAAI,gBAAgB,IAAI,UAAU,IAAI,EAAE,IAAI,EAAE;AACzD,UAAM;AACN,UAAM,mBAAmB,OAAO,0BAA0B,QAAQ,GAAG;AAErE,UAAM,YAAY,qBAAqB,GAAG;AAC1C,OAAG,OAAO,sCAAsC,GAAG,KAAK,SAAS,GAAG;AACpE,WAAO,EAAC,IAAI,UAAA;AAAA,EACd,SAAS,GAAG;AAEV,UAAM,mBAAmB;AAAA;AAAA,MAEvB;AAAA,8BACwB,QAAQ;AAAA,IAAA;AAElC,OAAG,QAAQ,sCAAsC,CAAC;AAClD,UAAM;AAAA,EACR,UAAA;AACE,UAAM,mBAAmB,IAAA;AAAA,EAC3B;AACF;AAEA,SAAS,eACP,IACA,cACA,IACA,WAIC;AACD,SAAO,GAAG,gBAAgB,OAAM,QAAO;AACrC,UAAM,EAAC,OAAA,IAAU,MAAM,mBAAmB,KAAK,YAAY;AAC3D,UAAM,OAAO,OAAO;AAAA,MAClB,CAAAA,UAAQA,MAAK,WAAW,GAAG,MAAM,UAAUA,MAAK,SAAS,GAAG,MAAM;AAAA,IAAA;AAEpE,QAAI,CAAC,MAAM;AACT,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,MAAA;AAAA,IAEJ;AACA,UAAM,YAAYC,MAAQ,GAAG,MAAM,UAAU,mBAAmB;AAChE,QAAI,KAAK,cAAc,UAAU,WAAW;AAC1C,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,MAAA;AAAA,IAEJ;AACA,QAAI,KAAK,QAAQ,UAAU,aAAa;AACtC,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,MAAA;AAAA,IAEJ;AACA,QACE,CAAC;AAAA,MACC,IAAI,IAAI,OAAO,KAAK,UAAU,MAAM,CAAC;AAAA,MACrC,IAAI,IAAI,KAAK,sBAAsB;AAAA,IAAA,GAErC;AACA,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,MAAA;AAAA,IAEJ;AACA,UAAM,UAAU;AAAA,MACd,GAAG,OAAO,QAAQ,UAAU,MAAM;AAAA,MAClC,GAAG,OAAO,QAAQ,GAAG,OAAO;AAAA,IAAA;AAE9B,eAAW,CAAC,KAAK,GAAG,KAAK,SAAS;AAChC,YAAM,UAAU,KAAK,QAAQ,GAAG;AAChC,UAAI,CAAC,SAAS;AACZ,cAAM,IAAI;AAAA,UACR;AAAA,UACA,UAAU,GAAG;AAAA,QAAA;AAAA,MAEjB;AACA,YAAM,UAAUA,MAAQ,KAAK,oBAAoB;AACjD,UAAI,QAAQ,WAAW,QAAQ,KAAK;AAClC,cAAM,IAAI;AAAA,UACR;AAAA,UACA,UAAU,GAAG;AAAA,QAAA;AAAA,MAEjB;AAAA,IACF;AACA,UAAM,WAA2B;AAAA,MAC/B,UAAU;AAAA,QACR,QAAQ,GAAG,MAAM;AAAA,QACjB,MAAM,GAAG,MAAM;AAAA,QACf,QAAQ,EAAC,SAAS,OAAO,KAAK,UAAU,MAAM,EAAA;AAAA,MAAC;AAAA,MAEjD,SAAS,OAAO,KAAK,GAAG,OAAO,EAAE;AAAA,QAC/B,CAAA,QAAO,EAAE,OAAO,UAAU;AAAA,MAAA;AAAA,MAE5B;AAAA,IAAA;AAEF,WAAO,EAAC,WAAW,MAAM,SAAA;AAAA,EAC3B,CAAC;AACH;"}
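The `sourcesContent` embedded in the new map above shows how the `stream` helper batches parsed COPY rows and flushes one `backfill` message whenever the buffered bytes reach `flushThresholdBytes` (defaulting to `POSTGRES_COPY_CHUNK_SIZE`, 64 KiB). A minimal standalone sketch of that batching loop, using the hypothetical name `batchByBytes` and a simplified chunk shape in place of the package's TSV parser and protocol messages:

    // Sketch of the byte-threshold batching in `stream`. The real code parses
    // TSV chunks from a Postgres COPY stream and yields protocol messages that
    // carry row/byte counters, not bare row arrays.
    type Row = unknown[];

    async function* batchByBytes(
      chunks: AsyncIterable<{rows: Row[]; byteLength: number}>,
      flushThresholdBytes = 64 * 1024, // matches POSTGRES_COPY_CHUNK_SIZE
    ): AsyncGenerator<Row[]> {
      let buffered: Row[] = [];
      let bufferedBytes = 0;
      for await (const chunk of chunks) {
        buffered.push(...chunk.rows);
        bufferedBytes += chunk.byteLength;
        if (bufferedBytes >= flushThresholdBytes) {
          yield buffered; // one `backfill` message worth of rows
          buffered = [];
          bufferedBytes = 0;
        }
      }
      if (buffered.length > 0) {
        yield buffered; // flush the final partial batch
      }
    }

As in the embedded source, the final partial batch is flushed before the terminating `backfill-completed` message, so rows are not dropped when the COPY stream ends mid-batch.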
@@ -1 +1 @@
- {"version":3,"file":"change-source.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/change-source/pg/change-source.ts"],"names":[],"mappings":"AAIA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAqBjD,OAAO,KAAK,EAAa,kBAAkB,EAAC,MAAM,sBAAsB,CAAC;AAEzE,OAAO,EAAC,KAAK,WAAW,EAAC,MAAM,gCAAgC,CAAC;AAEhE,OAAO,EAEL,KAAK,WAAW,EAEjB,MAAM,0BAA0B,CAAC;AAKlC,OAAO,KAAK,EAAC,IAAI,EAAC,MAAM,2BAA2B,CAAC;AAEpD,OAAO,EAEL,KAAK,iBAAiB,EAEvB,MAAM,8CAA8C,CAAC;AACtD,OAAO,KAAK,EAAC,YAAY,EAAe,MAAM,qBAAqB,CAAC;AAEpE,OAAO,EAEL,KAAK,QAAQ,EACd,MAAM,wCAAwC,CAAC;AAUhD,OAAO,KAAK,EAEV,mBAAmB,EAEpB,MAAM,mCAAmC,CAAC;AAG3C,OAAO,EAEL,KAAK,kBAAkB,EACvB,KAAK,aAAa,EACnB,MAAM,mBAAmB,CAAC;AAC3B,OAAO,KAAK,EAGV,eAAe,IAAI,gBAAgB,EACpC,MAAM,yCAAyC,CAAC;AAuBjD;;;;GAIG;AACH,wBAAsB,8BAA8B,CAClD,EAAE,EAAE,UAAU,EACd,WAAW,EAAE,MAAM,EACnB,KAAK,EAAE,WAAW,EAClB,aAAa,EAAE,MAAM,EACrB,WAAW,EAAE,kBAAkB,EAC/B,OAAO,EAAE,aAAa,GACrB,OAAO,CAAC;IAAC,iBAAiB,EAAE,iBAAiB,CAAC;IAAC,YAAY,EAAE,YAAY,CAAA;CAAC,CAAC,CAqC7E;AA8YD,qBAAa,KAAM,YAAW,QAAQ;;gBAIxB,IAAI,EAAE,IAAI,CAAC,MAAM,CAAC;IAI9B,QAAQ,CAAC,MAAM,EAAE,mBAAmB,GAAG,IAAI;IAgC3C,GAAG,CAAC,SAAS,EAAE,WAAW;CAoB3B;AA0gBD,wBAAgB,iBAAiB,CAAC,CAAC,EAAE,kBAAkB,EAAE,CAAC,EAAE,gBAAgB,WAwB3E"}
+ {"version":3,"file":"change-source.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/change-source/pg/change-source.ts"],"names":[],"mappings":"AAIA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAqBjD,OAAO,KAAK,EAGV,kBAAkB,EACnB,MAAM,sBAAsB,CAAC;AAE9B,OAAO,EAAC,KAAK,WAAW,EAAC,MAAM,gCAAgC,CAAC;AAEhE,OAAO,EAEL,KAAK,WAAW,EAEjB,MAAM,0BAA0B,CAAC;AAKlC,OAAO,KAAK,EAAC,IAAI,EAAC,MAAM,2BAA2B,CAAC;AAEpD,OAAO,EAEL,KAAK,iBAAiB,EAEvB,MAAM,8CAA8C,CAAC;AACtD,OAAO,KAAK,EAAC,YAAY,EAAe,MAAM,qBAAqB,CAAC;AAEpE,OAAO,EAEL,KAAK,QAAQ,EACd,MAAM,wCAAwC,CAAC;AAUhD,OAAO,KAAK,EAEV,mBAAmB,EAEpB,MAAM,mCAAmC,CAAC;AAG3C,OAAO,EAEL,KAAK,kBAAkB,EACvB,KAAK,aAAa,EACnB,MAAM,mBAAmB,CAAC;AAC3B,OAAO,KAAK,EAGV,eAAe,IAAI,gBAAgB,EACpC,MAAM,yCAAyC,CAAC;AAuBjD;;;;GAIG;AACH,wBAAsB,8BAA8B,CAClD,EAAE,EAAE,UAAU,EACd,WAAW,EAAE,MAAM,EACnB,KAAK,EAAE,WAAW,EAClB,aAAa,EAAE,MAAM,EACrB,WAAW,EAAE,kBAAkB,EAC/B,OAAO,EAAE,aAAa,GACrB,OAAO,CAAC;IAAC,iBAAiB,EAAE,iBAAiB,CAAC;IAAC,YAAY,EAAE,YAAY,CAAA;CAAC,CAAC,CAqC7E;AA8YD,qBAAa,KAAM,YAAW,QAAQ;;gBAIxB,IAAI,EAAE,IAAI,CAAC,MAAM,CAAC;IAI9B,QAAQ,CAAC,MAAM,EAAE,mBAAmB,GAAG,IAAI;IAgC3C,GAAG,CAAC,SAAS,EAAE,WAAW;CAoB3B;AAgiBD,wBAAgB,iBAAiB,CAAC,CAAC,EAAE,kBAAkB,EAAE,CAAC,EAAE,gBAAgB,WAwB3E"}