@rocicorp/zero 0.26.0 → 0.26.1-canary.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (129)
  1. package/out/analyze-query/src/run-ast.d.ts.map +1 -1
  2. package/out/analyze-query/src/run-ast.js +4 -1
  3. package/out/analyze-query/src/run-ast.js.map +1 -1
  4. package/out/replicache/src/btree/node.js +4 -4
  5. package/out/replicache/src/btree/node.js.map +1 -1
  6. package/out/replicache/src/btree/write.js +2 -2
  7. package/out/replicache/src/btree/write.js.map +1 -1
  8. package/out/replicache/src/dag/gc.js +5 -2
  9. package/out/replicache/src/dag/gc.js.map +1 -1
  10. package/out/replicache/src/db/write.d.ts.map +1 -1
  11. package/out/replicache/src/db/write.js +21 -6
  12. package/out/replicache/src/db/write.js.map +1 -1
  13. package/out/replicache/src/error-responses.d.ts.map +1 -1
  14. package/out/replicache/src/error-responses.js +4 -1
  15. package/out/replicache/src/error-responses.js.map +1 -1
  16. package/out/replicache/src/persist/clients.d.ts.map +1 -1
  17. package/out/replicache/src/persist/clients.js +4 -1
  18. package/out/replicache/src/persist/clients.js.map +1 -1
  19. package/out/replicache/src/persist/collect-idb-databases.d.ts.map +1 -1
  20. package/out/replicache/src/persist/collect-idb-databases.js +2 -1
  21. package/out/replicache/src/persist/collect-idb-databases.js.map +1 -1
  22. package/out/replicache/src/persist/idb-databases-store.d.ts.map +1 -1
  23. package/out/replicache/src/persist/idb-databases-store.js +4 -1
  24. package/out/replicache/src/persist/idb-databases-store.js.map +1 -1
  25. package/out/replicache/src/process-scheduler.js +4 -1
  26. package/out/replicache/src/process-scheduler.js.map +1 -1
  27. package/out/replicache/src/replicache-impl.js +2 -2
  28. package/out/replicache/src/replicache-impl.js.map +1 -1
  29. package/out/replicache/src/subscriptions.d.ts.map +1 -1
  30. package/out/replicache/src/subscriptions.js +5 -2
  31. package/out/replicache/src/subscriptions.js.map +1 -1
  32. package/out/replicache/src/sync/diff.d.ts.map +1 -1
  33. package/out/replicache/src/sync/diff.js +4 -1
  34. package/out/replicache/src/sync/diff.js.map +1 -1
  35. package/out/replicache/src/sync/pull.d.ts.map +1 -1
  36. package/out/replicache/src/sync/pull.js +4 -1
  37. package/out/replicache/src/sync/pull.js.map +1 -1
  38. package/out/replicache/src/sync/push.d.ts.map +1 -1
  39. package/out/replicache/src/sync/push.js +5 -2
  40. package/out/replicache/src/sync/push.js.map +1 -1
  41. package/out/shared/src/asserts.d.ts +1 -1
  42. package/out/shared/src/asserts.d.ts.map +1 -1
  43. package/out/shared/src/asserts.js +1 -1
  44. package/out/shared/src/asserts.js.map +1 -1
  45. package/out/z2s/src/compiler.d.ts.map +1 -1
  46. package/out/z2s/src/compiler.js +8 -2
  47. package/out/z2s/src/compiler.js.map +1 -1
  48. package/out/zero/package.json.js +1 -1
  49. package/out/zero-cache/src/db/transaction-pool.d.ts.map +1 -1
  50. package/out/zero-cache/src/db/transaction-pool.js +17 -11
  51. package/out/zero-cache/src/db/transaction-pool.js.map +1 -1
  52. package/out/zero-cache/src/observability/events.d.ts.map +1 -1
  53. package/out/zero-cache/src/observability/events.js +28 -9
  54. package/out/zero-cache/src/observability/events.js.map +1 -1
  55. package/out/zero-cache/src/services/analyze.js +1 -0
  56. package/out/zero-cache/src/services/analyze.js.map +1 -1
  57. package/out/zero-cache/src/services/change-source/pg/backfill-stream.d.ts.map +1 -1
  58. package/out/zero-cache/src/services/change-source/pg/backfill-stream.js +29 -14
  59. package/out/zero-cache/src/services/change-source/pg/backfill-stream.js.map +1 -1
  60. package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts +6 -1
  61. package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts.map +1 -1
  62. package/out/zero-cache/src/services/change-source/pg/initial-sync.js +69 -25
  63. package/out/zero-cache/src/services/change-source/pg/initial-sync.js.map +1 -1
  64. package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts.map +1 -1
  65. package/out/zero-cache/src/services/change-source/pg/schema/ddl.js +6 -1
  66. package/out/zero-cache/src/services/change-source/pg/schema/ddl.js.map +1 -1
  67. package/out/zero-cache/src/services/change-source/pg/schema/init.d.ts.map +1 -1
  68. package/out/zero-cache/src/services/change-source/pg/schema/init.js +12 -8
  69. package/out/zero-cache/src/services/change-source/pg/schema/init.js.map +1 -1
  70. package/out/zero-cache/src/services/change-source/protocol/current/data.d.ts +26 -0
  71. package/out/zero-cache/src/services/change-source/protocol/current/data.d.ts.map +1 -1
  72. package/out/zero-cache/src/services/change-source/protocol/current/data.js +15 -3
  73. package/out/zero-cache/src/services/change-source/protocol/current/data.js.map +1 -1
  74. package/out/zero-cache/src/services/change-source/protocol/current/downstream.d.ts +30 -0
  75. package/out/zero-cache/src/services/change-source/protocol/current/downstream.d.ts.map +1 -1
  76. package/out/zero-cache/src/services/change-source/protocol/current.js +2 -1
  77. package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts.map +1 -1
  78. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js +8 -2
  79. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js.map +1 -1
  80. package/out/zero-cache/src/services/change-streamer/change-streamer.d.ts +10 -0
  81. package/out/zero-cache/src/services/change-streamer/change-streamer.d.ts.map +1 -1
  82. package/out/zero-cache/src/services/replicator/change-processor.d.ts +2 -0
  83. package/out/zero-cache/src/services/replicator/change-processor.d.ts.map +1 -1
  84. package/out/zero-cache/src/services/replicator/change-processor.js +8 -6
  85. package/out/zero-cache/src/services/replicator/change-processor.js.map +1 -1
  86. package/out/zero-cache/src/services/replicator/incremental-sync.d.ts.map +1 -1
  87. package/out/zero-cache/src/services/replicator/incremental-sync.js +39 -1
  88. package/out/zero-cache/src/services/replicator/incremental-sync.js.map +1 -1
  89. package/out/zero-cache/src/services/replicator/replication-status.d.ts +4 -3
  90. package/out/zero-cache/src/services/replicator/replication-status.d.ts.map +1 -1
  91. package/out/zero-cache/src/services/replicator/replication-status.js +25 -10
  92. package/out/zero-cache/src/services/replicator/replication-status.js.map +1 -1
  93. package/out/zero-cache/src/services/run-ast.d.ts.map +1 -1
  94. package/out/zero-cache/src/services/run-ast.js +22 -2
  95. package/out/zero-cache/src/services/run-ast.js.map +1 -1
  96. package/out/zero-cache/src/services/running-state.d.ts +1 -0
  97. package/out/zero-cache/src/services/running-state.d.ts.map +1 -1
  98. package/out/zero-cache/src/services/running-state.js +4 -0
  99. package/out/zero-cache/src/services/running-state.js.map +1 -1
  100. package/out/zero-cache/src/services/view-syncer/cvr.d.ts.map +1 -1
  101. package/out/zero-cache/src/services/view-syncer/cvr.js +8 -2
  102. package/out/zero-cache/src/services/view-syncer/cvr.js.map +1 -1
  103. package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts.map +1 -1
  104. package/out/zero-cache/src/services/view-syncer/pipeline-driver.js +10 -1
  105. package/out/zero-cache/src/services/view-syncer/pipeline-driver.js.map +1 -1
  106. package/out/zero-cache/src/services/view-syncer/snapshotter.d.ts +1 -1
  107. package/out/zero-cache/src/services/view-syncer/snapshotter.d.ts.map +1 -1
  108. package/out/zero-cache/src/services/view-syncer/snapshotter.js +15 -7
  109. package/out/zero-cache/src/services/view-syncer/snapshotter.js.map +1 -1
  110. package/out/zero-cache/src/types/subscription.d.ts +3 -1
  111. package/out/zero-cache/src/types/subscription.d.ts.map +1 -1
  112. package/out/zero-cache/src/types/subscription.js +29 -9
  113. package/out/zero-cache/src/types/subscription.js.map +1 -1
  114. package/out/zero-client/src/client/http-string.js.map +1 -1
  115. package/out/zero-client/src/client/version.js +1 -1
  116. package/out/zero-client/src/client/zero.js.map +1 -1
  117. package/out/zero-events/src/status.d.ts +8 -0
  118. package/out/zero-events/src/status.d.ts.map +1 -1
  119. package/out/zero-schema/src/permissions.d.ts.map +1 -1
  120. package/out/zero-schema/src/permissions.js +4 -1
  121. package/out/zero-schema/src/permissions.js.map +1 -1
  122. package/out/zero-server/src/process-mutations.d.ts.map +1 -1
  123. package/out/zero-server/src/process-mutations.js +13 -19
  124. package/out/zero-server/src/process-mutations.js.map +1 -1
  125. package/out/zql/src/builder/filter.d.ts.map +1 -1
  126. package/out/zql/src/builder/filter.js +5 -2
  127. package/out/zql/src/builder/filter.js.map +1 -1
  128. package/out/zql/src/ivm/constraint.js.map +1 -1
  129. package/package.json +1 -1
@@ -1 +1 @@
1
- {"version":3,"file":"change-processor.js","sources":["../../../../../../zero-cache/src/services/replicator/change-processor.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {SqliteError} from '@rocicorp/zero-sqlite3';\nimport {AbortError} from '../../../../shared/src/abort-error.ts';\nimport {assert, unreachable} from '../../../../shared/src/asserts.ts';\nimport {stringify} from '../../../../shared/src/bigint-json.ts';\nimport {must} from '../../../../shared/src/must.ts';\nimport {\n createLiteIndexStatement,\n createLiteTableStatement,\n liteColumnDef,\n} from '../../db/create.ts';\nimport {\n computeZqlSpecs,\n listIndexes,\n listTables,\n type LiteTableSpecWithReplicationStatus,\n} from '../../db/lite-tables.ts';\nimport {\n mapPostgresToLite,\n mapPostgresToLiteColumn,\n mapPostgresToLiteIndex,\n} from '../../db/pg-to-lite.ts';\nimport type {LiteTableSpec} from '../../db/specs.ts';\nimport type {StatementRunner} from '../../db/statements.ts';\nimport type {LexiVersion} from '../../types/lexi-version.ts';\nimport {\n JSON_PARSED,\n liteRow,\n type JSONFormat,\n type LiteRow,\n type LiteRowKey,\n type LiteValueType,\n} from '../../types/lite.ts';\nimport {liteTableName} from '../../types/names.ts';\nimport {id} from '../../types/sql.ts';\nimport type {\n BackfillCompleted,\n Change,\n ColumnAdd,\n ColumnDrop,\n ColumnUpdate,\n IndexCreate,\n IndexDrop,\n MessageBackfill,\n MessageCommit,\n MessageDelete,\n MessageInsert,\n MessageRelation,\n MessageTruncate,\n MessageUpdate,\n TableCreate,\n TableDrop,\n TableRename,\n TableUpdateMetadata,\n} from '../change-source/protocol/current/data.ts';\nimport type {ChangeStreamData} from '../change-source/protocol/current/downstream.ts';\nimport type {ReplicatorMode} from './replicator.ts';\nimport {ChangeLog, DEL_OP, SET_OP} from './schema/change-log.ts';\nimport {ColumnMetadataStore} from './schema/column-metadata.ts';\nimport {\n ZERO_VERSION_COLUMN_NAME,\n 
updateReplicationWatermark,\n} from './schema/replication-state.ts';\nimport {TableMetadataTracker} from './schema/table-metadata.ts';\n\nexport type ChangeProcessorMode = ReplicatorMode | 'initial-sync';\n\nexport type CommitResult = {\n watermark: string;\n schemaUpdated: boolean;\n changeLogUpdated: boolean;\n};\n\n/**\n * The ChangeProcessor partitions the stream of messages into transactions\n * by creating a {@link TransactionProcessor} when a transaction begins, and dispatching\n * messages to it until the commit is received.\n *\n * From https://www.postgresql.org/docs/current/protocol-logical-replication.html#PROTOCOL-LOGICAL-MESSAGES-FLOW :\n *\n * \"The logical replication protocol sends individual transactions one by one.\n * This means that all messages between a pair of Begin and Commit messages\n * belong to the same transaction.\"\n */\nexport class ChangeProcessor {\n readonly #db: StatementRunner;\n readonly #changeLog: ChangeLog;\n readonly #tableMetadata: TableMetadataTracker;\n readonly #mode: ChangeProcessorMode;\n readonly #failService: (lc: LogContext, err: unknown) => void;\n\n // The TransactionProcessor lazily loads table specs into this Map,\n // and reloads them after a schema change. 
It is cached here to avoid\n // reading them from the DB on every transaction.\n readonly #tableSpecs = new Map<string, LiteTableSpec>();\n\n #currentTx: TransactionProcessor | null = null;\n\n #failure: Error | undefined;\n\n constructor(\n db: StatementRunner,\n mode: ChangeProcessorMode,\n failService: (lc: LogContext, err: unknown) => void,\n ) {\n this.#db = db;\n this.#changeLog = new ChangeLog(db.db);\n this.#tableMetadata = new TableMetadataTracker(db.db);\n this.#mode = mode;\n this.#failService = failService;\n }\n\n #fail(lc: LogContext, err: unknown) {\n if (!this.#failure) {\n this.#currentTx?.abort(lc); // roll back any pending transaction.\n\n this.#failure = ensureError(err);\n\n if (!(err instanceof AbortError)) {\n // Propagate the failure up to the service.\n lc.error?.('Message Processing failed:', this.#failure);\n this.#failService(lc, this.#failure);\n }\n }\n }\n\n abort(lc: LogContext) {\n this.#fail(lc, new AbortError());\n }\n\n /** @return If a transaction was committed. */\n processMessage(\n lc: LogContext,\n downstream: ChangeStreamData,\n ): CommitResult | null {\n const [type, message] = downstream;\n if (this.#failure) {\n lc.debug?.(`Dropping ${message.tag}`);\n return null;\n }\n try {\n const watermark =\n type === 'begin'\n ? downstream[2].commitWatermark\n : type === 'commit'\n ? downstream[2].watermark\n : undefined;\n return this.#processMessage(lc, message, watermark);\n } catch (e) {\n this.#fail(lc, e);\n }\n return null;\n }\n\n #beginTransaction(\n lc: LogContext,\n commitVersion: string,\n jsonFormat: JSONFormat,\n ): TransactionProcessor {\n const start = Date.now();\n\n // litestream can technically hold the lock for an arbitrary amount of time\n // when checkpointing a large commit. Crashing on the busy-timeout in this\n // scenario will either produce a corrupt backup or otherwise prevent\n // replication from proceeding.\n //\n // Instead, retry the lock acquisition indefinitely. 
If this masks\n // an unknown deadlock situation, manual intervention will be necessary.\n for (let i = 0; ; i++) {\n try {\n return new TransactionProcessor(\n lc,\n this.#db,\n this.#mode,\n this.#changeLog,\n this.#tableMetadata,\n this.#tableSpecs,\n commitVersion,\n jsonFormat,\n );\n } catch (e) {\n if (e instanceof SqliteError && e.code === 'SQLITE_BUSY') {\n lc.warn?.(\n `SQLITE_BUSY for ${Date.now() - start} ms (attempt ${i + 1}). ` +\n `This is only expected if litestream is performing a large ` +\n `checkpoint.`,\n e,\n );\n continue;\n }\n throw e;\n }\n }\n }\n\n /** @return If a transaction was committed. */\n #processMessage(\n lc: LogContext,\n msg: Change,\n watermark: string | undefined,\n ): CommitResult | null {\n if (msg.tag === 'begin') {\n if (this.#currentTx) {\n throw new Error(`Already in a transaction ${stringify(msg)}`);\n }\n this.#currentTx = this.#beginTransaction(\n lc,\n must(watermark),\n msg.json ?? JSON_PARSED,\n );\n return null;\n }\n\n // For non-begin messages, there should be a #currentTx set.\n const tx = this.#currentTx;\n if (!tx) {\n throw new Error(\n `Received message outside of transaction: ${stringify(msg)}`,\n );\n }\n\n if (msg.tag === 'commit') {\n // Undef this.#currentTx to allow the assembly of the next transaction.\n this.#currentTx = null;\n\n assert(watermark, 'watermark is required for commit messages');\n const {schemaUpdated, changeLogUpdated} = tx.processCommit(\n msg,\n watermark,\n );\n return {watermark, schemaUpdated, changeLogUpdated};\n }\n\n if (msg.tag === 'rollback') {\n this.#currentTx?.abort(lc);\n this.#currentTx = null;\n return null;\n }\n\n switch (msg.tag) {\n case 'insert':\n tx.processInsert(msg);\n break;\n case 'update':\n tx.processUpdate(msg);\n break;\n case 'delete':\n tx.processDelete(msg);\n break;\n case 'truncate':\n tx.processTruncate(msg);\n break;\n case 'create-table':\n tx.processCreateTable(msg);\n break;\n case 'rename-table':\n tx.processRenameTable(msg);\n break;\n 
case 'update-table-metadata':\n tx.processTableMetadata(msg);\n break;\n case 'add-column':\n tx.processAddColumn(msg);\n break;\n case 'update-column':\n tx.processUpdateColumn(msg);\n break;\n case 'drop-column':\n tx.processDropColumn(msg);\n break;\n case 'drop-table':\n tx.processDropTable(msg);\n break;\n case 'create-index':\n tx.processCreateIndex(msg);\n break;\n case 'drop-index':\n tx.processDropIndex(msg);\n break;\n case 'backfill':\n tx.processBackfill(msg);\n break;\n case 'backfill-completed':\n tx.processBackfillCompleted(msg);\n break;\n default:\n unreachable(msg);\n }\n\n return null;\n }\n}\n\n/**\n * The {@link TransactionProcessor} handles the sequence of messages from\n * upstream, from `BEGIN` to `COMMIT` and executes the corresponding mutations\n * on the {@link postgres.TransactionSql} on the replica.\n *\n * When applying row contents to the replica, the `_0_version` column is added / updated,\n * and a corresponding entry in the `ChangeLog` is added. The version value is derived\n * from the watermark of the preceding transaction (stored as the `nextStateVersion` in the\n * `ReplicationState` table).\n *\n * Side note: For non-streaming Postgres transactions, the commitEndLsn (and thus\n * commit watermark) is available in the `begin` message, so it could theoretically\n * be used for the row version of changes within the transaction. However, the\n * commitEndLsn is not available in the streaming (in-progress) transaction\n * protocol, and may not be available for CDC streams of other upstream types.\n * Therefore, the zero replication protocol is designed to not require the commit\n * watermark when a transaction begins.\n *\n * Also of interest is the fact that all INSERT Messages are logically applied as\n * UPSERTs. 
See {@link processInsert} for the underlying motivation.\n */\nclass TransactionProcessor {\n readonly #lc: LogContext;\n readonly #startMs: number;\n readonly #db: StatementRunner;\n readonly #mode: ChangeProcessorMode;\n readonly #version: LexiVersion;\n readonly #changeLog: ChangeLog;\n readonly #tableMetadata: TableMetadataTracker;\n readonly #tableSpecs: Map<string, LiteTableSpecWithReplicationStatus>;\n readonly #jsonFormat: JSONFormat;\n readonly #columnMetadata: ColumnMetadataStore;\n\n #pos = 0;\n #schemaChanged = false;\n #numChangeLogEntries = 0;\n\n constructor(\n lc: LogContext,\n db: StatementRunner,\n mode: ChangeProcessorMode,\n changeLog: ChangeLog,\n tableMetadata: TableMetadataTracker,\n tableSpecs: Map<string, LiteTableSpecWithReplicationStatus>,\n commitVersion: LexiVersion,\n jsonFormat: JSONFormat,\n ) {\n this.#startMs = Date.now();\n this.#mode = mode;\n this.#jsonFormat = jsonFormat;\n\n switch (mode) {\n case 'serving':\n // Although the Replicator / Incremental Syncer is the only writer of the replica,\n // a `BEGIN CONCURRENT` transaction is used to allow View Syncers to simulate\n // (i.e. and `ROLLBACK`) changes on historic snapshots of the database for the\n // purpose of IVM).\n //\n // This TransactionProcessor is the only logic that will actually\n // `COMMIT` any transactions to the replica.\n db.beginConcurrent();\n break;\n case 'backup':\n // For the backup-replicator (i.e. replication-manager), there are no View Syncers\n // and thus BEGIN CONCURRENT is not necessary. 
In fact, BEGIN CONCURRENT can cause\n // deadlocks with forced wal-checkpoints (which `litestream replicate` performs),\n // so it is important to use vanilla transactions in this configuration.\n db.beginImmediate();\n break;\n case 'initial-sync':\n // When the ChangeProcessor is used for initial-sync, the calling code\n // handles the transaction boundaries.\n break;\n default:\n unreachable();\n }\n this.#db = db;\n this.#version = commitVersion;\n this.#lc = lc.withContext('version', commitVersion);\n this.#changeLog = changeLog;\n this.#tableMetadata = tableMetadata;\n this.#tableSpecs = tableSpecs;\n // The column_metadata table is guaranteed to exist since the\n // replica-schema.ts migration to v8.\n this.#columnMetadata = must(ColumnMetadataStore.getInstance(db.db));\n\n if (this.#tableSpecs.size === 0) {\n this.#reloadTableSpecs();\n }\n }\n\n #reloadTableSpecs() {\n this.#tableSpecs.clear();\n // zqlSpecs include the primary key derived from unique indexes\n const zqlSpecs = computeZqlSpecs(this.#lc, this.#db.db, {\n includeBackfillingColumns: true,\n });\n for (let spec of listTables(this.#db.db)) {\n if (!spec.primaryKey) {\n spec = {\n ...spec,\n primaryKey: [\n ...(zqlSpecs.get(spec.name)?.tableSpec.primaryKey ?? []),\n ],\n };\n }\n this.#tableSpecs.set(spec.name, spec);\n }\n }\n\n #tableSpec(name: string) {\n return must(this.#tableSpecs.get(name), `Unknown table ${name}`);\n }\n\n #getKey(\n {row, numCols}: {row: LiteRow; numCols: number},\n {relation}: {relation: MessageRelation},\n ): LiteRowKey {\n const keyColumns =\n relation.rowKey.type !== 'full'\n ? 
relation.rowKey.columns // already a suitable key\n : this.#tableSpec(liteTableName(relation)).primaryKey;\n if (!keyColumns?.length) {\n throw new Error(\n `Cannot replicate table \"${relation.name}\" without a PRIMARY KEY or UNIQUE INDEX`,\n );\n }\n // For the common case (replica identity default), the row is already the\n // key for deletes and updates, in which case a new object can be avoided.\n if (numCols === keyColumns.length) {\n return row;\n }\n const key: Record<string, LiteValueType> = {};\n for (const col of keyColumns) {\n key[col] = row[col];\n }\n return key;\n }\n\n processInsert(insert: MessageInsert) {\n const table = liteTableName(insert.relation);\n const tableSpec = this.#tableSpec(table);\n const newRow = liteRow(insert.new, tableSpec, this.#jsonFormat);\n\n this.#upsert(table, {\n ...newRow.row,\n [ZERO_VERSION_COLUMN_NAME]: this.#version,\n });\n\n if (insert.relation.rowKey.columns.length === 0) {\n // INSERTs can be replicated for rows without a PRIMARY KEY or a\n // UNIQUE INDEX. These are written to the replica but not recorded\n // in the changeLog, because these rows cannot participate in IVM.\n //\n // (Once the table schema has been corrected to include a key, the\n // associated schema change will reset pipelines and data can be\n // loaded via hydration.)\n return;\n }\n const key = this.#getKey(newRow, insert);\n this.#logSetOp(table, key, getBackfilledColumns(newRow.row, tableSpec));\n }\n\n #upsert(table: string, row: LiteRow) {\n const columns = Object.keys(row).map(c => id(c));\n this.#db.run(\n `\n INSERT OR REPLACE INTO ${id(table)} (${columns.join(',')})\n VALUES (${Array.from({length: columns.length}).fill('?').join(',')})\n `,\n Object.values(row),\n );\n }\n\n // Updates by default are applied as UPDATE commands to support partial\n // row specifications from the change source. 
In particular, this is needed\n // to handle updates for which unchanged TOASTed values are not sent:\n //\n // https://www.postgresql.org/docs/current/protocol-logicalrep-message-formats.html#PROTOCOL-LOGICALREP-MESSAGE-FORMATS-TUPLEDATA\n //\n // However, in certain cases an UPDATE may be received for a row that\n // was not initially synced, such as when, an existing table is added\n // to the app's publication.\n //\n // In order to facilitate \"resumptive\" replication, the logic falls back to\n // an INSERT if the update did not change any rows.\n processUpdate(update: MessageUpdate) {\n const table = liteTableName(update.relation);\n const tableSpec = this.#tableSpec(table);\n const newRow = liteRow(update.new, tableSpec, this.#jsonFormat);\n const row = {...newRow.row, [ZERO_VERSION_COLUMN_NAME]: this.#version};\n\n // update.key is set with the old values if the key has changed.\n const oldKey = update.key\n ? this.#getKey(\n liteRow(update.key, this.#tableSpec(table), this.#jsonFormat),\n update,\n )\n : null;\n const newKey = this.#getKey(newRow, update);\n\n if (oldKey) {\n this.#logDeleteOp(table, oldKey, tableSpec.backfilling);\n }\n this.#logSetOp(table, newKey, getBackfilledColumns(newRow.row, tableSpec));\n\n const currKey = oldKey ?? 
newKey;\n const conds = Object.keys(currKey).map(col => `${id(col)}=?`);\n const setExprs = Object.keys(row).map(col => `${id(col)}=?`);\n\n const {changes} = this.#db.run(\n `\n UPDATE ${id(table)}\n SET ${setExprs.join(',')}\n WHERE ${conds.join(' AND ')}\n `,\n [...Object.values(row), ...Object.values(currKey)],\n );\n\n // If the UPDATE did not affect any rows, perform an UPSERT of the\n // new row for resumptive replication.\n if (changes === 0) {\n this.#upsert(table, row);\n }\n }\n\n processDelete(del: MessageDelete) {\n const table = liteTableName(del.relation);\n const tableSpec = this.#tableSpec(table);\n const rowKey = this.#getKey(\n liteRow(del.key, tableSpec, this.#jsonFormat),\n del,\n );\n\n this.#delete(table, rowKey);\n this.#logDeleteOp(table, rowKey, tableSpec.backfilling);\n }\n\n #delete(table: string, rowKey: LiteRowKey) {\n const conds = Object.keys(rowKey).map(col => `${id(col)}=?`);\n this.#db.run(\n `DELETE FROM ${id(table)} WHERE ${conds.join(' AND ')}`,\n Object.values(rowKey),\n );\n }\n\n processTruncate(truncate: MessageTruncate) {\n for (const relation of truncate.relations) {\n const table = liteTableName(relation);\n // Update replica data.\n this.#db.run(`DELETE FROM ${id(table)}`);\n\n // Update change log.\n this.#logTruncateOp(table);\n }\n }\n\n processCreateTable(create: TableCreate) {\n if (create.metadata) {\n this.#tableMetadata.set(create.spec, create.metadata);\n }\n const table = mapPostgresToLite(create.spec);\n this.#db.db.exec(createLiteTableStatement(table));\n\n // Write to metadata table\n for (const [colName, colSpec] of Object.entries(create.spec.columns)) {\n this.#columnMetadata.insert(\n table.name,\n colName,\n colSpec,\n create.backfill?.[colName],\n );\n }\n\n if (\n Object.keys(create.backfill ?? {}).length ===\n Object.keys(create.spec.columns).length\n ) {\n this.#reloadTableSpecs();\n } else {\n // Make the table visible immediately unless all of the columns are\n // being backfilled. 
In the backfill case, the version bump will happen\n // with the backfill is complete.\n this.#logResetOp(table.name);\n }\n this.#lc.info?.(create.tag, table.name);\n }\n\n processTableMetadata(msg: TableUpdateMetadata) {\n this.#tableMetadata.set(msg.table, msg.new);\n }\n\n processRenameTable(rename: TableRename) {\n this.#tableMetadata.rename(rename.old, rename.new);\n\n const oldName = liteTableName(rename.old);\n const newName = liteTableName(rename.new);\n this.#db.db.exec(`ALTER TABLE ${id(oldName)} RENAME TO ${id(newName)}`);\n\n // Rename in metadata table\n this.#columnMetadata.renameTable(oldName, newName);\n\n this.#bumpVersions(newName);\n this.#logResetOp(oldName);\n this.#lc.info?.(rename.tag, oldName, newName);\n }\n\n processAddColumn(msg: ColumnAdd) {\n if (msg.tableMetadata) {\n this.#tableMetadata.set(msg.table, msg.tableMetadata);\n }\n const table = liteTableName(msg.table);\n const {name} = msg.column;\n const spec = mapPostgresToLiteColumn(table, msg.column);\n this.#db.db.exec(\n `ALTER TABLE ${id(table)} ADD ${id(name)} ${liteColumnDef(spec)}`,\n );\n\n // Write to metadata table\n this.#columnMetadata.insert(table, name, msg.column.spec, msg.backfill);\n\n if (msg.backfill) {\n this.#reloadTableSpecs();\n } else {\n // Make the new column visible immediately if it's not being backfilled.\n // Otherwise, the version bump will happen with the backfill is complete.\n this.#bumpVersions(table);\n }\n this.#lc.info?.(msg.tag, table, msg.column);\n }\n\n processUpdateColumn(msg: ColumnUpdate) {\n const table = liteTableName(msg.table);\n let oldName = msg.old.name;\n const newName = msg.new.name;\n\n // update-column can ignore defaults because it does not change the values\n // in existing rows.\n //\n // https://www.postgresql.org/docs/current/sql-altertable.html#SQL-ALTERTABLE-DESC-SET-DROP-DEFAULT\n //\n // \"The new default value will only apply in subsequent INSERT or UPDATE\n // commands; it does not cause rows already in the table to 
change.\"\n //\n // This allows support for _changing_ column defaults to any expression,\n // since it does not affect what the replica needs to do.\n const oldSpec = mapPostgresToLiteColumn(table, msg.old, 'ignore-default');\n const newSpec = mapPostgresToLiteColumn(table, msg.new, 'ignore-default');\n\n // The only updates that are relevant are the column name and the data type.\n if (oldName === newName && oldSpec.dataType === newSpec.dataType) {\n this.#lc.info?.(msg.tag, 'no thing to update', oldSpec, newSpec);\n return;\n }\n // If the data type changes, we have to make a new column with the new data type\n // and copy the values over.\n if (oldSpec.dataType !== newSpec.dataType) {\n // Remember (and drop) the indexes that reference the column.\n const indexes = listIndexes(this.#db.db).filter(\n idx => idx.tableName === table && oldName in idx.columns,\n );\n const stmts = indexes.map(idx => `DROP INDEX IF EXISTS ${id(idx.name)};`);\n const tmpName = `tmp.${newName}`;\n stmts.push(`\n ALTER TABLE ${id(table)} ADD ${id(tmpName)} ${liteColumnDef(newSpec)};\n UPDATE ${id(table)} SET ${id(tmpName)} = ${id(oldName)};\n ALTER TABLE ${id(table)} DROP ${id(oldName)};\n `);\n for (const idx of indexes) {\n // Re-create the indexes to reference the new column.\n idx.columns[tmpName] = idx.columns[oldName];\n delete idx.columns[oldName];\n stmts.push(createLiteIndexStatement(idx));\n }\n this.#db.db.exec(stmts.join(''));\n oldName = tmpName;\n }\n if (oldName !== newName) {\n this.#db.db.exec(\n `ALTER TABLE ${id(table)} RENAME ${id(oldName)} TO ${id(newName)}`,\n );\n }\n\n // Update metadata table\n this.#columnMetadata.update(\n table,\n msg.old.name,\n msg.new.name,\n msg.new.spec,\n );\n\n this.#bumpVersions(table);\n this.#lc.info?.(msg.tag, table, msg.new);\n }\n\n processDropColumn(msg: ColumnDrop) {\n const table = liteTableName(msg.table);\n const {column} = msg;\n this.#db.db.exec(`ALTER TABLE ${id(table)} DROP ${id(column)}`);\n\n // Delete from metadata 
table\n this.#columnMetadata.deleteColumn(table, column);\n\n this.#bumpVersions(table);\n this.#lc.info?.(msg.tag, table, column);\n }\n\n processDropTable(drop: TableDrop) {\n this.#tableMetadata.drop(drop.id);\n\n const name = liteTableName(drop.id);\n this.#db.db.exec(`DROP TABLE IF EXISTS ${id(name)}`);\n\n // Delete from metadata table\n this.#columnMetadata.deleteTable(name);\n\n this.#logResetOp(name);\n this.#lc.info?.(drop.tag, name);\n }\n\n processCreateIndex(create: IndexCreate) {\n const index = mapPostgresToLiteIndex(create.spec);\n this.#db.db.exec(createLiteIndexStatement(index));\n\n // indexes affect tables visibility (e.g. sync-ability is gated on\n // having a unique index), so reset pipelines to refresh table schemas.\n // However, the reset is not necessary if the index is for a table\n // that is not yet visible due to backfilling.\n const tableSpec = must(this.#tableSpecs.get(index.tableName));\n if (\n (tableSpec.backfilling ?? []).length ===\n Object.entries(tableSpec.columns).length - 1 // don't count _0_version\n ) {\n this.#reloadTableSpecs();\n } else {\n this.#logResetOp(index.tableName);\n }\n this.#lc.info?.(create.tag, index.name);\n }\n\n processDropIndex(drop: IndexDrop) {\n const name = liteTableName(drop.id);\n this.#db.db.exec(`DROP INDEX IF EXISTS ${id(name)}`);\n this.#lc.info?.(drop.tag, name);\n }\n\n #bumpVersions(table: string) {\n this.#db.run(\n `UPDATE ${id(table)} SET ${id(ZERO_VERSION_COLUMN_NAME)} = ?`,\n this.#version,\n );\n this.#logResetOp(table);\n }\n\n /**\n * @param backfilledColumns `backfilling` columns for which values were set\n */\n #logSetOp(\n table: string,\n key: LiteRowKey,\n backfilledColumns: string[] | undefined,\n ) {\n // The \"serving\" replicator always writes to the change-log (for IVM).\n // The \"backup\" replicator only needs to write to the change log\n // when writing columns that are being backfilled.\n if (this.#mode === 'serving' || backfilledColumns !== undefined) {\n 
this.#changeLog.logSetOp(\n this.#version,\n this.#pos++,\n table,\n key,\n backfilledColumns,\n );\n this.#numChangeLogEntries++;\n }\n }\n\n #logDeleteOp(table: string, key: LiteRowKey, backfilling?: string[]) {\n // The \"serving\" replicator always writes to the change-log (for IVM).\n // The \"backup\" replicator only needs to write to the change log\n // when writing columns that are being backfilled.\n if (this.#mode === 'serving' || backfilling?.length) {\n this.#changeLog.logDeleteOp(this.#version, this.#pos++, table, key);\n this.#numChangeLogEntries++;\n }\n }\n\n #logTruncateOp(table: string) {\n if (this.#mode === 'serving') {\n this.#changeLog.logTruncateOp(this.#version, table);\n this.#numChangeLogEntries++;\n }\n }\n\n #logResetOp(table: string) {\n this.#schemaChanged = true;\n if (this.#mode === 'serving') {\n this.#changeLog.logResetOp(this.#version, table);\n this.#numChangeLogEntries++;\n }\n this.#reloadTableSpecs();\n }\n\n processBackfill({relation, watermark, columns, rowValues}: MessageBackfill) {\n const tableName = liteTableName(relation);\n const tableSpec = must(this.#tableSpecs.get(tableName));\n const rowKeyCols = relation.rowKey.columns;\n const cols = [...rowKeyCols, ...columns];\n\n // Common parts of the INSERT sql statement.\n const insertColsStr = [...cols, ZERO_VERSION_COLUMN_NAME].map(id).join(',');\n const qMarks = Array.from({length: cols.length + 1}, () => '?').join(',');\n const rowKeyColsStr = rowKeyCols.map(id).join(',');\n\n let backfilled = 0;\n let skipped = 0;\n for (const v of rowValues) {\n const row = liteRow(\n Object.fromEntries(cols.map((c, i) => [c, v[i]])),\n tableSpec,\n this.#jsonFormat,\n );\n const rowKey = this.#getKey(row, {relation});\n const rowOp = this.#changeLog.getLatestRowOp(tableName, rowKey);\n if (rowOp?.op === DEL_OP && rowOp.stateVersion > watermark) {\n skipped++;\n continue; // the row was deleted after the backfill snapshot\n }\n const updates =\n rowOp?.op === SET_OP\n ? 
cols.filter(\n c => (rowOp.backfillingColumnVersions[c] ?? '') <= watermark,\n )\n : cols;\n if (updates.length === 0) {\n // row already has newer values for all backfilling columns.\n skipped++;\n continue;\n }\n const updateStmts = updates.map(col => `${id(col)}=excluded.${id(col)}`);\n this.#db.run(\n /*sql*/ `\n INSERT INTO ${id(tableName)} (${insertColsStr}) VALUES (${qMarks})\n ON CONFLICT (${rowKeyColsStr})\n DO UPDATE SET ${updateStmts.join(',')};\n `,\n ...Object.values(row.row),\n watermark, // the _0_version for new rows (i.e. table backfill)\n );\n backfilled++;\n }\n\n this.#lc.debug?.(\n `backfilled ${backfilled} rows (skipped ${skipped}) into ${tableName}`,\n );\n }\n\n processBackfillCompleted({relation, columns}: BackfillCompleted) {\n const tableName = liteTableName(relation);\n const rowKeyCols = relation.rowKey.columns;\n const cols = [...rowKeyCols, ...columns];\n\n const columnMetadata = must(ColumnMetadataStore.getInstance(this.#db.db));\n for (const col of cols) {\n columnMetadata.clearBackfilling(tableName, col);\n }\n // Given that new columns are being exposed for every row in the table, bump the\n // row version for all rows.\n this.#bumpVersions(tableName);\n this.#lc.info?.(`finished backfilling ${tableName}`);\n\n // Note that there is no need to clear the backfillingColumnVersions values\n // in the changeLog. It could theoretically be done for clarity but:\n // (1) it could be non-trivial in terms of latency introduced and\n // (2) the data must be preserved if _other_ columns are in the process\n // of being backfilled\n //\n // Thus, for speed and simplicity, the values are left as is. 
(Note that\n // subsequent replicated changes to those rows will clear the values if\n // no backfills are in progress).\n }\n\n processCommit(\n commit: MessageCommit,\n watermark: string,\n ): {schemaUpdated: boolean; changeLogUpdated: boolean} {\n if (watermark !== this.#version) {\n throw new Error(\n `'commit' version ${watermark} does not match 'begin' version ${\n this.#version\n }: ${stringify(commit)}`,\n );\n }\n updateReplicationWatermark(this.#db, watermark);\n\n if (this.#schemaChanged) {\n const start = Date.now();\n this.#db.db.pragma('optimize');\n this.#lc.info?.(\n `PRAGMA optimized after schema change (${Date.now() - start} ms)`,\n );\n }\n\n if (this.#mode !== 'initial-sync') {\n this.#db.commit();\n }\n\n const elapsedMs = Date.now() - this.#startMs;\n this.#lc.debug?.(`Committed tx@${this.#version} (${elapsedMs} ms)`);\n\n return {\n schemaUpdated: this.#schemaChanged,\n changeLogUpdated: this.#numChangeLogEntries > 0,\n };\n }\n\n abort(lc: LogContext) {\n lc.info?.(`aborting transaction ${this.#version}`);\n this.#db.rollback();\n }\n}\n\nfunction getBackfilledColumns(\n row: LiteRow,\n {backfilling}: LiteTableSpecWithReplicationStatus,\n): string[] | undefined {\n if (!backfilling?.length) {\n return undefined; // common case\n }\n return backfilling.filter(col => col in row);\n}\n\nfunction ensureError(err: unknown): Error {\n if (err instanceof Error) {\n return err;\n }\n const error = new Error();\n error.cause = err;\n return 
error;\n}\n"],"names":[],"mappings":";;;;;;;;;;;;;;;;AAoFO,MAAM,gBAAgB;AAAA,EAClB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA;AAAA;AAAA;AAAA,EAKA,kCAAkB,IAAA;AAAA,EAE3B,aAA0C;AAAA,EAE1C;AAAA,EAEA,YACE,IACA,MACA,aACA;AACA,SAAK,MAAM;AACX,SAAK,aAAa,IAAI,UAAU,GAAG,EAAE;AACrC,SAAK,iBAAiB,IAAI,qBAAqB,GAAG,EAAE;AACpD,SAAK,QAAQ;AACb,SAAK,eAAe;AAAA,EACtB;AAAA,EAEA,MAAM,IAAgB,KAAc;AAClC,QAAI,CAAC,KAAK,UAAU;AAClB,WAAK,YAAY,MAAM,EAAE;AAEzB,WAAK,WAAW,YAAY,GAAG;AAE/B,UAAI,EAAE,eAAe,aAAa;AAEhC,WAAG,QAAQ,8BAA8B,KAAK,QAAQ;AACtD,aAAK,aAAa,IAAI,KAAK,QAAQ;AAAA,MACrC;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAM,IAAgB;AACpB,SAAK,MAAM,IAAI,IAAI,WAAA,CAAY;AAAA,EACjC;AAAA;AAAA,EAGA,eACE,IACA,YACqB;AACrB,UAAM,CAAC,MAAM,OAAO,IAAI;AACxB,QAAI,KAAK,UAAU;AACjB,SAAG,QAAQ,YAAY,QAAQ,GAAG,EAAE;AACpC,aAAO;AAAA,IACT;AACA,QAAI;AACF,YAAM,YACJ,SAAS,UACL,WAAW,CAAC,EAAE,kBACd,SAAS,WACP,WAAW,CAAC,EAAE,YACd;AACR,aAAO,KAAK,gBAAgB,IAAI,SAAS,SAAS;AAAA,IACpD,SAAS,GAAG;AACV,WAAK,MAAM,IAAI,CAAC;AAAA,IAClB;AACA,WAAO;AAAA,EACT;AAAA,EAEA,kBACE,IACA,eACA,YACsB;AACtB,UAAM,QAAQ,KAAK,IAAA;AASnB,aAAS,IAAI,KAAK,KAAK;AACrB,UAAI;AACF,eAAO,IAAI;AAAA,UACT;AAAA,UACA,KAAK;AAAA,UACL,KAAK;AAAA,UACL,KAAK;AAAA,UACL,KAAK;AAAA,UACL,KAAK;AAAA,UACL;AAAA,UACA;AAAA,QAAA;AAAA,MAEJ,SAAS,GAAG;AACV,YAAI,aAAa,eAAe,EAAE,SAAS,eAAe;AACxD,aAAG;AAAA,YACD,mBAAmB,KAAK,IAAA,IAAQ,KAAK,gBAAgB,IAAI,CAAC;AAAA,YAG1D;AAAA,UAAA;AAEF;AAAA,QACF;AACA,cAAM;AAAA,MACR;AAAA,IACF;AAAA,EACF;AAAA;AAAA,EAGA,gBACE,IACA,KACA,WACqB;AACrB,QAAI,IAAI,QAAQ,SAAS;AACvB,UAAI,KAAK,YAAY;AACnB,cAAM,IAAI,MAAM,4BAA4B,UAAU,GAAG,CAAC,EAAE;AAAA,MAC9D;AACA,WAAK,aAAa,KAAK;AAAA,QACrB;AAAA,QACA,KAAK,SAAS;AAAA,QACd,IAAI,QAAQ;AAAA,MAAA;AAEd,aAAO;AAAA,IACT;AAGA,UAAM,KAAK,KAAK;AAChB,QAAI,CAAC,IAAI;AACP,YAAM,IAAI;AAAA,QACR,4CAA4C,UAAU,GAAG,CAAC;AAAA,MAAA;AAAA,IAE9D;AAEA,QAAI,IAAI,QAAQ,UAAU;AAExB,WAAK,aAAa;AAElB,aAAO,WAAW,2CAA2C;AAC7D,YAAM,EAAC,eAAe,iBAAA,IAAoB,GAAG;AAAA,QAC3C;AAAA,QACA;AAAA,MAAA;AAEF,aAAO,EAAC,WAAW,eAAe,iBAAA;AAAA,IACpC;AAEA,QAAI,IAAI,QAAQ,YAAY;AAC1B,WAAK,YAAY,MAAM,EAAE;AACzB,WAAK,aAAa;AA
ClB,aAAO;AAAA,IACT;AAEA,YAAQ,IAAI,KAAA;AAAA,MACV,KAAK;AACH,WAAG,cAAc,GAAG;AACpB;AAAA,MACF,KAAK;AACH,WAAG,cAAc,GAAG;AACpB;AAAA,MACF,KAAK;AACH,WAAG,cAAc,GAAG;AACpB;AAAA,MACF,KAAK;AACH,WAAG,gBAAgB,GAAG;AACtB;AAAA,MACF,KAAK;AACH,WAAG,mBAAmB,GAAG;AACzB;AAAA,MACF,KAAK;AACH,WAAG,mBAAmB,GAAG;AACzB;AAAA,MACF,KAAK;AACH,WAAG,qBAAqB,GAAG;AAC3B;AAAA,MACF,KAAK;AACH,WAAG,iBAAiB,GAAG;AACvB;AAAA,MACF,KAAK;AACH,WAAG,oBAAoB,GAAG;AAC1B;AAAA,MACF,KAAK;AACH,WAAG,kBAAkB,GAAG;AACxB;AAAA,MACF,KAAK;AACH,WAAG,iBAAiB,GAAG;AACvB;AAAA,MACF,KAAK;AACH,WAAG,mBAAmB,GAAG;AACzB;AAAA,MACF,KAAK;AACH,WAAG,iBAAiB,GAAG;AACvB;AAAA,MACF,KAAK;AACH,WAAG,gBAAgB,GAAG;AACtB;AAAA,MACF,KAAK;AACH,WAAG,yBAAyB,GAAG;AAC/B;AAAA,MACF;AACE,oBAAe;AAAA,IAAA;AAGnB,WAAO;AAAA,EACT;AACF;AAuBA,MAAM,qBAAqB;AAAA,EAChB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAET,OAAO;AAAA,EACP,iBAAiB;AAAA,EACjB,uBAAuB;AAAA,EAEvB,YACE,IACA,IACA,MACA,WACA,eACA,YACA,eACA,YACA;AACA,SAAK,WAAW,KAAK,IAAA;AACrB,SAAK,QAAQ;AACb,SAAK,cAAc;AAEnB,YAAQ,MAAA;AAAA,MACN,KAAK;AAQH,WAAG,gBAAA;AACH;AAAA,MACF,KAAK;AAKH,WAAG,eAAA;AACH;AAAA,MACF,KAAK;AAGH;AAAA,MACF;AACE,oBAAA;AAAA,IAAY;AAEhB,SAAK,MAAM;AACX,SAAK,WAAW;AAChB,SAAK,MAAM,GAAG,YAAY,WAAW,aAAa;AAClD,SAAK,aAAa;AAClB,SAAK,iBAAiB;AACtB,SAAK,cAAc;AAGnB,SAAK,kBAAkB,KAAK,oBAAoB,YAAY,GAAG,EAAE,CAAC;AAElE,QAAI,KAAK,YAAY,SAAS,GAAG;AAC/B,WAAK,kBAAA;AAAA,IACP;AAAA,EACF;AAAA,EAEA,oBAAoB;AAClB,SAAK,YAAY,MAAA;AAEjB,UAAM,WAAW,gBAAgB,KAAK,KAAK,KAAK,IAAI,IAAI;AAAA,MACtD,2BAA2B;AAAA,IAAA,CAC5B;AACD,aAAS,QAAQ,WAAW,KAAK,IAAI,EAAE,GAAG;AACxC,UAAI,CAAC,KAAK,YAAY;AACpB,eAAO;AAAA,UACL,GAAG;AAAA,UACH,YAAY;AAAA,YACV,GAAI,SAAS,IAAI,KAAK,IAAI,GAAG,UAAU,cAAc,CAAA;AAAA,UAAC;AAAA,QACxD;AAAA,MAEJ;AACA,WAAK,YAAY,IAAI,KAAK,MAAM,IAAI;AAAA,IACtC;AAAA,EACF;AAAA,EAEA,WAAW,MAAc;AACvB,WAAO,KAAK,KAAK,YAAY,IAAI,IAAI,GAAG,iBAAiB,IAAI,EAAE;AAAA,EACjE;AAAA,EAEA,QACE,EAAC,KAAK,WACN,EAAC,YACW;AACZ,UAAM,aACJ,SAAS,OAAO,SAAS,SACrB,SAAS,OAAO,UAChB,KAAK,WAAW,cAAc,QAAQ,CAAC,EAAE;AAC/C,QAAI,CAAC,YAAY,QAAQ;AACvB
,YAAM,IAAI;AAAA,QACR,2BAA2B,SAAS,IAAI;AAAA,MAAA;AAAA,IAE5C;AAGA,QAAI,YAAY,WAAW,QAAQ;AACjC,aAAO;AAAA,IACT;AACA,UAAM,MAAqC,CAAA;AAC3C,eAAW,OAAO,YAAY;AAC5B,UAAI,GAAG,IAAI,IAAI,GAAG;AAAA,IACpB;AACA,WAAO;AAAA,EACT;AAAA,EAEA,cAAc,QAAuB;AACnC,UAAM,QAAQ,cAAc,OAAO,QAAQ;AAC3C,UAAM,YAAY,KAAK,WAAW,KAAK;AACvC,UAAM,SAAS,QAAQ,OAAO,KAAK,WAAW,KAAK,WAAW;AAE9D,SAAK,QAAQ,OAAO;AAAA,MAClB,GAAG,OAAO;AAAA,MACV,CAAC,wBAAwB,GAAG,KAAK;AAAA,IAAA,CAClC;AAED,QAAI,OAAO,SAAS,OAAO,QAAQ,WAAW,GAAG;AAQ/C;AAAA,IACF;AACA,UAAM,MAAM,KAAK,QAAQ,QAAQ,MAAM;AACvC,SAAK,UAAU,OAAO,KAAK,qBAAqB,OAAO,KAAK,SAAS,CAAC;AAAA,EACxE;AAAA,EAEA,QAAQ,OAAe,KAAc;AACnC,UAAM,UAAU,OAAO,KAAK,GAAG,EAAE,IAAI,CAAA,MAAK,GAAG,CAAC,CAAC;AAC/C,SAAK,IAAI;AAAA,MACP;AAAA,+BACyB,GAAG,KAAK,CAAC,KAAK,QAAQ,KAAK,GAAG,CAAC;AAAA,kBAC5C,MAAM,KAAK,EAAC,QAAQ,QAAQ,QAAO,EAAE,KAAK,GAAG,EAAE,KAAK,GAAG,CAAC;AAAA;AAAA,MAEpE,OAAO,OAAO,GAAG;AAAA,IAAA;AAAA,EAErB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,cAAc,QAAuB;AACnC,UAAM,QAAQ,cAAc,OAAO,QAAQ;AAC3C,UAAM,YAAY,KAAK,WAAW,KAAK;AACvC,UAAM,SAAS,QAAQ,OAAO,KAAK,WAAW,KAAK,WAAW;AAC9D,UAAM,MAAM,EAAC,GAAG,OAAO,KAAK,CAAC,wBAAwB,GAAG,KAAK,SAAA;AAG7D,UAAM,SAAS,OAAO,MAClB,KAAK;AAAA,MACH,QAAQ,OAAO,KAAK,KAAK,WAAW,KAAK,GAAG,KAAK,WAAW;AAAA,MAC5D;AAAA,IAAA,IAEF;AACJ,UAAM,SAAS,KAAK,QAAQ,QAAQ,MAAM;AAE1C,QAAI,QAAQ;AACV,WAAK,aAAa,OAAO,QAAQ,UAAU,WAAW;AAAA,IACxD;AACA,SAAK,UAAU,OAAO,QAAQ,qBAAqB,OAAO,KAAK,SAAS,CAAC;AAEzE,UAAM,UAAU,UAAU;AAC1B,UAAM,QAAQ,OAAO,KAAK,OAAO,EAAE,IAAI,CAAA,QAAO,GAAG,GAAG,GAAG,CAAC,IAAI;AAC5D,UAAM,WAAW,OAAO,KAAK,GAAG,EAAE,IAAI,CAAA,QAAO,GAAG,GAAG,GAAG,CAAC,IAAI;AAE3D,UAAM,EAAC,QAAA,IAAW,KAAK,IAAI;AAAA,MACzB;AAAA,eACS,GAAG,KAAK,CAAC;AAAA,cACV,SAAS,KAAK,GAAG,CAAC;AAAA,gBAChB,MAAM,KAAK,OAAO,CAAC;AAAA;AAAA,MAE7B,CAAC,GAAG,OAAO,OAAO,GAAG,GAAG,GAAG,OAAO,OAAO,OAAO,CAAC;AAAA,IAAA;AAKnD,QAAI,YAAY,GAAG;AACjB,WAAK,QAAQ,OAAO,GAAG;AAAA,IACzB;AAAA,EACF;AAAA,EAEA,cAAc,KAAoB;AAChC,UAAM,QAAQ,cAAc,IAAI,QAAQ;AACxC,UAAM,YAAY,KAAK,WAAW,KAAK;AACvC,UAAM,SAAS,KAAK;AAAA,MAClB,QAAQ,IAAI,KAAK,WAAW,KAAK,WAAW;AAAA
,MAC5C;AAAA,IAAA;AAGF,SAAK,QAAQ,OAAO,MAAM;AAC1B,SAAK,aAAa,OAAO,QAAQ,UAAU,WAAW;AAAA,EACxD;AAAA,EAEA,QAAQ,OAAe,QAAoB;AACzC,UAAM,QAAQ,OAAO,KAAK,MAAM,EAAE,IAAI,CAAA,QAAO,GAAG,GAAG,GAAG,CAAC,IAAI;AAC3D,SAAK,IAAI;AAAA,MACP,eAAe,GAAG,KAAK,CAAC,UAAU,MAAM,KAAK,OAAO,CAAC;AAAA,MACrD,OAAO,OAAO,MAAM;AAAA,IAAA;AAAA,EAExB;AAAA,EAEA,gBAAgB,UAA2B;AACzC,eAAW,YAAY,SAAS,WAAW;AACzC,YAAM,QAAQ,cAAc,QAAQ;AAEpC,WAAK,IAAI,IAAI,eAAe,GAAG,KAAK,CAAC,EAAE;AAGvC,WAAK,eAAe,KAAK;AAAA,IAC3B;AAAA,EACF;AAAA,EAEA,mBAAmB,QAAqB;AACtC,QAAI,OAAO,UAAU;AACnB,WAAK,eAAe,IAAI,OAAO,MAAM,OAAO,QAAQ;AAAA,IACtD;AACA,UAAM,QAAQ,kBAAkB,OAAO,IAAI;AAC3C,SAAK,IAAI,GAAG,KAAK,yBAAyB,KAAK,CAAC;AAGhD,eAAW,CAAC,SAAS,OAAO,KAAK,OAAO,QAAQ,OAAO,KAAK,OAAO,GAAG;AACpE,WAAK,gBAAgB;AAAA,QACnB,MAAM;AAAA,QACN;AAAA,QACA;AAAA,QACA,OAAO,WAAW,OAAO;AAAA,MAAA;AAAA,IAE7B;AAEA,QACE,OAAO,KAAK,OAAO,YAAY,CAAA,CAAE,EAAE,WACnC,OAAO,KAAK,OAAO,KAAK,OAAO,EAAE,QACjC;AACA,WAAK,kBAAA;AAAA,IACP,OAAO;AAIL,WAAK,YAAY,MAAM,IAAI;AAAA,IAC7B;AACA,SAAK,IAAI,OAAO,OAAO,KAAK,MAAM,IAAI;AAAA,EACxC;AAAA,EAEA,qBAAqB,KAA0B;AAC7C,SAAK,eAAe,IAAI,IAAI,OAAO,IAAI,GAAG;AAAA,EAC5C;AAAA,EAEA,mBAAmB,QAAqB;AACtC,SAAK,eAAe,OAAO,OAAO,KAAK,OAAO,GAAG;AAEjD,UAAM,UAAU,cAAc,OAAO,GAAG;AACxC,UAAM,UAAU,cAAc,OAAO,GAAG;AACxC,SAAK,IAAI,GAAG,KAAK,eAAe,GAAG,OAAO,CAAC,cAAc,GAAG,OAAO,CAAC,EAAE;AAGtE,SAAK,gBAAgB,YAAY,SAAS,OAAO;AAEjD,SAAK,cAAc,OAAO;AAC1B,SAAK,YAAY,OAAO;AACxB,SAAK,IAAI,OAAO,OAAO,KAAK,SAAS,OAAO;AAAA,EAC9C;AAAA,EAEA,iBAAiB,KAAgB;AAC/B,QAAI,IAAI,eAAe;AACrB,WAAK,eAAe,IAAI,IAAI,OAAO,IAAI,aAAa;AAAA,IACtD;AACA,UAAM,QAAQ,cAAc,IAAI,KAAK;AACrC,UAAM,EAAC,SAAQ,IAAI;AACnB,UAAM,OAAO,wBAAwB,OAAO,IAAI,MAAM;AACtD,SAAK,IAAI,GAAG;AAAA,MACV,eAAe,GAAG,KAAK,CAAC,QAAQ,GAAG,IAAI,CAAC,IAAI,cAAc,IAAI,CAAC;AAAA,IAAA;AAIjE,SAAK,gBAAgB,OAAO,OAAO,MAAM,IAAI,OAAO,MAAM,IAAI,QAAQ;AAEtE,QAAI,IAAI,UAAU;AAChB,WAAK,kBAAA;AAAA,IACP,OAAO;AAGL,WAAK,cAAc,KAAK;AAAA,IAC1B;AACA,SAAK,IAAI,OAAO,IAAI,KAAK,OAAO,IAAI,MAAM;AAAA,EAC5C;AAAA,EAEA,oBAAoB,KAAmB;AACrC,UAAM,QAAQ,cAAc,IAAI,KAAK;AACrC,QAAI,UAAU,IAAI,IAAI;AACtB,UAAM,UAAU,IAAI,IA
AI;AAYxB,UAAM,UAAU,wBAAwB,OAAO,IAAI,KAAK,gBAAgB;AACxE,UAAM,UAAU,wBAAwB,OAAO,IAAI,KAAK,gBAAgB;AAGxE,QAAI,YAAY,WAAW,QAAQ,aAAa,QAAQ,UAAU;AAChE,WAAK,IAAI,OAAO,IAAI,KAAK,sBAAsB,SAAS,OAAO;AAC/D;AAAA,IACF;AAGA,QAAI,QAAQ,aAAa,QAAQ,UAAU;AAEzC,YAAM,UAAU,YAAY,KAAK,IAAI,EAAE,EAAE;AAAA,QACvC,CAAA,QAAO,IAAI,cAAc,SAAS,WAAW,IAAI;AAAA,MAAA;AAEnD,YAAM,QAAQ,QAAQ,IAAI,CAAA,QAAO,wBAAwB,GAAG,IAAI,IAAI,CAAC,GAAG;AACxE,YAAM,UAAU,OAAO,OAAO;AAC9B,YAAM,KAAK;AAAA,sBACK,GAAG,KAAK,CAAC,QAAQ,GAAG,OAAO,CAAC,IAAI,cAAc,OAAO,CAAC;AAAA,iBAC3D,GAAG,KAAK,CAAC,QAAQ,GAAG,OAAO,CAAC,MAAM,GAAG,OAAO,CAAC;AAAA,sBACxC,GAAG,KAAK,CAAC,SAAS,GAAG,OAAO,CAAC;AAAA,SAC1C;AACH,iBAAW,OAAO,SAAS;AAEzB,YAAI,QAAQ,OAAO,IAAI,IAAI,QAAQ,OAAO;AAC1C,eAAO,IAAI,QAAQ,OAAO;AAC1B,cAAM,KAAK,yBAAyB,GAAG,CAAC;AAAA,MAC1C;AACA,WAAK,IAAI,GAAG,KAAK,MAAM,KAAK,EAAE,CAAC;AAC/B,gBAAU;AAAA,IACZ;AACA,QAAI,YAAY,SAAS;AACvB,WAAK,IAAI,GAAG;AAAA,QACV,eAAe,GAAG,KAAK,CAAC,WAAW,GAAG,OAAO,CAAC,OAAO,GAAG,OAAO,CAAC;AAAA,MAAA;AAAA,IAEpE;AAGA,SAAK,gBAAgB;AAAA,MACnB;AAAA,MACA,IAAI,IAAI;AAAA,MACR,IAAI,IAAI;AAAA,MACR,IAAI,IAAI;AAAA,IAAA;AAGV,SAAK,cAAc,KAAK;AACxB,SAAK,IAAI,OAAO,IAAI,KAAK,OAAO,IAAI,GAAG;AAAA,EACzC;AAAA,EAEA,kBAAkB,KAAiB;AACjC,UAAM,QAAQ,cAAc,IAAI,KAAK;AACrC,UAAM,EAAC,WAAU;AACjB,SAAK,IAAI,GAAG,KAAK,eAAe,GAAG,KAAK,CAAC,SAAS,GAAG,MAAM,CAAC,EAAE;AAG9D,SAAK,gBAAgB,aAAa,OAAO,MAAM;AAE/C,SAAK,cAAc,KAAK;AACxB,SAAK,IAAI,OAAO,IAAI,KAAK,OAAO,MAAM;AAAA,EACxC;AAAA,EAEA,iBAAiB,MAAiB;AAChC,SAAK,eAAe,KAAK,KAAK,EAAE;AAEhC,UAAM,OAAO,cAAc,KAAK,EAAE;AAClC,SAAK,IAAI,GAAG,KAAK,wBAAwB,GAAG,IAAI,CAAC,EAAE;AAGnD,SAAK,gBAAgB,YAAY,IAAI;AAErC,SAAK,YAAY,IAAI;AACrB,SAAK,IAAI,OAAO,KAAK,KAAK,IAAI;AAAA,EAChC;AAAA,EAEA,mBAAmB,QAAqB;AACtC,UAAM,QAAQ,uBAAuB,OAAO,IAAI;AAChD,SAAK,IAAI,GAAG,KAAK,yBAAyB,KAAK,CAAC;AAMhD,UAAM,YAAY,KAAK,KAAK,YAAY,IAAI,MAAM,SAAS,CAAC;AAC5D,SACG,UAAU,eAAe,CAAA,GAAI,WAC9B,OAAO,QAAQ,UAAU,OAAO,EAAE,SAAS,GAC3C;AACA,WAAK,kBAAA;AAAA,IACP,OAAO;AACL,WAAK,YAAY,MAAM,SAAS;AAAA,IAClC;AACA,SAAK,IAAI,OAAO,OAAO,KAAK,MAAM,IAAI;AAAA,EACxC;AAAA,EAEA,iBAAiB,MAAiB;AAChC,UAAM,OAAO,cA
Ac,KAAK,EAAE;AAClC,SAAK,IAAI,GAAG,KAAK,wBAAwB,GAAG,IAAI,CAAC,EAAE;AACnD,SAAK,IAAI,OAAO,KAAK,KAAK,IAAI;AAAA,EAChC;AAAA,EAEA,cAAc,OAAe;AAC3B,SAAK,IAAI;AAAA,MACP,UAAU,GAAG,KAAK,CAAC,QAAQ,GAAG,wBAAwB,CAAC;AAAA,MACvD,KAAK;AAAA,IAAA;AAEP,SAAK,YAAY,KAAK;AAAA,EACxB;AAAA;AAAA;AAAA;AAAA,EAKA,UACE,OACA,KACA,mBACA;AAIA,QAAI,KAAK,UAAU,aAAa,sBAAsB,QAAW;AAC/D,WAAK,WAAW;AAAA,QACd,KAAK;AAAA,QACL,KAAK;AAAA,QACL;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAEF,WAAK;AAAA,IACP;AAAA,EACF;AAAA,EAEA,aAAa,OAAe,KAAiB,aAAwB;AAInE,QAAI,KAAK,UAAU,aAAa,aAAa,QAAQ;AACnD,WAAK,WAAW,YAAY,KAAK,UAAU,KAAK,QAAQ,OAAO,GAAG;AAClE,WAAK;AAAA,IACP;AAAA,EACF;AAAA,EAEA,eAAe,OAAe;AAC5B,QAAI,KAAK,UAAU,WAAW;AAC5B,WAAK,WAAW,cAAc,KAAK,UAAU,KAAK;AAClD,WAAK;AAAA,IACP;AAAA,EACF;AAAA,EAEA,YAAY,OAAe;AACzB,SAAK,iBAAiB;AACtB,QAAI,KAAK,UAAU,WAAW;AAC5B,WAAK,WAAW,WAAW,KAAK,UAAU,KAAK;AAC/C,WAAK;AAAA,IACP;AACA,SAAK,kBAAA;AAAA,EACP;AAAA,EAEA,gBAAgB,EAAC,UAAU,WAAW,SAAS,aAA6B;AAC1E,UAAM,YAAY,cAAc,QAAQ;AACxC,UAAM,YAAY,KAAK,KAAK,YAAY,IAAI,SAAS,CAAC;AACtD,UAAM,aAAa,SAAS,OAAO;AACnC,UAAM,OAAO,CAAC,GAAG,YAAY,GAAG,OAAO;AAGvC,UAAM,gBAAgB,CAAC,GAAG,MAAM,wBAAwB,EAAE,IAAI,EAAE,EAAE,KAAK,GAAG;AAC1E,UAAM,SAAS,MAAM,KAAK,EAAC,QAAQ,KAAK,SAAS,EAAA,GAAI,MAAM,GAAG,EAAE,KAAK,GAAG;AACxE,UAAM,gBAAgB,WAAW,IAAI,EAAE,EAAE,KAAK,GAAG;AAEjD,QAAI,aAAa;AACjB,QAAI,UAAU;AACd,eAAW,KAAK,WAAW;AACzB,YAAM,MAAM;AAAA,QACV,OAAO,YAAY,KAAK,IAAI,CAAC,GAAG,MAAM,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC,CAAC;AAAA,QAChD;AAAA,QACA,KAAK;AAAA,MAAA;AAEP,YAAM,SAAS,KAAK,QAAQ,KAAK,EAAC,UAAS;AAC3C,YAAM,QAAQ,KAAK,WAAW,eAAe,WAAW,MAAM;AAC9D,UAAI,OAAO,OAAO,UAAU,MAAM,eAAe,WAAW;AAC1D;AACA;AAAA,MACF;AACA,YAAM,UACJ,OAAO,OAAO,SACV,KAAK;AAAA,QACH,CAAA,OAAM,MAAM,0BAA0B,CAAC,KAAK,OAAO;AAAA,MAAA,IAErD;AACN,UAAI,QAAQ,WAAW,GAAG;AAExB;AACA;AAAA,MACF;AACA,YAAM,cAAc,QAAQ,IAAI,CAAA,QAAO,GAAG,GAAG,GAAG,CAAC,aAAa,GAAG,GAAG,CAAC,EAAE;AACvE,WAAK,IAAI;AAAA;AAAA,QACC;AAAA,sBACM,GAAG,SAAS,CAAC,KAAK,aAAa,aAAa,MAAM;AAAA,yBAC/C,aAAa;AAAA,0BACZ,YAAY,KAAK,GAAG,CAAC;AAAA;AAAA,QAEvC,GAAG,OAAO,OAAO,IAAI,GAAG;AAAA,QACxB;AAAA;AAAA,MAAA;AAEF;AAAA
,IACF;AAEA,SAAK,IAAI;AAAA,MACP,cAAc,UAAU,kBAAkB,OAAO,UAAU,SAAS;AAAA,IAAA;AAAA,EAExE;AAAA,EAEA,yBAAyB,EAAC,UAAU,WAA6B;AAC/D,UAAM,YAAY,cAAc,QAAQ;AACxC,UAAM,aAAa,SAAS,OAAO;AACnC,UAAM,OAAO,CAAC,GAAG,YAAY,GAAG,OAAO;AAEvC,UAAM,iBAAiB,KAAK,oBAAoB,YAAY,KAAK,IAAI,EAAE,CAAC;AACxE,eAAW,OAAO,MAAM;AACtB,qBAAe,iBAAiB,WAAW,GAAG;AAAA,IAChD;AAGA,SAAK,cAAc,SAAS;AAC5B,SAAK,IAAI,OAAO,wBAAwB,SAAS,EAAE;AAAA,EAWrD;AAAA,EAEA,cACE,QACA,WACqD;AACrD,QAAI,cAAc,KAAK,UAAU;AAC/B,YAAM,IAAI;AAAA,QACR,oBAAoB,SAAS,mCAC3B,KAAK,QACP,KAAK,UAAU,MAAM,CAAC;AAAA,MAAA;AAAA,IAE1B;AACA,+BAA2B,KAAK,KAAK,SAAS;AAE9C,QAAI,KAAK,gBAAgB;AACvB,YAAM,QAAQ,KAAK,IAAA;AACnB,WAAK,IAAI,GAAG,OAAO,UAAU;AAC7B,WAAK,IAAI;AAAA,QACP,yCAAyC,KAAK,IAAA,IAAQ,KAAK;AAAA,MAAA;AAAA,IAE/D;AAEA,QAAI,KAAK,UAAU,gBAAgB;AACjC,WAAK,IAAI,OAAA;AAAA,IACX;AAEA,UAAM,YAAY,KAAK,IAAA,IAAQ,KAAK;AACpC,SAAK,IAAI,QAAQ,gBAAgB,KAAK,QAAQ,KAAK,SAAS,MAAM;AAElE,WAAO;AAAA,MACL,eAAe,KAAK;AAAA,MACpB,kBAAkB,KAAK,uBAAuB;AAAA,IAAA;AAAA,EAElD;AAAA,EAEA,MAAM,IAAgB;AACpB,OAAG,OAAO,wBAAwB,KAAK,QAAQ,EAAE;AACjD,SAAK,IAAI,SAAA;AAAA,EACX;AACF;AAEA,SAAS,qBACP,KACA,EAAC,eACqB;AACtB,MAAI,CAAC,aAAa,QAAQ;AACxB,WAAO;AAAA,EACT;AACA,SAAO,YAAY,OAAO,CAAA,QAAO,OAAO,GAAG;AAC7C;AAEA,SAAS,YAAY,KAAqB;AACxC,MAAI,eAAe,OAAO;AACxB,WAAO;AAAA,EACT;AACA,QAAM,QAAQ,IAAI,MAAA;AAClB,QAAM,QAAQ;AACd,SAAO;AACT;"}
1
+ {"version":3,"file":"change-processor.js","sources":["../../../../../../zero-cache/src/services/replicator/change-processor.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {SqliteError} from '@rocicorp/zero-sqlite3';\nimport {AbortError} from '../../../../shared/src/abort-error.ts';\nimport {assert, unreachable} from '../../../../shared/src/asserts.ts';\nimport {stringify} from '../../../../shared/src/bigint-json.ts';\nimport {must} from '../../../../shared/src/must.ts';\nimport type {DownloadStatus} from '../../../../zero-events/src/status.ts';\nimport {\n createLiteIndexStatement,\n createLiteTableStatement,\n liteColumnDef,\n} from '../../db/create.ts';\nimport {\n computeZqlSpecs,\n listIndexes,\n listTables,\n type LiteTableSpecWithReplicationStatus,\n} from '../../db/lite-tables.ts';\nimport {\n mapPostgresToLite,\n mapPostgresToLiteColumn,\n mapPostgresToLiteIndex,\n} from '../../db/pg-to-lite.ts';\nimport type {LiteTableSpec} from '../../db/specs.ts';\nimport type {StatementRunner} from '../../db/statements.ts';\nimport type {LexiVersion} from '../../types/lexi-version.ts';\nimport {\n JSON_PARSED,\n liteRow,\n type JSONFormat,\n type LiteRow,\n type LiteRowKey,\n type LiteValueType,\n} from '../../types/lite.ts';\nimport {liteTableName} from '../../types/names.ts';\nimport {id} from '../../types/sql.ts';\nimport type {\n BackfillCompleted,\n Change,\n ColumnAdd,\n ColumnDrop,\n ColumnUpdate,\n IndexCreate,\n IndexDrop,\n MessageBackfill,\n MessageCommit,\n MessageDelete,\n MessageInsert,\n MessageRelation,\n MessageTruncate,\n MessageUpdate,\n TableCreate,\n TableDrop,\n TableRename,\n TableUpdateMetadata,\n} from '../change-source/protocol/current/data.ts';\nimport type {ChangeStreamData} from '../change-source/protocol/current/downstream.ts';\nimport type {ReplicatorMode} from './replicator.ts';\nimport {ChangeLog, DEL_OP, SET_OP} from './schema/change-log.ts';\nimport {ColumnMetadataStore} from 
'./schema/column-metadata.ts';\nimport {\n ZERO_VERSION_COLUMN_NAME,\n updateReplicationWatermark,\n} from './schema/replication-state.ts';\nimport {TableMetadataTracker} from './schema/table-metadata.ts';\n\nexport type ChangeProcessorMode = ReplicatorMode | 'initial-sync';\n\nexport type CommitResult = {\n watermark: string;\n completedBackfill: DownloadStatus | undefined;\n schemaUpdated: boolean;\n changeLogUpdated: boolean;\n};\n\n/**\n * The ChangeProcessor partitions the stream of messages into transactions\n * by creating a {@link TransactionProcessor} when a transaction begins, and dispatching\n * messages to it until the commit is received.\n *\n * From https://www.postgresql.org/docs/current/protocol-logical-replication.html#PROTOCOL-LOGICAL-MESSAGES-FLOW :\n *\n * \"The logical replication protocol sends individual transactions one by one.\n * This means that all messages between a pair of Begin and Commit messages\n * belong to the same transaction.\"\n */\nexport class ChangeProcessor {\n readonly #db: StatementRunner;\n readonly #changeLog: ChangeLog;\n readonly #tableMetadata: TableMetadataTracker;\n readonly #mode: ChangeProcessorMode;\n readonly #failService: (lc: LogContext, err: unknown) => void;\n\n // The TransactionProcessor lazily loads table specs into this Map,\n // and reloads them after a schema change. 
It is cached here to avoid\n // reading them from the DB on every transaction.\n readonly #tableSpecs = new Map<string, LiteTableSpec>();\n\n #currentTx: TransactionProcessor | null = null;\n\n #failure: Error | undefined;\n\n constructor(\n db: StatementRunner,\n mode: ChangeProcessorMode,\n failService: (lc: LogContext, err: unknown) => void,\n ) {\n this.#db = db;\n this.#changeLog = new ChangeLog(db.db);\n this.#tableMetadata = new TableMetadataTracker(db.db);\n this.#mode = mode;\n this.#failService = failService;\n }\n\n #fail(lc: LogContext, err: unknown) {\n if (!this.#failure) {\n this.#currentTx?.abort(lc); // roll back any pending transaction.\n\n this.#failure = ensureError(err);\n\n if (!(err instanceof AbortError)) {\n // Propagate the failure up to the service.\n lc.error?.('Message Processing failed:', this.#failure);\n this.#failService(lc, this.#failure);\n }\n }\n }\n\n abort(lc: LogContext) {\n this.#fail(lc, new AbortError());\n }\n\n /** @return If a transaction was committed. */\n processMessage(\n lc: LogContext,\n downstream: ChangeStreamData,\n ): CommitResult | null {\n const [type, message] = downstream;\n if (this.#failure) {\n lc.debug?.(`Dropping ${message.tag}`);\n return null;\n }\n try {\n const watermark =\n type === 'begin'\n ? downstream[2].commitWatermark\n : type === 'commit'\n ? downstream[2].watermark\n : undefined;\n return this.#processMessage(lc, message, watermark);\n } catch (e) {\n this.#fail(lc, e);\n }\n return null;\n }\n\n #beginTransaction(\n lc: LogContext,\n commitVersion: string,\n jsonFormat: JSONFormat,\n ): TransactionProcessor {\n const start = Date.now();\n\n // litestream can technically hold the lock for an arbitrary amount of time\n // when checkpointing a large commit. Crashing on the busy-timeout in this\n // scenario will either produce a corrupt backup or otherwise prevent\n // replication from proceeding.\n //\n // Instead, retry the lock acquisition indefinitely. 
If this masks\n // an unknown deadlock situation, manual intervention will be necessary.\n for (let i = 0; ; i++) {\n try {\n return new TransactionProcessor(\n lc,\n this.#db,\n this.#mode,\n this.#changeLog,\n this.#tableMetadata,\n this.#tableSpecs,\n commitVersion,\n jsonFormat,\n );\n } catch (e) {\n if (e instanceof SqliteError && e.code === 'SQLITE_BUSY') {\n lc.warn?.(\n `SQLITE_BUSY for ${Date.now() - start} ms (attempt ${i + 1}). ` +\n `This is only expected if litestream is performing a large ` +\n `checkpoint.`,\n e,\n );\n continue;\n }\n throw e;\n }\n }\n }\n\n /** @return If a transaction was committed. */\n #processMessage(\n lc: LogContext,\n msg: Change,\n watermark: string | undefined,\n ): CommitResult | null {\n if (msg.tag === 'begin') {\n if (this.#currentTx) {\n throw new Error(`Already in a transaction ${stringify(msg)}`);\n }\n this.#currentTx = this.#beginTransaction(\n lc,\n must(watermark),\n msg.json ?? JSON_PARSED,\n );\n return null;\n }\n\n // For non-begin messages, there should be a #currentTx set.\n const tx = this.#currentTx;\n if (!tx) {\n throw new Error(\n `Received message outside of transaction: ${stringify(msg)}`,\n );\n }\n\n if (msg.tag === 'commit') {\n // Undef this.#currentTx to allow the assembly of the next transaction.\n this.#currentTx = null;\n\n assert(watermark, 'watermark is required for commit messages');\n return tx.processCommit(msg, watermark);\n }\n\n if (msg.tag === 'rollback') {\n this.#currentTx?.abort(lc);\n this.#currentTx = null;\n return null;\n }\n\n switch (msg.tag) {\n case 'insert':\n tx.processInsert(msg);\n break;\n case 'update':\n tx.processUpdate(msg);\n break;\n case 'delete':\n tx.processDelete(msg);\n break;\n case 'truncate':\n tx.processTruncate(msg);\n break;\n case 'create-table':\n tx.processCreateTable(msg);\n break;\n case 'rename-table':\n tx.processRenameTable(msg);\n break;\n case 'update-table-metadata':\n tx.processTableMetadata(msg);\n break;\n case 'add-column':\n 
tx.processAddColumn(msg);\n break;\n case 'update-column':\n tx.processUpdateColumn(msg);\n break;\n case 'drop-column':\n tx.processDropColumn(msg);\n break;\n case 'drop-table':\n tx.processDropTable(msg);\n break;\n case 'create-index':\n tx.processCreateIndex(msg);\n break;\n case 'drop-index':\n tx.processDropIndex(msg);\n break;\n case 'backfill':\n tx.processBackfill(msg);\n break;\n case 'backfill-completed':\n tx.processBackfillCompleted(msg);\n break;\n default:\n unreachable(msg);\n }\n\n return null;\n }\n}\n\n/**\n * The {@link TransactionProcessor} handles the sequence of messages from\n * upstream, from `BEGIN` to `COMMIT` and executes the corresponding mutations\n * on the {@link postgres.TransactionSql} on the replica.\n *\n * When applying row contents to the replica, the `_0_version` column is added / updated,\n * and a corresponding entry in the `ChangeLog` is added. The version value is derived\n * from the watermark of the preceding transaction (stored as the `nextStateVersion` in the\n * `ReplicationState` table).\n *\n * Side note: For non-streaming Postgres transactions, the commitEndLsn (and thus\n * commit watermark) is available in the `begin` message, so it could theoretically\n * be used for the row version of changes within the transaction. However, the\n * commitEndLsn is not available in the streaming (in-progress) transaction\n * protocol, and may not be available for CDC streams of other upstream types.\n * Therefore, the zero replication protocol is designed to not require the commit\n * watermark when a transaction begins.\n *\n * Also of interest is the fact that all INSERT Messages are logically applied as\n * UPSERTs. 
See {@link processInsert} for the underlying motivation.\n */\nclass TransactionProcessor {\n readonly #lc: LogContext;\n readonly #startMs: number;\n readonly #db: StatementRunner;\n readonly #mode: ChangeProcessorMode;\n readonly #version: LexiVersion;\n readonly #changeLog: ChangeLog;\n readonly #tableMetadata: TableMetadataTracker;\n readonly #tableSpecs: Map<string, LiteTableSpecWithReplicationStatus>;\n readonly #jsonFormat: JSONFormat;\n readonly #columnMetadata: ColumnMetadataStore;\n\n #pos = 0;\n #schemaChanged = false;\n #numChangeLogEntries = 0;\n\n constructor(\n lc: LogContext,\n db: StatementRunner,\n mode: ChangeProcessorMode,\n changeLog: ChangeLog,\n tableMetadata: TableMetadataTracker,\n tableSpecs: Map<string, LiteTableSpecWithReplicationStatus>,\n commitVersion: LexiVersion,\n jsonFormat: JSONFormat,\n ) {\n this.#startMs = Date.now();\n this.#mode = mode;\n this.#jsonFormat = jsonFormat;\n\n switch (mode) {\n case 'serving':\n // Although the Replicator / Incremental Syncer is the only writer of the replica,\n // a `BEGIN CONCURRENT` transaction is used to allow View Syncers to simulate\n // (i.e. and `ROLLBACK`) changes on historic snapshots of the database for the\n // purpose of IVM).\n //\n // This TransactionProcessor is the only logic that will actually\n // `COMMIT` any transactions to the replica.\n db.beginConcurrent();\n break;\n case 'backup':\n // For the backup-replicator (i.e. replication-manager), there are no View Syncers\n // and thus BEGIN CONCURRENT is not necessary. 
In fact, BEGIN CONCURRENT can cause\n // deadlocks with forced wal-checkpoints (which `litestream replicate` performs),\n // so it is important to use vanilla transactions in this configuration.\n db.beginImmediate();\n break;\n case 'initial-sync':\n // When the ChangeProcessor is used for initial-sync, the calling code\n // handles the transaction boundaries.\n break;\n default:\n unreachable();\n }\n this.#db = db;\n this.#version = commitVersion;\n this.#lc = lc.withContext('version', commitVersion);\n this.#changeLog = changeLog;\n this.#tableMetadata = tableMetadata;\n this.#tableSpecs = tableSpecs;\n // The column_metadata table is guaranteed to exist since the\n // replica-schema.ts migration to v8.\n this.#columnMetadata = must(ColumnMetadataStore.getInstance(db.db));\n\n if (this.#tableSpecs.size === 0) {\n this.#reloadTableSpecs();\n }\n }\n\n #reloadTableSpecs() {\n this.#tableSpecs.clear();\n // zqlSpecs include the primary key derived from unique indexes\n const zqlSpecs = computeZqlSpecs(this.#lc, this.#db.db, {\n includeBackfillingColumns: true,\n });\n for (let spec of listTables(this.#db.db)) {\n if (!spec.primaryKey) {\n spec = {\n ...spec,\n primaryKey: [\n ...(zqlSpecs.get(spec.name)?.tableSpec.primaryKey ?? []),\n ],\n };\n }\n this.#tableSpecs.set(spec.name, spec);\n }\n }\n\n #tableSpec(name: string) {\n return must(this.#tableSpecs.get(name), `Unknown table ${name}`);\n }\n\n #getKey(\n {row, numCols}: {row: LiteRow; numCols: number},\n {relation}: {relation: MessageRelation},\n ): LiteRowKey {\n const keyColumns =\n relation.rowKey.type !== 'full'\n ? 
relation.rowKey.columns // already a suitable key\n : this.#tableSpec(liteTableName(relation)).primaryKey;\n if (!keyColumns?.length) {\n throw new Error(\n `Cannot replicate table \"${relation.name}\" without a PRIMARY KEY or UNIQUE INDEX`,\n );\n }\n // For the common case (replica identity default), the row is already the\n // key for deletes and updates, in which case a new object can be avoided.\n if (numCols === keyColumns.length) {\n return row;\n }\n const key: Record<string, LiteValueType> = {};\n for (const col of keyColumns) {\n key[col] = row[col];\n }\n return key;\n }\n\n processInsert(insert: MessageInsert) {\n const table = liteTableName(insert.relation);\n const tableSpec = this.#tableSpec(table);\n const newRow = liteRow(insert.new, tableSpec, this.#jsonFormat);\n\n this.#upsert(table, {\n ...newRow.row,\n [ZERO_VERSION_COLUMN_NAME]: this.#version,\n });\n\n if (insert.relation.rowKey.columns.length === 0) {\n // INSERTs can be replicated for rows without a PRIMARY KEY or a\n // UNIQUE INDEX. These are written to the replica but not recorded\n // in the changeLog, because these rows cannot participate in IVM.\n //\n // (Once the table schema has been corrected to include a key, the\n // associated schema change will reset pipelines and data can be\n // loaded via hydration.)\n return;\n }\n const key = this.#getKey(newRow, insert);\n this.#logSetOp(table, key, getBackfilledColumns(newRow.row, tableSpec));\n }\n\n #upsert(table: string, row: LiteRow) {\n const columns = Object.keys(row).map(c => id(c));\n this.#db.run(\n `\n INSERT OR REPLACE INTO ${id(table)} (${columns.join(',')})\n VALUES (${Array.from({length: columns.length}).fill('?').join(',')})\n `,\n Object.values(row),\n );\n }\n\n // Updates by default are applied as UPDATE commands to support partial\n // row specifications from the change source. 
In particular, this is needed\n // to handle updates for which unchanged TOASTed values are not sent:\n //\n // https://www.postgresql.org/docs/current/protocol-logicalrep-message-formats.html#PROTOCOL-LOGICALREP-MESSAGE-FORMATS-TUPLEDATA\n //\n // However, in certain cases an UPDATE may be received for a row that\n // was not initially synced, such as when, an existing table is added\n // to the app's publication.\n //\n // In order to facilitate \"resumptive\" replication, the logic falls back to\n // an INSERT if the update did not change any rows.\n processUpdate(update: MessageUpdate) {\n const table = liteTableName(update.relation);\n const tableSpec = this.#tableSpec(table);\n const newRow = liteRow(update.new, tableSpec, this.#jsonFormat);\n const row = {...newRow.row, [ZERO_VERSION_COLUMN_NAME]: this.#version};\n\n // update.key is set with the old values if the key has changed.\n const oldKey = update.key\n ? this.#getKey(\n liteRow(update.key, this.#tableSpec(table), this.#jsonFormat),\n update,\n )\n : null;\n const newKey = this.#getKey(newRow, update);\n\n if (oldKey) {\n this.#logDeleteOp(table, oldKey, tableSpec.backfilling);\n }\n this.#logSetOp(table, newKey, getBackfilledColumns(newRow.row, tableSpec));\n\n const currKey = oldKey ?? 
newKey;\n const conds = Object.keys(currKey).map(col => `${id(col)}=?`);\n const setExprs = Object.keys(row).map(col => `${id(col)}=?`);\n\n const {changes} = this.#db.run(\n `\n UPDATE ${id(table)}\n SET ${setExprs.join(',')}\n WHERE ${conds.join(' AND ')}\n `,\n [...Object.values(row), ...Object.values(currKey)],\n );\n\n // If the UPDATE did not affect any rows, perform an UPSERT of the\n // new row for resumptive replication.\n if (changes === 0) {\n this.#upsert(table, row);\n }\n }\n\n processDelete(del: MessageDelete) {\n const table = liteTableName(del.relation);\n const tableSpec = this.#tableSpec(table);\n const rowKey = this.#getKey(\n liteRow(del.key, tableSpec, this.#jsonFormat),\n del,\n );\n\n this.#delete(table, rowKey);\n this.#logDeleteOp(table, rowKey, tableSpec.backfilling);\n }\n\n #delete(table: string, rowKey: LiteRowKey) {\n const conds = Object.keys(rowKey).map(col => `${id(col)}=?`);\n this.#db.run(\n `DELETE FROM ${id(table)} WHERE ${conds.join(' AND ')}`,\n Object.values(rowKey),\n );\n }\n\n processTruncate(truncate: MessageTruncate) {\n for (const relation of truncate.relations) {\n const table = liteTableName(relation);\n // Update replica data.\n this.#db.run(`DELETE FROM ${id(table)}`);\n\n // Update change log.\n this.#logTruncateOp(table);\n }\n }\n\n processCreateTable(create: TableCreate) {\n if (create.metadata) {\n this.#tableMetadata.set(create.spec, create.metadata);\n }\n const table = mapPostgresToLite(create.spec);\n this.#db.db.exec(createLiteTableStatement(table));\n\n // Write to metadata table\n for (const [colName, colSpec] of Object.entries(create.spec.columns)) {\n this.#columnMetadata.insert(\n table.name,\n colName,\n colSpec,\n create.backfill?.[colName],\n );\n }\n\n if (\n Object.keys(create.backfill ?? {}).length ===\n Object.keys(create.spec.columns).length\n ) {\n this.#reloadTableSpecs();\n } else {\n // Make the table visible immediately unless all of the columns are\n // being backfilled. 
In the backfill case, the version bump will happen\n // with the backfill is complete.\n this.#logResetOp(table.name);\n }\n this.#lc.info?.(create.tag, table.name);\n }\n\n processTableMetadata(msg: TableUpdateMetadata) {\n this.#tableMetadata.set(msg.table, msg.new);\n }\n\n processRenameTable(rename: TableRename) {\n this.#tableMetadata.rename(rename.old, rename.new);\n\n const oldName = liteTableName(rename.old);\n const newName = liteTableName(rename.new);\n this.#db.db.exec(`ALTER TABLE ${id(oldName)} RENAME TO ${id(newName)}`);\n\n // Rename in metadata table\n this.#columnMetadata.renameTable(oldName, newName);\n\n this.#bumpVersions(newName);\n this.#logResetOp(oldName);\n this.#lc.info?.(rename.tag, oldName, newName);\n }\n\n processAddColumn(msg: ColumnAdd) {\n if (msg.tableMetadata) {\n this.#tableMetadata.set(msg.table, msg.tableMetadata);\n }\n const table = liteTableName(msg.table);\n const {name} = msg.column;\n const spec = mapPostgresToLiteColumn(table, msg.column);\n this.#db.db.exec(\n `ALTER TABLE ${id(table)} ADD ${id(name)} ${liteColumnDef(spec)}`,\n );\n\n // Write to metadata table\n this.#columnMetadata.insert(table, name, msg.column.spec, msg.backfill);\n\n if (msg.backfill) {\n this.#reloadTableSpecs();\n } else {\n // Make the new column visible immediately if it's not being backfilled.\n // Otherwise, the version bump will happen with the backfill is complete.\n this.#bumpVersions(table);\n }\n this.#lc.info?.(msg.tag, table, msg.column);\n }\n\n processUpdateColumn(msg: ColumnUpdate) {\n const table = liteTableName(msg.table);\n let oldName = msg.old.name;\n const newName = msg.new.name;\n\n // update-column can ignore defaults because it does not change the values\n // in existing rows.\n //\n // https://www.postgresql.org/docs/current/sql-altertable.html#SQL-ALTERTABLE-DESC-SET-DROP-DEFAULT\n //\n // \"The new default value will only apply in subsequent INSERT or UPDATE\n // commands; it does not cause rows already in the table to 
change.\"\n //\n // This allows support for _changing_ column defaults to any expression,\n // since it does not affect what the replica needs to do.\n const oldSpec = mapPostgresToLiteColumn(table, msg.old, 'ignore-default');\n const newSpec = mapPostgresToLiteColumn(table, msg.new, 'ignore-default');\n\n // The only updates that are relevant are the column name and the data type.\n if (oldName === newName && oldSpec.dataType === newSpec.dataType) {\n this.#lc.info?.(msg.tag, 'no thing to update', oldSpec, newSpec);\n return;\n }\n // If the data type changes, we have to make a new column with the new data type\n // and copy the values over.\n if (oldSpec.dataType !== newSpec.dataType) {\n // Remember (and drop) the indexes that reference the column.\n const indexes = listIndexes(this.#db.db).filter(\n idx => idx.tableName === table && oldName in idx.columns,\n );\n const stmts = indexes.map(idx => `DROP INDEX IF EXISTS ${id(idx.name)};`);\n const tmpName = `tmp.${newName}`;\n stmts.push(`\n ALTER TABLE ${id(table)} ADD ${id(tmpName)} ${liteColumnDef(newSpec)};\n UPDATE ${id(table)} SET ${id(tmpName)} = ${id(oldName)};\n ALTER TABLE ${id(table)} DROP ${id(oldName)};\n `);\n for (const idx of indexes) {\n // Re-create the indexes to reference the new column.\n idx.columns[tmpName] = idx.columns[oldName];\n delete idx.columns[oldName];\n stmts.push(createLiteIndexStatement(idx));\n }\n this.#db.db.exec(stmts.join(''));\n oldName = tmpName;\n }\n if (oldName !== newName) {\n this.#db.db.exec(\n `ALTER TABLE ${id(table)} RENAME ${id(oldName)} TO ${id(newName)}`,\n );\n }\n\n // Update metadata table\n this.#columnMetadata.update(\n table,\n msg.old.name,\n msg.new.name,\n msg.new.spec,\n );\n\n this.#bumpVersions(table);\n this.#lc.info?.(msg.tag, table, msg.new);\n }\n\n processDropColumn(msg: ColumnDrop) {\n const table = liteTableName(msg.table);\n const {column} = msg;\n this.#db.db.exec(`ALTER TABLE ${id(table)} DROP ${id(column)}`);\n\n // Delete from metadata 
table\n this.#columnMetadata.deleteColumn(table, column);\n\n this.#bumpVersions(table);\n this.#lc.info?.(msg.tag, table, column);\n }\n\n processDropTable(drop: TableDrop) {\n this.#tableMetadata.drop(drop.id);\n\n const name = liteTableName(drop.id);\n this.#db.db.exec(`DROP TABLE IF EXISTS ${id(name)}`);\n\n // Delete from metadata table\n this.#columnMetadata.deleteTable(name);\n\n this.#logResetOp(name);\n this.#lc.info?.(drop.tag, name);\n }\n\n processCreateIndex(create: IndexCreate) {\n const index = mapPostgresToLiteIndex(create.spec);\n this.#db.db.exec(createLiteIndexStatement(index));\n\n // indexes affect tables visibility (e.g. sync-ability is gated on\n // having a unique index), so reset pipelines to refresh table schemas.\n // However, the reset is not necessary if the index is for a table\n // that is not yet visible due to backfilling.\n const tableSpec = must(this.#tableSpecs.get(index.tableName));\n if (\n (tableSpec.backfilling ?? []).length ===\n Object.entries(tableSpec.columns).length - 1 // don't count _0_version\n ) {\n this.#reloadTableSpecs();\n } else {\n this.#logResetOp(index.tableName);\n }\n this.#lc.info?.(create.tag, index.name);\n }\n\n processDropIndex(drop: IndexDrop) {\n const name = liteTableName(drop.id);\n this.#db.db.exec(`DROP INDEX IF EXISTS ${id(name)}`);\n this.#lc.info?.(drop.tag, name);\n }\n\n #bumpVersions(table: string) {\n this.#db.run(\n `UPDATE ${id(table)} SET ${id(ZERO_VERSION_COLUMN_NAME)} = ?`,\n this.#version,\n );\n this.#logResetOp(table);\n }\n\n /**\n * @param backfilledColumns `backfilling` columns for which values were set\n */\n #logSetOp(\n table: string,\n key: LiteRowKey,\n backfilledColumns: string[] | undefined,\n ) {\n // The \"serving\" replicator always writes to the change-log (for IVM).\n // The \"backup\" replicator only needs to write to the change log\n // when writing columns that are being backfilled.\n if (this.#mode === 'serving' || backfilledColumns !== undefined) {\n 
this.#changeLog.logSetOp(\n this.#version,\n this.#pos++,\n table,\n key,\n backfilledColumns,\n );\n this.#numChangeLogEntries++;\n }\n }\n\n #logDeleteOp(table: string, key: LiteRowKey, backfilling?: string[]) {\n // The \"serving\" replicator always writes to the change-log (for IVM).\n // The \"backup\" replicator only needs to write to the change log\n // when writing columns that are being backfilled.\n if (this.#mode === 'serving' || backfilling?.length) {\n this.#changeLog.logDeleteOp(this.#version, this.#pos++, table, key);\n this.#numChangeLogEntries++;\n }\n }\n\n #logTruncateOp(table: string) {\n if (this.#mode === 'serving') {\n this.#changeLog.logTruncateOp(this.#version, table);\n this.#numChangeLogEntries++;\n }\n }\n\n #logResetOp(table: string) {\n this.#schemaChanged = true;\n if (this.#mode === 'serving') {\n this.#changeLog.logResetOp(this.#version, table);\n this.#numChangeLogEntries++;\n }\n this.#reloadTableSpecs();\n }\n\n processBackfill({relation, watermark, columns, rowValues}: MessageBackfill) {\n const tableName = liteTableName(relation);\n const tableSpec = must(this.#tableSpecs.get(tableName));\n const rowKeyCols = relation.rowKey.columns;\n const cols = [...rowKeyCols, ...columns];\n\n // Common parts of the INSERT sql statement.\n const insertColsStr = [...cols, ZERO_VERSION_COLUMN_NAME].map(id).join(',');\n const qMarks = Array.from({length: cols.length + 1}, () => '?').join(',');\n const rowKeyColsStr = rowKeyCols.map(id).join(',');\n\n let backfilled = 0;\n let skipped = 0;\n for (const v of rowValues) {\n const row = liteRow(\n Object.fromEntries(cols.map((c, i) => [c, v[i]])),\n tableSpec,\n this.#jsonFormat,\n );\n const rowKey = this.#getKey(row, {relation});\n const rowOp = this.#changeLog.getLatestRowOp(tableName, rowKey);\n if (rowOp?.op === DEL_OP && rowOp.stateVersion > watermark) {\n skipped++;\n continue; // the row was deleted after the backfill snapshot\n }\n const updates =\n rowOp?.op === SET_OP\n ? 
cols.filter(\n c => (rowOp.backfillingColumnVersions[c] ?? '') <= watermark,\n )\n : cols;\n if (updates.length === 0) {\n // row already has newer values for all backfilling columns.\n skipped++;\n continue;\n }\n const updateStmts = updates.map(col => `${id(col)}=excluded.${id(col)}`);\n this.#db.run(\n /*sql*/ `\n INSERT INTO ${id(tableName)} (${insertColsStr}) VALUES (${qMarks})\n ON CONFLICT (${rowKeyColsStr})\n DO UPDATE SET ${updateStmts.join(',')};\n `,\n ...Object.values(row.row),\n watermark, // the _0_version for new rows (i.e. table backfill)\n );\n backfilled++;\n }\n\n this.#lc.debug?.(\n `backfilled ${backfilled} rows (skipped ${skipped}) into ${tableName}`,\n );\n }\n\n #completedBackfill: DownloadStatus | undefined;\n\n processBackfillCompleted({relation, columns, status}: BackfillCompleted) {\n const tableName = liteTableName(relation);\n const rowKeyCols = relation.rowKey.columns;\n const cols = [...rowKeyCols, ...columns];\n\n const columnMetadata = must(ColumnMetadataStore.getInstance(this.#db.db));\n for (const col of cols) {\n columnMetadata.clearBackfilling(tableName, col);\n }\n // Given that new columns are being exposed for every row in the table, bump the\n // row version for all rows.\n this.#bumpVersions(tableName);\n if (status) {\n this.#completedBackfill = {table: tableName, columns: cols, ...status};\n }\n this.#lc.info?.(`finished backfilling ${tableName}`);\n\n // Note that there is no need to clear the backfillingColumnVersions values\n // in the changeLog. It could theoretically be done for clarity but:\n // (1) it could be non-trivial in terms of latency introduced and\n // (2) the data must be preserved if _other_ columns are in the process\n // of being backfilled\n //\n // Thus, for speed and simplicity, the values are left as is. 
(Note that\n // subsequent replicated changes to those rows will clear the values if\n // no backfills are in progress).\n }\n\n processCommit(commit: MessageCommit, watermark: string): CommitResult {\n if (watermark !== this.#version) {\n throw new Error(\n `'commit' version ${watermark} does not match 'begin' version ${\n this.#version\n }: ${stringify(commit)}`,\n );\n }\n updateReplicationWatermark(this.#db, watermark);\n\n if (this.#schemaChanged) {\n const start = Date.now();\n this.#db.db.pragma('optimize');\n this.#lc.info?.(\n `PRAGMA optimized after schema change (${Date.now() - start} ms)`,\n );\n }\n\n if (this.#mode !== 'initial-sync') {\n this.#db.commit();\n }\n\n const elapsedMs = Date.now() - this.#startMs;\n this.#lc.debug?.(`Committed tx@${this.#version} (${elapsedMs} ms)`);\n\n return {\n watermark,\n completedBackfill: this.#completedBackfill,\n schemaUpdated: this.#schemaChanged,\n changeLogUpdated: this.#numChangeLogEntries > 0,\n };\n }\n\n abort(lc: LogContext) {\n lc.info?.(`aborting transaction ${this.#version}`);\n this.#db.rollback();\n }\n}\n\nfunction getBackfilledColumns(\n row: LiteRow,\n {backfilling}: LiteTableSpecWithReplicationStatus,\n): string[] | undefined {\n if (!backfilling?.length) {\n return undefined; // common case\n }\n return backfilling.filter(col => col in row);\n}\n\nfunction ensureError(err: unknown): Error {\n if (err instanceof Error) {\n return err;\n }\n const error = new Error();\n error.cause = err;\n return 
error;\n}\n"],"names":[],"mappings":";;;;;;;;;;;;;;;;AAsFO,MAAM,gBAAgB;AAAA,EAClB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA;AAAA;AAAA;AAAA,EAKA,kCAAkB,IAAA;AAAA,EAE3B,aAA0C;AAAA,EAE1C;AAAA,EAEA,YACE,IACA,MACA,aACA;AACA,SAAK,MAAM;AACX,SAAK,aAAa,IAAI,UAAU,GAAG,EAAE;AACrC,SAAK,iBAAiB,IAAI,qBAAqB,GAAG,EAAE;AACpD,SAAK,QAAQ;AACb,SAAK,eAAe;AAAA,EACtB;AAAA,EAEA,MAAM,IAAgB,KAAc;AAClC,QAAI,CAAC,KAAK,UAAU;AAClB,WAAK,YAAY,MAAM,EAAE;AAEzB,WAAK,WAAW,YAAY,GAAG;AAE/B,UAAI,EAAE,eAAe,aAAa;AAEhC,WAAG,QAAQ,8BAA8B,KAAK,QAAQ;AACtD,aAAK,aAAa,IAAI,KAAK,QAAQ;AAAA,MACrC;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAM,IAAgB;AACpB,SAAK,MAAM,IAAI,IAAI,WAAA,CAAY;AAAA,EACjC;AAAA;AAAA,EAGA,eACE,IACA,YACqB;AACrB,UAAM,CAAC,MAAM,OAAO,IAAI;AACxB,QAAI,KAAK,UAAU;AACjB,SAAG,QAAQ,YAAY,QAAQ,GAAG,EAAE;AACpC,aAAO;AAAA,IACT;AACA,QAAI;AACF,YAAM,YACJ,SAAS,UACL,WAAW,CAAC,EAAE,kBACd,SAAS,WACP,WAAW,CAAC,EAAE,YACd;AACR,aAAO,KAAK,gBAAgB,IAAI,SAAS,SAAS;AAAA,IACpD,SAAS,GAAG;AACV,WAAK,MAAM,IAAI,CAAC;AAAA,IAClB;AACA,WAAO;AAAA,EACT;AAAA,EAEA,kBACE,IACA,eACA,YACsB;AACtB,UAAM,QAAQ,KAAK,IAAA;AASnB,aAAS,IAAI,KAAK,KAAK;AACrB,UAAI;AACF,eAAO,IAAI;AAAA,UACT;AAAA,UACA,KAAK;AAAA,UACL,KAAK;AAAA,UACL,KAAK;AAAA,UACL,KAAK;AAAA,UACL,KAAK;AAAA,UACL;AAAA,UACA;AAAA,QAAA;AAAA,MAEJ,SAAS,GAAG;AACV,YAAI,aAAa,eAAe,EAAE,SAAS,eAAe;AACxD,aAAG;AAAA,YACD,mBAAmB,KAAK,IAAA,IAAQ,KAAK,gBAAgB,IAAI,CAAC;AAAA,YAG1D;AAAA,UAAA;AAEF;AAAA,QACF;AACA,cAAM;AAAA,MACR;AAAA,IACF;AAAA,EACF;AAAA;AAAA,EAGA,gBACE,IACA,KACA,WACqB;AACrB,QAAI,IAAI,QAAQ,SAAS;AACvB,UAAI,KAAK,YAAY;AACnB,cAAM,IAAI,MAAM,4BAA4B,UAAU,GAAG,CAAC,EAAE;AAAA,MAC9D;AACA,WAAK,aAAa,KAAK;AAAA,QACrB;AAAA,QACA,KAAK,SAAS;AAAA,QACd,IAAI,QAAQ;AAAA,MAAA;AAEd,aAAO;AAAA,IACT;AAGA,UAAM,KAAK,KAAK;AAChB,QAAI,CAAC,IAAI;AACP,YAAM,IAAI;AAAA,QACR,4CAA4C,UAAU,GAAG,CAAC;AAAA,MAAA;AAAA,IAE9D;AAEA,QAAI,IAAI,QAAQ,UAAU;AAExB,WAAK,aAAa;AAElB,aAAO,WAAW,2CAA2C;AAC7D,aAAO,GAAG,cAAc,KAAK,SAAS;AAAA,IACxC;AAEA,QAAI,IAAI,QAAQ,YAAY;AAC1B,WAAK,YAAY,MAAM,EAAE;AACzB,WAAK,aAAa;AAClB,aAAO;AAAA,IACT;AAEA,YAAQ,IAAI,KAAA;AAAA,MACV,KAAK;AACH,WAAG,cAAc,
GAAG;AACpB;AAAA,MACF,KAAK;AACH,WAAG,cAAc,GAAG;AACpB;AAAA,MACF,KAAK;AACH,WAAG,cAAc,GAAG;AACpB;AAAA,MACF,KAAK;AACH,WAAG,gBAAgB,GAAG;AACtB;AAAA,MACF,KAAK;AACH,WAAG,mBAAmB,GAAG;AACzB;AAAA,MACF,KAAK;AACH,WAAG,mBAAmB,GAAG;AACzB;AAAA,MACF,KAAK;AACH,WAAG,qBAAqB,GAAG;AAC3B;AAAA,MACF,KAAK;AACH,WAAG,iBAAiB,GAAG;AACvB;AAAA,MACF,KAAK;AACH,WAAG,oBAAoB,GAAG;AAC1B;AAAA,MACF,KAAK;AACH,WAAG,kBAAkB,GAAG;AACxB;AAAA,MACF,KAAK;AACH,WAAG,iBAAiB,GAAG;AACvB;AAAA,MACF,KAAK;AACH,WAAG,mBAAmB,GAAG;AACzB;AAAA,MACF,KAAK;AACH,WAAG,iBAAiB,GAAG;AACvB;AAAA,MACF,KAAK;AACH,WAAG,gBAAgB,GAAG;AACtB;AAAA,MACF,KAAK;AACH,WAAG,yBAAyB,GAAG;AAC/B;AAAA,MACF;AACE,oBAAe;AAAA,IAAA;AAGnB,WAAO;AAAA,EACT;AACF;AAuBA,MAAM,qBAAqB;AAAA,EAChB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAET,OAAO;AAAA,EACP,iBAAiB;AAAA,EACjB,uBAAuB;AAAA,EAEvB,YACE,IACA,IACA,MACA,WACA,eACA,YACA,eACA,YACA;AACA,SAAK,WAAW,KAAK,IAAA;AACrB,SAAK,QAAQ;AACb,SAAK,cAAc;AAEnB,YAAQ,MAAA;AAAA,MACN,KAAK;AAQH,WAAG,gBAAA;AACH;AAAA,MACF,KAAK;AAKH,WAAG,eAAA;AACH;AAAA,MACF,KAAK;AAGH;AAAA,MACF;AACE,oBAAA;AAAA,IAAY;AAEhB,SAAK,MAAM;AACX,SAAK,WAAW;AAChB,SAAK,MAAM,GAAG,YAAY,WAAW,aAAa;AAClD,SAAK,aAAa;AAClB,SAAK,iBAAiB;AACtB,SAAK,cAAc;AAGnB,SAAK,kBAAkB,KAAK,oBAAoB,YAAY,GAAG,EAAE,CAAC;AAElE,QAAI,KAAK,YAAY,SAAS,GAAG;AAC/B,WAAK,kBAAA;AAAA,IACP;AAAA,EACF;AAAA,EAEA,oBAAoB;AAClB,SAAK,YAAY,MAAA;AAEjB,UAAM,WAAW,gBAAgB,KAAK,KAAK,KAAK,IAAI,IAAI;AAAA,MACtD,2BAA2B;AAAA,IAAA,CAC5B;AACD,aAAS,QAAQ,WAAW,KAAK,IAAI,EAAE,GAAG;AACxC,UAAI,CAAC,KAAK,YAAY;AACpB,eAAO;AAAA,UACL,GAAG;AAAA,UACH,YAAY;AAAA,YACV,GAAI,SAAS,IAAI,KAAK,IAAI,GAAG,UAAU,cAAc,CAAA;AAAA,UAAC;AAAA,QACxD;AAAA,MAEJ;AACA,WAAK,YAAY,IAAI,KAAK,MAAM,IAAI;AAAA,IACtC;AAAA,EACF;AAAA,EAEA,WAAW,MAAc;AACvB,WAAO,KAAK,KAAK,YAAY,IAAI,IAAI,GAAG,iBAAiB,IAAI,EAAE;AAAA,EACjE;AAAA,EAEA,QACE,EAAC,KAAK,WACN,EAAC,YACW;AACZ,UAAM,aACJ,SAAS,OAAO,SAAS,SACrB,SAAS,OAAO,UAChB,KAAK,WAAW,cAAc,QAAQ,CAAC,EAAE;AAC/C,QAAI,CAAC,YAAY,QAAQ;AACvB,YAAM,IAAI;AAAA,QACR,2BAA2B,SAAS,IAAI;AAAA,MAAA;AAAA,IAE5C;AAGA,QAAI,
YAAY,WAAW,QAAQ;AACjC,aAAO;AAAA,IACT;AACA,UAAM,MAAqC,CAAA;AAC3C,eAAW,OAAO,YAAY;AAC5B,UAAI,GAAG,IAAI,IAAI,GAAG;AAAA,IACpB;AACA,WAAO;AAAA,EACT;AAAA,EAEA,cAAc,QAAuB;AACnC,UAAM,QAAQ,cAAc,OAAO,QAAQ;AAC3C,UAAM,YAAY,KAAK,WAAW,KAAK;AACvC,UAAM,SAAS,QAAQ,OAAO,KAAK,WAAW,KAAK,WAAW;AAE9D,SAAK,QAAQ,OAAO;AAAA,MAClB,GAAG,OAAO;AAAA,MACV,CAAC,wBAAwB,GAAG,KAAK;AAAA,IAAA,CAClC;AAED,QAAI,OAAO,SAAS,OAAO,QAAQ,WAAW,GAAG;AAQ/C;AAAA,IACF;AACA,UAAM,MAAM,KAAK,QAAQ,QAAQ,MAAM;AACvC,SAAK,UAAU,OAAO,KAAK,qBAAqB,OAAO,KAAK,SAAS,CAAC;AAAA,EACxE;AAAA,EAEA,QAAQ,OAAe,KAAc;AACnC,UAAM,UAAU,OAAO,KAAK,GAAG,EAAE,IAAI,CAAA,MAAK,GAAG,CAAC,CAAC;AAC/C,SAAK,IAAI;AAAA,MACP;AAAA,+BACyB,GAAG,KAAK,CAAC,KAAK,QAAQ,KAAK,GAAG,CAAC;AAAA,kBAC5C,MAAM,KAAK,EAAC,QAAQ,QAAQ,QAAO,EAAE,KAAK,GAAG,EAAE,KAAK,GAAG,CAAC;AAAA;AAAA,MAEpE,OAAO,OAAO,GAAG;AAAA,IAAA;AAAA,EAErB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,cAAc,QAAuB;AACnC,UAAM,QAAQ,cAAc,OAAO,QAAQ;AAC3C,UAAM,YAAY,KAAK,WAAW,KAAK;AACvC,UAAM,SAAS,QAAQ,OAAO,KAAK,WAAW,KAAK,WAAW;AAC9D,UAAM,MAAM,EAAC,GAAG,OAAO,KAAK,CAAC,wBAAwB,GAAG,KAAK,SAAA;AAG7D,UAAM,SAAS,OAAO,MAClB,KAAK;AAAA,MACH,QAAQ,OAAO,KAAK,KAAK,WAAW,KAAK,GAAG,KAAK,WAAW;AAAA,MAC5D;AAAA,IAAA,IAEF;AACJ,UAAM,SAAS,KAAK,QAAQ,QAAQ,MAAM;AAE1C,QAAI,QAAQ;AACV,WAAK,aAAa,OAAO,QAAQ,UAAU,WAAW;AAAA,IACxD;AACA,SAAK,UAAU,OAAO,QAAQ,qBAAqB,OAAO,KAAK,SAAS,CAAC;AAEzE,UAAM,UAAU,UAAU;AAC1B,UAAM,QAAQ,OAAO,KAAK,OAAO,EAAE,IAAI,CAAA,QAAO,GAAG,GAAG,GAAG,CAAC,IAAI;AAC5D,UAAM,WAAW,OAAO,KAAK,GAAG,EAAE,IAAI,CAAA,QAAO,GAAG,GAAG,GAAG,CAAC,IAAI;AAE3D,UAAM,EAAC,QAAA,IAAW,KAAK,IAAI;AAAA,MACzB;AAAA,eACS,GAAG,KAAK,CAAC;AAAA,cACV,SAAS,KAAK,GAAG,CAAC;AAAA,gBAChB,MAAM,KAAK,OAAO,CAAC;AAAA;AAAA,MAE7B,CAAC,GAAG,OAAO,OAAO,GAAG,GAAG,GAAG,OAAO,OAAO,OAAO,CAAC;AAAA,IAAA;AAKnD,QAAI,YAAY,GAAG;AACjB,WAAK,QAAQ,OAAO,GAAG;AAAA,IACzB;AAAA,EACF;AAAA,EAEA,cAAc,KAAoB;AAChC,UAAM,QAAQ,cAAc,IAAI,QAAQ;AACxC,UAAM,YAAY,KAAK,WAAW,KAAK;AACvC,UAAM,SAAS,KAAK;AAAA,MAClB,QAAQ,IAAI,KAAK,WAAW,KAAK,WAAW;AAAA,MAC5C;AAAA,IAAA;AAGF,SAAK,QAAQ,OAAO,MAAM;AAC1B,SAAK,aAAa,OAAO,QAAQ,U
AAU,WAAW;AAAA,EACxD;AAAA,EAEA,QAAQ,OAAe,QAAoB;AACzC,UAAM,QAAQ,OAAO,KAAK,MAAM,EAAE,IAAI,CAAA,QAAO,GAAG,GAAG,GAAG,CAAC,IAAI;AAC3D,SAAK,IAAI;AAAA,MACP,eAAe,GAAG,KAAK,CAAC,UAAU,MAAM,KAAK,OAAO,CAAC;AAAA,MACrD,OAAO,OAAO,MAAM;AAAA,IAAA;AAAA,EAExB;AAAA,EAEA,gBAAgB,UAA2B;AACzC,eAAW,YAAY,SAAS,WAAW;AACzC,YAAM,QAAQ,cAAc,QAAQ;AAEpC,WAAK,IAAI,IAAI,eAAe,GAAG,KAAK,CAAC,EAAE;AAGvC,WAAK,eAAe,KAAK;AAAA,IAC3B;AAAA,EACF;AAAA,EAEA,mBAAmB,QAAqB;AACtC,QAAI,OAAO,UAAU;AACnB,WAAK,eAAe,IAAI,OAAO,MAAM,OAAO,QAAQ;AAAA,IACtD;AACA,UAAM,QAAQ,kBAAkB,OAAO,IAAI;AAC3C,SAAK,IAAI,GAAG,KAAK,yBAAyB,KAAK,CAAC;AAGhD,eAAW,CAAC,SAAS,OAAO,KAAK,OAAO,QAAQ,OAAO,KAAK,OAAO,GAAG;AACpE,WAAK,gBAAgB;AAAA,QACnB,MAAM;AAAA,QACN;AAAA,QACA;AAAA,QACA,OAAO,WAAW,OAAO;AAAA,MAAA;AAAA,IAE7B;AAEA,QACE,OAAO,KAAK,OAAO,YAAY,CAAA,CAAE,EAAE,WACnC,OAAO,KAAK,OAAO,KAAK,OAAO,EAAE,QACjC;AACA,WAAK,kBAAA;AAAA,IACP,OAAO;AAIL,WAAK,YAAY,MAAM,IAAI;AAAA,IAC7B;AACA,SAAK,IAAI,OAAO,OAAO,KAAK,MAAM,IAAI;AAAA,EACxC;AAAA,EAEA,qBAAqB,KAA0B;AAC7C,SAAK,eAAe,IAAI,IAAI,OAAO,IAAI,GAAG;AAAA,EAC5C;AAAA,EAEA,mBAAmB,QAAqB;AACtC,SAAK,eAAe,OAAO,OAAO,KAAK,OAAO,GAAG;AAEjD,UAAM,UAAU,cAAc,OAAO,GAAG;AACxC,UAAM,UAAU,cAAc,OAAO,GAAG;AACxC,SAAK,IAAI,GAAG,KAAK,eAAe,GAAG,OAAO,CAAC,cAAc,GAAG,OAAO,CAAC,EAAE;AAGtE,SAAK,gBAAgB,YAAY,SAAS,OAAO;AAEjD,SAAK,cAAc,OAAO;AAC1B,SAAK,YAAY,OAAO;AACxB,SAAK,IAAI,OAAO,OAAO,KAAK,SAAS,OAAO;AAAA,EAC9C;AAAA,EAEA,iBAAiB,KAAgB;AAC/B,QAAI,IAAI,eAAe;AACrB,WAAK,eAAe,IAAI,IAAI,OAAO,IAAI,aAAa;AAAA,IACtD;AACA,UAAM,QAAQ,cAAc,IAAI,KAAK;AACrC,UAAM,EAAC,SAAQ,IAAI;AACnB,UAAM,OAAO,wBAAwB,OAAO,IAAI,MAAM;AACtD,SAAK,IAAI,GAAG;AAAA,MACV,eAAe,GAAG,KAAK,CAAC,QAAQ,GAAG,IAAI,CAAC,IAAI,cAAc,IAAI,CAAC;AAAA,IAAA;AAIjE,SAAK,gBAAgB,OAAO,OAAO,MAAM,IAAI,OAAO,MAAM,IAAI,QAAQ;AAEtE,QAAI,IAAI,UAAU;AAChB,WAAK,kBAAA;AAAA,IACP,OAAO;AAGL,WAAK,cAAc,KAAK;AAAA,IAC1B;AACA,SAAK,IAAI,OAAO,IAAI,KAAK,OAAO,IAAI,MAAM;AAAA,EAC5C;AAAA,EAEA,oBAAoB,KAAmB;AACrC,UAAM,QAAQ,cAAc,IAAI,KAAK;AACrC,QAAI,UAAU,IAAI,IAAI;AACtB,UAAM,UAAU,IAAI,IAAI;AAYxB,UAAM,UAAU,wBAAwB,OAAO,IAAI,KAAK,gBAAgB;AACxE,UAAM,UAAU,wBAAw
B,OAAO,IAAI,KAAK,gBAAgB;AAGxE,QAAI,YAAY,WAAW,QAAQ,aAAa,QAAQ,UAAU;AAChE,WAAK,IAAI,OAAO,IAAI,KAAK,sBAAsB,SAAS,OAAO;AAC/D;AAAA,IACF;AAGA,QAAI,QAAQ,aAAa,QAAQ,UAAU;AAEzC,YAAM,UAAU,YAAY,KAAK,IAAI,EAAE,EAAE;AAAA,QACvC,CAAA,QAAO,IAAI,cAAc,SAAS,WAAW,IAAI;AAAA,MAAA;AAEnD,YAAM,QAAQ,QAAQ,IAAI,CAAA,QAAO,wBAAwB,GAAG,IAAI,IAAI,CAAC,GAAG;AACxE,YAAM,UAAU,OAAO,OAAO;AAC9B,YAAM,KAAK;AAAA,sBACK,GAAG,KAAK,CAAC,QAAQ,GAAG,OAAO,CAAC,IAAI,cAAc,OAAO,CAAC;AAAA,iBAC3D,GAAG,KAAK,CAAC,QAAQ,GAAG,OAAO,CAAC,MAAM,GAAG,OAAO,CAAC;AAAA,sBACxC,GAAG,KAAK,CAAC,SAAS,GAAG,OAAO,CAAC;AAAA,SAC1C;AACH,iBAAW,OAAO,SAAS;AAEzB,YAAI,QAAQ,OAAO,IAAI,IAAI,QAAQ,OAAO;AAC1C,eAAO,IAAI,QAAQ,OAAO;AAC1B,cAAM,KAAK,yBAAyB,GAAG,CAAC;AAAA,MAC1C;AACA,WAAK,IAAI,GAAG,KAAK,MAAM,KAAK,EAAE,CAAC;AAC/B,gBAAU;AAAA,IACZ;AACA,QAAI,YAAY,SAAS;AACvB,WAAK,IAAI,GAAG;AAAA,QACV,eAAe,GAAG,KAAK,CAAC,WAAW,GAAG,OAAO,CAAC,OAAO,GAAG,OAAO,CAAC;AAAA,MAAA;AAAA,IAEpE;AAGA,SAAK,gBAAgB;AAAA,MACnB;AAAA,MACA,IAAI,IAAI;AAAA,MACR,IAAI,IAAI;AAAA,MACR,IAAI,IAAI;AAAA,IAAA;AAGV,SAAK,cAAc,KAAK;AACxB,SAAK,IAAI,OAAO,IAAI,KAAK,OAAO,IAAI,GAAG;AAAA,EACzC;AAAA,EAEA,kBAAkB,KAAiB;AACjC,UAAM,QAAQ,cAAc,IAAI,KAAK;AACrC,UAAM,EAAC,WAAU;AACjB,SAAK,IAAI,GAAG,KAAK,eAAe,GAAG,KAAK,CAAC,SAAS,GAAG,MAAM,CAAC,EAAE;AAG9D,SAAK,gBAAgB,aAAa,OAAO,MAAM;AAE/C,SAAK,cAAc,KAAK;AACxB,SAAK,IAAI,OAAO,IAAI,KAAK,OAAO,MAAM;AAAA,EACxC;AAAA,EAEA,iBAAiB,MAAiB;AAChC,SAAK,eAAe,KAAK,KAAK,EAAE;AAEhC,UAAM,OAAO,cAAc,KAAK,EAAE;AAClC,SAAK,IAAI,GAAG,KAAK,wBAAwB,GAAG,IAAI,CAAC,EAAE;AAGnD,SAAK,gBAAgB,YAAY,IAAI;AAErC,SAAK,YAAY,IAAI;AACrB,SAAK,IAAI,OAAO,KAAK,KAAK,IAAI;AAAA,EAChC;AAAA,EAEA,mBAAmB,QAAqB;AACtC,UAAM,QAAQ,uBAAuB,OAAO,IAAI;AAChD,SAAK,IAAI,GAAG,KAAK,yBAAyB,KAAK,CAAC;AAMhD,UAAM,YAAY,KAAK,KAAK,YAAY,IAAI,MAAM,SAAS,CAAC;AAC5D,SACG,UAAU,eAAe,CAAA,GAAI,WAC9B,OAAO,QAAQ,UAAU,OAAO,EAAE,SAAS,GAC3C;AACA,WAAK,kBAAA;AAAA,IACP,OAAO;AACL,WAAK,YAAY,MAAM,SAAS;AAAA,IAClC;AACA,SAAK,IAAI,OAAO,OAAO,KAAK,MAAM,IAAI;AAAA,EACxC;AAAA,EAEA,iBAAiB,MAAiB;AAChC,UAAM,OAAO,cAAc,KAAK,EAAE;AAClC,SAAK,IAAI,GAAG,KAAK,wBAAwB,GAAG,IAAI,CAAC,EAAE;AAC
nD,SAAK,IAAI,OAAO,KAAK,KAAK,IAAI;AAAA,EAChC;AAAA,EAEA,cAAc,OAAe;AAC3B,SAAK,IAAI;AAAA,MACP,UAAU,GAAG,KAAK,CAAC,QAAQ,GAAG,wBAAwB,CAAC;AAAA,MACvD,KAAK;AAAA,IAAA;AAEP,SAAK,YAAY,KAAK;AAAA,EACxB;AAAA;AAAA;AAAA;AAAA,EAKA,UACE,OACA,KACA,mBACA;AAIA,QAAI,KAAK,UAAU,aAAa,sBAAsB,QAAW;AAC/D,WAAK,WAAW;AAAA,QACd,KAAK;AAAA,QACL,KAAK;AAAA,QACL;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAEF,WAAK;AAAA,IACP;AAAA,EACF;AAAA,EAEA,aAAa,OAAe,KAAiB,aAAwB;AAInE,QAAI,KAAK,UAAU,aAAa,aAAa,QAAQ;AACnD,WAAK,WAAW,YAAY,KAAK,UAAU,KAAK,QAAQ,OAAO,GAAG;AAClE,WAAK;AAAA,IACP;AAAA,EACF;AAAA,EAEA,eAAe,OAAe;AAC5B,QAAI,KAAK,UAAU,WAAW;AAC5B,WAAK,WAAW,cAAc,KAAK,UAAU,KAAK;AAClD,WAAK;AAAA,IACP;AAAA,EACF;AAAA,EAEA,YAAY,OAAe;AACzB,SAAK,iBAAiB;AACtB,QAAI,KAAK,UAAU,WAAW;AAC5B,WAAK,WAAW,WAAW,KAAK,UAAU,KAAK;AAC/C,WAAK;AAAA,IACP;AACA,SAAK,kBAAA;AAAA,EACP;AAAA,EAEA,gBAAgB,EAAC,UAAU,WAAW,SAAS,aAA6B;AAC1E,UAAM,YAAY,cAAc,QAAQ;AACxC,UAAM,YAAY,KAAK,KAAK,YAAY,IAAI,SAAS,CAAC;AACtD,UAAM,aAAa,SAAS,OAAO;AACnC,UAAM,OAAO,CAAC,GAAG,YAAY,GAAG,OAAO;AAGvC,UAAM,gBAAgB,CAAC,GAAG,MAAM,wBAAwB,EAAE,IAAI,EAAE,EAAE,KAAK,GAAG;AAC1E,UAAM,SAAS,MAAM,KAAK,EAAC,QAAQ,KAAK,SAAS,EAAA,GAAI,MAAM,GAAG,EAAE,KAAK,GAAG;AACxE,UAAM,gBAAgB,WAAW,IAAI,EAAE,EAAE,KAAK,GAAG;AAEjD,QAAI,aAAa;AACjB,QAAI,UAAU;AACd,eAAW,KAAK,WAAW;AACzB,YAAM,MAAM;AAAA,QACV,OAAO,YAAY,KAAK,IAAI,CAAC,GAAG,MAAM,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC,CAAC;AAAA,QAChD;AAAA,QACA,KAAK;AAAA,MAAA;AAEP,YAAM,SAAS,KAAK,QAAQ,KAAK,EAAC,UAAS;AAC3C,YAAM,QAAQ,KAAK,WAAW,eAAe,WAAW,MAAM;AAC9D,UAAI,OAAO,OAAO,UAAU,MAAM,eAAe,WAAW;AAC1D;AACA;AAAA,MACF;AACA,YAAM,UACJ,OAAO,OAAO,SACV,KAAK;AAAA,QACH,CAAA,OAAM,MAAM,0BAA0B,CAAC,KAAK,OAAO;AAAA,MAAA,IAErD;AACN,UAAI,QAAQ,WAAW,GAAG;AAExB;AACA;AAAA,MACF;AACA,YAAM,cAAc,QAAQ,IAAI,CAAA,QAAO,GAAG,GAAG,GAAG,CAAC,aAAa,GAAG,GAAG,CAAC,EAAE;AACvE,WAAK,IAAI;AAAA;AAAA,QACC;AAAA,sBACM,GAAG,SAAS,CAAC,KAAK,aAAa,aAAa,MAAM;AAAA,yBAC/C,aAAa;AAAA,0BACZ,YAAY,KAAK,GAAG,CAAC;AAAA;AAAA,QAEvC,GAAG,OAAO,OAAO,IAAI,GAAG;AAAA,QACxB;AAAA;AAAA,MAAA;AAEF;AAAA,IACF;AAEA,SAAK,IAAI;AAAA,MACP,cAAc,UAAU,kBAAkB,OAAO,UAAU,SAAS;AAAA,I
AAA;AAAA,EAExE;AAAA,EAEA;AAAA,EAEA,yBAAyB,EAAC,UAAU,SAAS,UAA4B;AACvE,UAAM,YAAY,cAAc,QAAQ;AACxC,UAAM,aAAa,SAAS,OAAO;AACnC,UAAM,OAAO,CAAC,GAAG,YAAY,GAAG,OAAO;AAEvC,UAAM,iBAAiB,KAAK,oBAAoB,YAAY,KAAK,IAAI,EAAE,CAAC;AACxE,eAAW,OAAO,MAAM;AACtB,qBAAe,iBAAiB,WAAW,GAAG;AAAA,IAChD;AAGA,SAAK,cAAc,SAAS;AAC5B,QAAI,QAAQ;AACV,WAAK,qBAAqB,EAAC,OAAO,WAAW,SAAS,MAAM,GAAG,OAAA;AAAA,IACjE;AACA,SAAK,IAAI,OAAO,wBAAwB,SAAS,EAAE;AAAA,EAWrD;AAAA,EAEA,cAAc,QAAuB,WAAiC;AACpE,QAAI,cAAc,KAAK,UAAU;AAC/B,YAAM,IAAI;AAAA,QACR,oBAAoB,SAAS,mCAC3B,KAAK,QACP,KAAK,UAAU,MAAM,CAAC;AAAA,MAAA;AAAA,IAE1B;AACA,+BAA2B,KAAK,KAAK,SAAS;AAE9C,QAAI,KAAK,gBAAgB;AACvB,YAAM,QAAQ,KAAK,IAAA;AACnB,WAAK,IAAI,GAAG,OAAO,UAAU;AAC7B,WAAK,IAAI;AAAA,QACP,yCAAyC,KAAK,IAAA,IAAQ,KAAK;AAAA,MAAA;AAAA,IAE/D;AAEA,QAAI,KAAK,UAAU,gBAAgB;AACjC,WAAK,IAAI,OAAA;AAAA,IACX;AAEA,UAAM,YAAY,KAAK,IAAA,IAAQ,KAAK;AACpC,SAAK,IAAI,QAAQ,gBAAgB,KAAK,QAAQ,KAAK,SAAS,MAAM;AAElE,WAAO;AAAA,MACL;AAAA,MACA,mBAAmB,KAAK;AAAA,MACxB,eAAe,KAAK;AAAA,MACpB,kBAAkB,KAAK,uBAAuB;AAAA,IAAA;AAAA,EAElD;AAAA,EAEA,MAAM,IAAgB;AACpB,OAAG,OAAO,wBAAwB,KAAK,QAAQ,EAAE;AACjD,SAAK,IAAI,SAAA;AAAA,EACX;AACF;AAEA,SAAS,qBACP,KACA,EAAC,eACqB;AACtB,MAAI,CAAC,aAAa,QAAQ;AACxB,WAAO;AAAA,EACT;AACA,SAAO,YAAY,OAAO,CAAA,QAAO,OAAO,GAAG;AAC7C;AAEA,SAAS,YAAY,KAAqB;AACxC,MAAI,eAAe,OAAO;AACxB,WAAO;AAAA,EACT;AACA,QAAM,QAAQ,IAAI,MAAA;AAClB,QAAM,QAAQ;AACd,SAAO;AACT;"}
@@ -1 +1 @@
1
- {"version":3,"file":"incremental-sync.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/replicator/incremental-sync.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AACjD,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,8BAA8B,CAAC;AAG3D,OAAO,KAAK,EAAC,MAAM,EAAC,MAAM,wBAAwB,CAAC;AACnD,OAAO,EAEL,KAAK,cAAc,EAEpB,MAAM,uCAAuC,CAAC;AAK/C,OAAO,KAAK,EAAC,YAAY,EAAE,cAAc,EAAC,MAAM,iBAAiB,CAAC;AAGlE;;;;;GAKG;AACH,qBAAa,iBAAiB;;gBAkB1B,MAAM,EAAE,MAAM,EACd,EAAE,EAAE,MAAM,EACV,cAAc,EAAE,cAAc,EAC9B,OAAO,EAAE,QAAQ,EACjB,IAAI,EAAE,cAAc,EACpB,wBAAwB,EAAE,OAAO;IAW7B,GAAG,CAAC,EAAE,EAAE,UAAU;IA+ExB,SAAS,IAAI,MAAM,CAAC,YAAY,CAAC;IAIjC,IAAI,CAAC,EAAE,EAAE,UAAU,EAAE,GAAG,CAAC,EAAE,OAAO;CAGnC"}
1
+ {"version":3,"file":"incremental-sync.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/replicator/incremental-sync.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AACjD,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,8BAA8B,CAAC;AAG3D,OAAO,KAAK,EAAC,MAAM,EAAC,MAAM,wBAAwB,CAAC;AAEnD,OAAO,EAEL,KAAK,cAAc,EAEpB,MAAM,uCAAuC,CAAC;AAK/C,OAAO,KAAK,EAAC,YAAY,EAAE,cAAc,EAAC,MAAM,iBAAiB,CAAC;AAGlE;;;;;GAKG;AACH,qBAAa,iBAAiB;;gBAkB1B,MAAM,EAAE,MAAM,EACd,EAAE,EAAE,MAAM,EACV,cAAc,EAAE,cAAc,EAC9B,OAAO,EAAE,QAAQ,EACjB,IAAI,EAAE,cAAc,EACpB,wBAAwB,EAAE,OAAO;IAW7B,GAAG,CAAC,EAAE,EAAE,UAAU;IA4HxB,SAAS,IAAI,MAAM,CAAC,YAAY,CAAC;IAIjC,IAAI,CAAC,EAAE,EAAE,UAAU,EAAE,GAAG,CAAC,EAAE,OAAO;CAGnC"}
@@ -62,6 +62,7 @@ class IncrementalSyncer {
62
62
  "Replicating",
63
63
  `Replicating from ${watermark}`
64
64
  );
65
+ let backfillStatus;
65
66
  for await (const message of downstream) {
66
67
  this.#replicationEvents.add(1);
67
68
  switch (message[0]) {
@@ -72,8 +73,44 @@ class IncrementalSyncer {
72
73
  this.stop(lc, message[1]);
73
74
  break;
74
75
  default: {
76
+ const msg = message[1];
77
+ if (msg.tag === "backfill" && msg.status) {
78
+ const { status } = msg;
79
+ if (!backfillStatus) {
80
+ backfillStatus = status;
81
+ statusPublisher?.publish(
82
+ lc,
83
+ "Replicating",
84
+ `Backfilling ${msg.relation.name} table`,
85
+ 3e3,
86
+ () => backfillStatus ? {
87
+ downloadStatus: [
88
+ {
89
+ ...backfillStatus,
90
+ table: msg.relation.name,
91
+ columns: [
92
+ ...msg.relation.rowKey.columns,
93
+ ...msg.columns
94
+ ]
95
+ }
96
+ ]
97
+ } : {}
98
+ );
99
+ }
100
+ backfillStatus = status;
101
+ }
75
102
  const result = processor.processMessage(lc, message);
76
- if (result?.schemaUpdated) {
103
+ if (result?.completedBackfill) {
104
+ const status = result.completedBackfill;
105
+ statusPublisher?.publish(
106
+ lc,
107
+ "Replicating",
108
+ `Backfilled ${status.table} table`,
109
+ 0,
110
+ () => ({ downloadStatus: [status] })
111
+ );
112
+ backfillStatus = void 0;
113
+ } else if (result?.schemaUpdated) {
77
114
  statusPublisher?.publish(lc, "Replicating", "Schema updated");
78
115
  }
79
116
  if (result?.watermark && result?.changeLogUpdated) {
@@ -90,6 +127,7 @@ class IncrementalSyncer {
90
127
  } finally {
91
128
  downstream?.cancel();
92
129
  unregister();
130
+ statusPublisher?.stop();
93
131
  }
94
132
  await this.#state.backoff(lc, err);
95
133
  }
@@ -1 +1 @@
1
- {"version":3,"file":"incremental-sync.js","sources":["../../../../../../zero-cache/src/services/replicator/incremental-sync.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport type {Database} from '../../../../zqlite/src/db.ts';\nimport {StatementRunner} from '../../db/statements.ts';\nimport {getOrCreateCounter} from '../../observability/metrics.ts';\nimport type {Source} from '../../types/streams.ts';\nimport {\n PROTOCOL_VERSION,\n type ChangeStreamer,\n type Downstream,\n} from '../change-streamer/change-streamer.ts';\nimport {RunningState} from '../running-state.ts';\nimport {ChangeProcessor} from './change-processor.ts';\nimport {Notifier} from './notifier.ts';\nimport {ReplicationStatusPublisher} from './replication-status.ts';\nimport type {ReplicaState, ReplicatorMode} from './replicator.ts';\nimport {getSubscriptionState} from './schema/replication-state.ts';\n\n/**\n * The {@link IncrementalSyncer} manages a logical replication stream from upstream,\n * handling application lifecycle events (start, stop) and retrying the\n * connection with exponential backoff. 
The actual handling of the logical\n * replication messages is done by the {@link ChangeProcessor}.\n */\nexport class IncrementalSyncer {\n readonly #taskID: string;\n readonly #id: string;\n readonly #changeStreamer: ChangeStreamer;\n readonly #replica: StatementRunner;\n readonly #mode: ReplicatorMode;\n readonly #publishReplicationStatus: boolean;\n readonly #notifier: Notifier;\n\n readonly #state = new RunningState('IncrementalSyncer');\n\n readonly #replicationEvents = getOrCreateCounter(\n 'replication',\n 'events',\n 'Number of replication events processed',\n );\n\n constructor(\n taskID: string,\n id: string,\n changeStreamer: ChangeStreamer,\n replica: Database,\n mode: ReplicatorMode,\n publishReplicationStatus: boolean,\n ) {\n this.#taskID = taskID;\n this.#id = id;\n this.#changeStreamer = changeStreamer;\n this.#replica = new StatementRunner(replica);\n this.#mode = mode;\n this.#publishReplicationStatus = publishReplicationStatus;\n this.#notifier = new Notifier();\n }\n\n async run(lc: LogContext) {\n lc.info?.(`Starting IncrementalSyncer`);\n const {watermark: initialWatermark} = getSubscriptionState(this.#replica);\n\n // Notify any waiting subscribers that the replica is ready to be read.\n void this.#notifier.notifySubscribers();\n\n // Only the backup replicator publishes replication status events.\n const statusPublisher = this.#publishReplicationStatus\n ? 
new ReplicationStatusPublisher(this.#replica.db)\n : undefined;\n\n while (this.#state.shouldRun()) {\n const {replicaVersion, watermark} = getSubscriptionState(this.#replica);\n const processor = new ChangeProcessor(\n this.#replica,\n this.#mode,\n (lc: LogContext, err: unknown) => this.stop(lc, err),\n );\n\n let downstream: Source<Downstream> | undefined;\n let unregister = () => {};\n let err: unknown | undefined;\n\n try {\n downstream = await this.#changeStreamer.subscribe({\n protocolVersion: PROTOCOL_VERSION,\n taskID: this.#taskID,\n id: this.#id,\n mode: this.#mode,\n watermark,\n replicaVersion,\n initial: watermark === initialWatermark,\n });\n this.#state.resetBackoff();\n unregister = this.#state.cancelOnStop(downstream);\n statusPublisher?.publish(\n lc,\n 'Replicating',\n `Replicating from ${watermark}`,\n );\n\n for await (const message of downstream) {\n this.#replicationEvents.add(1);\n switch (message[0]) {\n case 'status':\n // Used for checking if a replica can be caught up. Not\n // relevant here.\n lc.debug?.(`Received initial status`, message[1]);\n break;\n case 'error':\n // Unrecoverable error. 
Stop the service.\n this.stop(lc, message[1]);\n break;\n default: {\n const result = processor.processMessage(lc, message);\n if (result?.schemaUpdated) {\n statusPublisher?.publish(lc, 'Replicating', 'Schema updated');\n }\n if (result?.watermark && result?.changeLogUpdated) {\n void this.#notifier.notifySubscribers({state: 'version-ready'});\n }\n break;\n }\n }\n }\n processor.abort(lc);\n } catch (e) {\n err = e;\n processor.abort(lc);\n } finally {\n downstream?.cancel();\n unregister();\n }\n await this.#state.backoff(lc, err);\n }\n lc.info?.('IncrementalSyncer stopped');\n }\n\n subscribe(): Source<ReplicaState> {\n return this.#notifier.subscribe();\n }\n\n stop(lc: LogContext, err?: unknown) {\n this.#state.stop(lc, err);\n }\n}\n"],"names":["lc","err"],"mappings":";;;;;;;;AAuBO,MAAM,kBAAkB;AAAA,EACpB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAEA,SAAS,IAAI,aAAa,mBAAmB;AAAA,EAE7C,qBAAqB;AAAA,IAC5B;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAAA,EAGF,YACE,QACA,IACA,gBACA,SACA,MACA,0BACA;AACA,SAAK,UAAU;AACf,SAAK,MAAM;AACX,SAAK,kBAAkB;AACvB,SAAK,WAAW,IAAI,gBAAgB,OAAO;AAC3C,SAAK,QAAQ;AACb,SAAK,4BAA4B;AACjC,SAAK,YAAY,IAAI,SAAA;AAAA,EACvB;AAAA,EAEA,MAAM,IAAI,IAAgB;AACxB,OAAG,OAAO,4BAA4B;AACtC,UAAM,EAAC,WAAW,iBAAA,IAAoB,qBAAqB,KAAK,QAAQ;AAGxE,SAAK,KAAK,UAAU,kBAAA;AAGpB,UAAM,kBAAkB,KAAK,4BACzB,IAAI,2BAA2B,KAAK,SAAS,EAAE,IAC/C;AAEJ,WAAO,KAAK,OAAO,aAAa;AAC9B,YAAM,EAAC,gBAAgB,UAAA,IAAa,qBAAqB,KAAK,QAAQ;AACtE,YAAM,YAAY,IAAI;AAAA,QACpB,KAAK;AAAA,QACL,KAAK;AAAA,QACL,CAACA,KAAgBC,SAAiB,KAAK,KAAKD,KAAIC,IAAG;AAAA,MAAA;AAGrD,UAAI;AACJ,UAAI,aAAa,MAAM;AAAA,MAAC;AACxB,UAAI;AAEJ,UAAI;AACF,qBAAa,MAAM,KAAK,gBAAgB,UAAU;AAAA,UAChD,iBAAiB;AAAA,UACjB,QAAQ,KAAK;AAAA,UACb,IAAI,KAAK;AAAA,UACT,MAAM,KAAK;AAAA,UACX;AAAA,UACA;AAAA,UACA,SAAS,cAAc;AAAA,QAAA,CACxB;AACD,aAAK,OAAO,aAAA;AACZ,qBAAa,KAAK,OAAO,aAAa,UAAU;AAChD,yBAAiB;AAAA,UACf;AAAA,UACA;AAAA,UACA,oBAAoB,SAAS;AAAA,QAAA;AAG/B,yBAAiB,WAAW,YAAY;AACtC,eAAK,mBAAmB,IAAI,CAAC;AAC7B,kBAAQ,QAAQ,CAAC,GAAA;AAAA,YACf,KAAK;AAGH,iBAAG,Q
AAQ,2BAA2B,QAAQ,CAAC,CAAC;AAChD;AAAA,YACF,KAAK;AAEH,mBAAK,KAAK,IAAI,QAAQ,CAAC,CAAC;AACxB;AAAA,YACF,SAAS;AACP,oBAAM,SAAS,UAAU,eAAe,IAAI,OAAO;AACnD,kBAAI,QAAQ,eAAe;AACzB,iCAAiB,QAAQ,IAAI,eAAe,gBAAgB;AAAA,cAC9D;AACA,kBAAI,QAAQ,aAAa,QAAQ,kBAAkB;AACjD,qBAAK,KAAK,UAAU,kBAAkB,EAAC,OAAO,iBAAgB;AAAA,cAChE;AACA;AAAA,YACF;AAAA,UAAA;AAAA,QAEJ;AACA,kBAAU,MAAM,EAAE;AAAA,MACpB,SAAS,GAAG;AACV,cAAM;AACN,kBAAU,MAAM,EAAE;AAAA,MACpB,UAAA;AACE,oBAAY,OAAA;AACZ,mBAAA;AAAA,MACF;AACA,YAAM,KAAK,OAAO,QAAQ,IAAI,GAAG;AAAA,IACnC;AACA,OAAG,OAAO,2BAA2B;AAAA,EACvC;AAAA,EAEA,YAAkC;AAChC,WAAO,KAAK,UAAU,UAAA;AAAA,EACxB;AAAA,EAEA,KAAK,IAAgB,KAAe;AAClC,SAAK,OAAO,KAAK,IAAI,GAAG;AAAA,EAC1B;AACF;"}
1
+ {"version":3,"file":"incremental-sync.js","sources":["../../../../../../zero-cache/src/services/replicator/incremental-sync.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport type {Database} from '../../../../zqlite/src/db.ts';\nimport {StatementRunner} from '../../db/statements.ts';\nimport {getOrCreateCounter} from '../../observability/metrics.ts';\nimport type {Source} from '../../types/streams.ts';\nimport type {DownloadStatus} from '../change-source/protocol/current.ts';\nimport {\n PROTOCOL_VERSION,\n type ChangeStreamer,\n type Downstream,\n} from '../change-streamer/change-streamer.ts';\nimport {RunningState} from '../running-state.ts';\nimport {ChangeProcessor} from './change-processor.ts';\nimport {Notifier} from './notifier.ts';\nimport {ReplicationStatusPublisher} from './replication-status.ts';\nimport type {ReplicaState, ReplicatorMode} from './replicator.ts';\nimport {getSubscriptionState} from './schema/replication-state.ts';\n\n/**\n * The {@link IncrementalSyncer} manages a logical replication stream from upstream,\n * handling application lifecycle events (start, stop) and retrying the\n * connection with exponential backoff. 
The actual handling of the logical\n * replication messages is done by the {@link ChangeProcessor}.\n */\nexport class IncrementalSyncer {\n readonly #taskID: string;\n readonly #id: string;\n readonly #changeStreamer: ChangeStreamer;\n readonly #replica: StatementRunner;\n readonly #mode: ReplicatorMode;\n readonly #publishReplicationStatus: boolean;\n readonly #notifier: Notifier;\n\n readonly #state = new RunningState('IncrementalSyncer');\n\n readonly #replicationEvents = getOrCreateCounter(\n 'replication',\n 'events',\n 'Number of replication events processed',\n );\n\n constructor(\n taskID: string,\n id: string,\n changeStreamer: ChangeStreamer,\n replica: Database,\n mode: ReplicatorMode,\n publishReplicationStatus: boolean,\n ) {\n this.#taskID = taskID;\n this.#id = id;\n this.#changeStreamer = changeStreamer;\n this.#replica = new StatementRunner(replica);\n this.#mode = mode;\n this.#publishReplicationStatus = publishReplicationStatus;\n this.#notifier = new Notifier();\n }\n\n async run(lc: LogContext) {\n lc.info?.(`Starting IncrementalSyncer`);\n const {watermark: initialWatermark} = getSubscriptionState(this.#replica);\n\n // Notify any waiting subscribers that the replica is ready to be read.\n void this.#notifier.notifySubscribers();\n\n // Only the backup replicator publishes replication status events.\n const statusPublisher = this.#publishReplicationStatus\n ? 
new ReplicationStatusPublisher(this.#replica.db)\n : undefined;\n\n while (this.#state.shouldRun()) {\n const {replicaVersion, watermark} = getSubscriptionState(this.#replica);\n const processor = new ChangeProcessor(\n this.#replica,\n this.#mode,\n (lc: LogContext, err: unknown) => this.stop(lc, err),\n );\n\n let downstream: Source<Downstream> | undefined;\n let unregister = () => {};\n let err: unknown | undefined;\n\n try {\n downstream = await this.#changeStreamer.subscribe({\n protocolVersion: PROTOCOL_VERSION,\n taskID: this.#taskID,\n id: this.#id,\n mode: this.#mode,\n watermark,\n replicaVersion,\n initial: watermark === initialWatermark,\n });\n this.#state.resetBackoff();\n unregister = this.#state.cancelOnStop(downstream);\n statusPublisher?.publish(\n lc,\n 'Replicating',\n `Replicating from ${watermark}`,\n );\n\n let backfillStatus: DownloadStatus | undefined;\n\n for await (const message of downstream) {\n this.#replicationEvents.add(1);\n switch (message[0]) {\n case 'status':\n // Used for checking if a replica can be caught up. Not\n // relevant here.\n lc.debug?.(`Received initial status`, message[1]);\n break;\n case 'error':\n // Unrecoverable error. Stop the service.\n this.stop(lc, message[1]);\n break;\n default: {\n const msg = message[1];\n if (msg.tag === 'backfill' && msg.status) {\n const {status} = msg;\n if (!backfillStatus) {\n // Start publishing the status every 3 seconds.\n backfillStatus = status;\n statusPublisher?.publish(\n lc,\n 'Replicating',\n `Backfilling ${msg.relation.name} table`,\n 3000,\n () =>\n backfillStatus\n ? 
{\n downloadStatus: [\n {\n ...backfillStatus,\n table: msg.relation.name,\n columns: [\n ...msg.relation.rowKey.columns,\n ...msg.columns,\n ],\n },\n ],\n }\n : {},\n );\n }\n backfillStatus = status; // Update the current status\n }\n\n const result = processor.processMessage(lc, message);\n if (result?.completedBackfill) {\n // Publish the final status\n const status = result.completedBackfill;\n statusPublisher?.publish(\n lc,\n 'Replicating',\n `Backfilled ${status.table} table`,\n 0,\n () => ({downloadStatus: [status]}),\n );\n backfillStatus = undefined;\n } else if (result?.schemaUpdated) {\n statusPublisher?.publish(lc, 'Replicating', 'Schema updated');\n }\n if (result?.watermark && result?.changeLogUpdated) {\n void this.#notifier.notifySubscribers({state: 'version-ready'});\n }\n break;\n }\n }\n }\n processor.abort(lc);\n } catch (e) {\n err = e;\n processor.abort(lc);\n } finally {\n downstream?.cancel();\n unregister();\n statusPublisher?.stop();\n }\n await this.#state.backoff(lc, err);\n }\n lc.info?.('IncrementalSyncer stopped');\n }\n\n subscribe(): Source<ReplicaState> {\n return this.#notifier.subscribe();\n }\n\n stop(lc: LogContext, err?: unknown) {\n this.#state.stop(lc, err);\n 
}\n}\n"],"names":["lc","err"],"mappings":";;;;;;;;AAwBO,MAAM,kBAAkB;AAAA,EACpB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAEA,SAAS,IAAI,aAAa,mBAAmB;AAAA,EAE7C,qBAAqB;AAAA,IAC5B;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAAA,EAGF,YACE,QACA,IACA,gBACA,SACA,MACA,0BACA;AACA,SAAK,UAAU;AACf,SAAK,MAAM;AACX,SAAK,kBAAkB;AACvB,SAAK,WAAW,IAAI,gBAAgB,OAAO;AAC3C,SAAK,QAAQ;AACb,SAAK,4BAA4B;AACjC,SAAK,YAAY,IAAI,SAAA;AAAA,EACvB;AAAA,EAEA,MAAM,IAAI,IAAgB;AACxB,OAAG,OAAO,4BAA4B;AACtC,UAAM,EAAC,WAAW,iBAAA,IAAoB,qBAAqB,KAAK,QAAQ;AAGxE,SAAK,KAAK,UAAU,kBAAA;AAGpB,UAAM,kBAAkB,KAAK,4BACzB,IAAI,2BAA2B,KAAK,SAAS,EAAE,IAC/C;AAEJ,WAAO,KAAK,OAAO,aAAa;AAC9B,YAAM,EAAC,gBAAgB,UAAA,IAAa,qBAAqB,KAAK,QAAQ;AACtE,YAAM,YAAY,IAAI;AAAA,QACpB,KAAK;AAAA,QACL,KAAK;AAAA,QACL,CAACA,KAAgBC,SAAiB,KAAK,KAAKD,KAAIC,IAAG;AAAA,MAAA;AAGrD,UAAI;AACJ,UAAI,aAAa,MAAM;AAAA,MAAC;AACxB,UAAI;AAEJ,UAAI;AACF,qBAAa,MAAM,KAAK,gBAAgB,UAAU;AAAA,UAChD,iBAAiB;AAAA,UACjB,QAAQ,KAAK;AAAA,UACb,IAAI,KAAK;AAAA,UACT,MAAM,KAAK;AAAA,UACX;AAAA,UACA;AAAA,UACA,SAAS,cAAc;AAAA,QAAA,CACxB;AACD,aAAK,OAAO,aAAA;AACZ,qBAAa,KAAK,OAAO,aAAa,UAAU;AAChD,yBAAiB;AAAA,UACf;AAAA,UACA;AAAA,UACA,oBAAoB,SAAS;AAAA,QAAA;AAG/B,YAAI;AAEJ,yBAAiB,WAAW,YAAY;AACtC,eAAK,mBAAmB,IAAI,CAAC;AAC7B,kBAAQ,QAAQ,CAAC,GAAA;AAAA,YACf,KAAK;AAGH,iBAAG,QAAQ,2BAA2B,QAAQ,CAAC,CAAC;AAChD;AAAA,YACF,KAAK;AAEH,mBAAK,KAAK,IAAI,QAAQ,CAAC,CAAC;AACxB;AAAA,YACF,SAAS;AACP,oBAAM,MAAM,QAAQ,CAAC;AACrB,kBAAI,IAAI,QAAQ,cAAc,IAAI,QAAQ;AACxC,sBAAM,EAAC,WAAU;AACjB,oBAAI,CAAC,gBAAgB;AAEnB,mCAAiB;AACjB,mCAAiB;AAAA,oBACf;AAAA,oBACA;AAAA,oBACA,eAAe,IAAI,SAAS,IAAI;AAAA,oBAChC;AAAA,oBACA,MACE,iBACI;AAAA,sBACE,gBAAgB;AAAA,wBACd;AAAA,0BACE,GAAG;AAAA,0BACH,OAAO,IAAI,SAAS;AAAA,0BACpB,SAAS;AAAA,4BACP,GAAG,IAAI,SAAS,OAAO;AAAA,4BACvB,GAAG,IAAI;AAAA,0BAAA;AAAA,wBACT;AAAA,sBACF;AAAA,oBACF,IAEF,CAAA;AAAA,kBAAC;AAAA,gBAEX;AACA,iCAAiB;AAAA,cACnB;AAEA,oBAAM,SAAS,UAAU,eAAe,IAAI,OAAO;AACnD,kBAAI,QAAQ,mBAAmB;AAE7B,sBAAM,SAAS,OAAO;AACtB,iCAAiB;AAAA,kBACf;AAAA,kBACA;AAAA,kBACA,cAAc,OAAO,KAAK;AAAA,kBAC1B;AAAA,kBACA,OAA
O,EAAC,gBAAgB,CAAC,MAAM,EAAA;AAAA,gBAAC;AAElC,iCAAiB;AAAA,cACnB,WAAW,QAAQ,eAAe;AAChC,iCAAiB,QAAQ,IAAI,eAAe,gBAAgB;AAAA,cAC9D;AACA,kBAAI,QAAQ,aAAa,QAAQ,kBAAkB;AACjD,qBAAK,KAAK,UAAU,kBAAkB,EAAC,OAAO,iBAAgB;AAAA,cAChE;AACA;AAAA,YACF;AAAA,UAAA;AAAA,QAEJ;AACA,kBAAU,MAAM,EAAE;AAAA,MACpB,SAAS,GAAG;AACV,cAAM;AACN,kBAAU,MAAM,EAAE;AAAA,MACpB,UAAA;AACE,oBAAY,OAAA;AACZ,mBAAA;AACA,yBAAiB,KAAA;AAAA,MACnB;AACA,YAAM,KAAK,OAAO,QAAQ,IAAI,GAAG;AAAA,IACnC;AACA,OAAG,OAAO,2BAA2B;AAAA,EACvC;AAAA,EAEA,YAAkC;AAChC,WAAO,KAAK,UAAU,UAAA;AAAA,EACxB;AAAA,EAEA,KAAK,IAAgB,KAAe;AAClC,SAAK,OAAO,KAAK,IAAI,GAAG;AAAA,EAC1B;AACF;"}
@@ -1,11 +1,12 @@
1
1
  import type { LogContext } from '@rocicorp/logger';
2
2
  import type { JSONObject } from '../../../../zero-events/src/json.ts';
3
- import type { ReplicationStage, ReplicationStatusEvent, Status } from '../../../../zero-events/src/status.ts';
3
+ import type { ReplicationStage, ReplicationState, ReplicationStatusEvent, Status } from '../../../../zero-events/src/status.ts';
4
4
  import type { Database } from '../../../../zqlite/src/db.ts';
5
+ import { publishCriticalEvent } from '../../observability/events.ts';
5
6
  export declare class ReplicationStatusPublisher {
6
7
  #private;
7
- constructor(db: Database);
8
- publish(lc: LogContext, stage: ReplicationStage, description?: string, interval?: number): this;
8
+ constructor(db: Database, publishFn?: typeof publishCriticalEvent);
9
+ publish(lc: LogContext, stage: ReplicationStage, description?: string, interval?: number, extraState?: () => Partial<ReplicationState>, now?: Date): this;
9
10
  publishAndThrowError(lc: LogContext, stage: ReplicationStage, e: unknown): Promise<never>;
10
11
  stop(): this;
11
12
  }
@@ -1 +1 @@
1
- {"version":3,"file":"replication-status.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/replicator/replication-status.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,qCAAqC,CAAC;AACpE,OAAO,KAAK,EAGV,gBAAgB,EAChB,sBAAsB,EACtB,MAAM,EACP,MAAM,uCAAuC,CAAC;AAC/C,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,8BAA8B,CAAC;AAY3D,qBAAa,0BAA0B;;gBAIzB,EAAE,EAAE,QAAQ;IAIxB,OAAO,CACL,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,gBAAgB,EACvB,WAAW,CAAC,EAAE,MAAM,EACpB,QAAQ,SAAI,GACX,IAAI;IAgBD,oBAAoB,CACxB,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,gBAAgB,EACvB,CAAC,EAAE,OAAO,GACT,OAAO,CAAC,KAAK,CAAC;IAOjB,IAAI,IAAI,IAAI;CAIb;AAED,wBAAsB,uBAAuB,CAC3C,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,YAAY,CAAC,EAAE,UAAU,EACzB,GAAG,OAAa,iBAYjB;AAED,wBAAgB,sBAAsB,CACpC,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,gBAAgB,EACvB,CAAC,EAAE,OAAO,EACV,EAAE,CAAC,EAAE,QAAQ,EACb,GAAG,OAAa,0BAKjB;AAGD,wBAAgB,sBAAsB,CACpC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,QAAQ,GAAG,SAAS,EACxB,KAAK,EAAE,gBAAgB,EACvB,MAAM,EAAE,MAAM,EACd,WAAW,CAAC,EAAE,MAAM,EACpB,GAAG,OAAa,GACf,sBAAsB,CA+BxB"}
1
+ {"version":3,"file":"replication-status.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/replicator/replication-status.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,qCAAqC,CAAC;AACpE,OAAO,KAAK,EAGV,gBAAgB,EAChB,gBAAgB,EAChB,sBAAsB,EACtB,MAAM,EACP,MAAM,uCAAuC,CAAC;AAC/C,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,8BAA8B,CAAC;AAG3D,OAAO,EAEL,oBAAoB,EACrB,MAAM,+BAA+B,CAAC;AAKvC,qBAAa,0BAA0B;;gBAKzB,EAAE,EAAE,QAAQ,EAAE,SAAS,8BAAuB;IAK1D,OAAO,CACL,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,gBAAgB,EACvB,WAAW,CAAC,EAAE,MAAM,EACpB,QAAQ,SAAI,EACZ,UAAU,CAAC,EAAE,MAAM,OAAO,CAAC,gBAAgB,CAAC,EAC5C,GAAG,OAAa,GACf,IAAI;IA2BD,oBAAoB,CACxB,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,gBAAgB,EACvB,CAAC,EAAE,OAAO,GACT,OAAO,CAAC,KAAK,CAAC;IAOjB,IAAI,IAAI,IAAI;CAIb;AAED,wBAAsB,uBAAuB,CAC3C,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,YAAY,CAAC,EAAE,UAAU,EACzB,GAAG,OAAa,iBAYjB;AAED,wBAAgB,sBAAsB,CACpC,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,gBAAgB,EACvB,CAAC,EAAE,OAAO,EACV,EAAE,CAAC,EAAE,QAAQ,EACb,GAAG,OAAa,0BAKjB;AAGD,wBAAgB,sBAAsB,CACpC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,QAAQ,GAAG,SAAS,EACxB,KAAK,EAAE,gBAAgB,EACvB,MAAM,EAAE,MAAM,EACd,WAAW,CAAC,EAAE,MAAM,EACpB,GAAG,OAAa,GACf,sBAAsB,CAmCxB"}
@@ -1,22 +1,35 @@
1
1
  import { createSilentLogContext } from "../../../../shared/src/logging-test-utils.js";
2
2
  import { listIndexes, computeZqlSpecs } from "../../db/lite-tables.js";
3
- import { makeErrorDetails, publishCriticalEvent, publishEvent } from "../../observability/events.js";
3
+ import { makeErrorDetails, publishCriticalEvent } from "../../observability/events.js";
4
4
  const byKeys = (a, b) => a[0] < b[0] ? -1 : a[0] > b[0] ? 1 : 0;
5
5
  class ReplicationStatusPublisher {
6
6
  #db;
7
+ #publish;
7
8
  #timer;
8
- constructor(db) {
9
+ constructor(db, publishFn = publishCriticalEvent) {
9
10
  this.#db = db;
11
+ this.#publish = publishFn;
10
12
  }
11
- publish(lc, stage, description, interval = 0) {
13
+ publish(lc, stage, description, interval = 0, extraState, now = /* @__PURE__ */ new Date()) {
12
14
  this.stop();
13
- publishEvent(
15
+ const event = replicationStatusEvent(
14
16
  lc,
15
- replicationStatusEvent(lc, this.#db, stage, "OK", description)
17
+ this.#db,
18
+ stage,
19
+ "OK",
20
+ description,
21
+ now
16
22
  );
23
+ if (event.state) {
24
+ event.state = {
25
+ ...event.state,
26
+ ...extraState?.()
27
+ };
28
+ }
29
+ void this.#publish(lc, event);
17
30
  if (interval) {
18
31
  this.#timer = setInterval(
19
- () => this.publish(lc, stage, description, interval),
32
+ () => this.publish(lc, stage, description, interval, extraState),
20
33
  interval
21
34
  );
22
35
  }
@@ -24,8 +37,8 @@ class ReplicationStatusPublisher {
24
37
  }
25
38
  async publishAndThrowError(lc, stage, e) {
26
39
  this.stop();
27
- const event = replicationStatusError(lc, stage, "ERROR", this.#db);
28
- await publishCriticalEvent(lc, event);
40
+ const event = replicationStatusError(lc, stage, e, this.#db);
41
+ await this.#publish(lc, event);
29
42
  throw e;
30
43
  }
31
44
  stop() {
@@ -51,6 +64,7 @@ function replicationStatusError(lc, stage, e, db, now = /* @__PURE__ */ new Date
51
64
  return event;
52
65
  }
53
66
  function replicationStatusEvent(lc, db, stage, status, description, now = /* @__PURE__ */ new Date()) {
67
+ const start = performance.now();
54
68
  try {
55
69
  return {
56
70
  type: "zero/events/status/replication/v1",
@@ -80,6 +94,9 @@ function replicationStatusEvent(lc, db, stage, status, description, now = /* @__
80
94
  replicaSize: 0
81
95
  }
82
96
  };
97
+ } finally {
98
+ const elapsed = (performance.now() - start).toFixed(3);
99
+ lc.debug?.(`computed schema for replication event (${elapsed} ms)`);
83
100
  }
84
101
  }
85
102
  function getReplicatedTables(db) {
@@ -88,8 +105,6 @@ function getReplicatedTables(db) {
88
105
  createSilentLogContext(),
89
106
  // avoid logging warnings about indexes
90
107
  db,
91
- // TODO: Consider exposing backfilling columns with an indication
92
- // of backfill status.
93
108
  { includeBackfillingColumns: false },
94
109
  /* @__PURE__ */ new Map(),
95
110
  fullTables
@@ -1 +1 @@
1
- {"version":3,"file":"replication-status.js","sources":["../../../../../../zero-cache/src/services/replicator/replication-status.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {createSilentLogContext} from '../../../../shared/src/logging-test-utils.ts';\nimport type {JSONObject} from '../../../../zero-events/src/json.ts';\nimport type {\n ReplicatedIndex,\n ReplicatedTable,\n ReplicationStage,\n ReplicationStatusEvent,\n Status,\n} from '../../../../zero-events/src/status.ts';\nimport type {Database} from '../../../../zqlite/src/db.ts';\nimport {computeZqlSpecs, listIndexes} from '../../db/lite-tables.ts';\nimport type {LiteTableSpec} from '../../db/specs.ts';\nimport {\n makeErrorDetails,\n publishCriticalEvent,\n publishEvent,\n} from '../../observability/events.ts';\n\nconst byKeys = (a: [string, unknown], b: [string, unknown]) =>\n a[0] < b[0] ? -1 : a[0] > b[0] ? 1 : 0;\n\nexport class ReplicationStatusPublisher {\n readonly #db: Database;\n #timer: NodeJS.Timeout | undefined;\n\n constructor(db: Database) {\n this.#db = db;\n }\n\n publish(\n lc: LogContext,\n stage: ReplicationStage,\n description?: string,\n interval = 0,\n ): this {\n this.stop();\n publishEvent(\n lc,\n replicationStatusEvent(lc, this.#db, stage, 'OK', description),\n );\n\n if (interval) {\n this.#timer = setInterval(\n () => this.publish(lc, stage, description, interval),\n interval,\n );\n }\n return this;\n }\n\n async publishAndThrowError(\n lc: LogContext,\n stage: ReplicationStage,\n e: unknown,\n ): Promise<never> {\n this.stop();\n const event = replicationStatusError(lc, stage, 'ERROR', this.#db);\n await publishCriticalEvent(lc, event);\n throw e;\n }\n\n stop(): this {\n clearInterval(this.#timer);\n return this;\n }\n}\n\nexport async function publishReplicationError(\n lc: LogContext,\n stage: ReplicationStage,\n description: string,\n errorDetails?: JSONObject,\n now = new Date(),\n) {\n const event: ReplicationStatusEvent = {\n type: 
'zero/events/status/replication/v1',\n component: 'replication',\n status: 'ERROR',\n stage,\n description,\n errorDetails,\n time: now.toISOString(),\n };\n await publishCriticalEvent(lc, event);\n}\n\nexport function replicationStatusError(\n lc: LogContext,\n stage: ReplicationStage,\n e: unknown,\n db?: Database,\n now = new Date(),\n) {\n const event = replicationStatusEvent(lc, db, stage, 'ERROR', String(e), now);\n event.errorDetails = makeErrorDetails(e);\n return event;\n}\n\n// Exported for testing.\nexport function replicationStatusEvent(\n lc: LogContext,\n db: Database | undefined,\n stage: ReplicationStage,\n status: Status,\n description?: string,\n now = new Date(),\n): ReplicationStatusEvent {\n try {\n return {\n type: 'zero/events/status/replication/v1',\n component: 'replication',\n status,\n stage,\n description,\n time: now.toISOString(),\n state: {\n tables: db ? getReplicatedTables(db) : [],\n indexes: db ? getReplicatedIndexes(db) : [],\n replicaSize: db ? getReplicaSize(db) : undefined,\n },\n };\n } catch (e) {\n lc.warn?.(`Unable to create full ReplicationStatusEvent`, e);\n return {\n type: 'zero/events/status/replication/v1',\n component: 'replication',\n status,\n stage,\n description,\n time: now.toISOString(),\n state: {\n tables: [],\n indexes: [],\n replicaSize: 0,\n },\n };\n }\n}\n\nfunction getReplicatedTables(db: Database): ReplicatedTable[] {\n const fullTables = new Map<string, LiteTableSpec>();\n const clientSchema = computeZqlSpecs(\n createSilentLogContext(), // avoid logging warnings about indexes\n db,\n // TODO: Consider exposing backfilling columns with an indication\n // of backfill status.\n {includeBackfillingColumns: false},\n new Map(),\n fullTables,\n );\n\n return [...fullTables.entries()].sort(byKeys).map(([table, spec]) => ({\n table,\n columns: Object.entries(spec.columns)\n .sort(byKeys)\n .map(([column, spec]) => ({\n column,\n upstreamType: spec.dataType.split('|')[0],\n clientType: 
clientSchema.get(table)?.zqlSpec[column]?.type ?? null,\n })),\n }));\n}\n\nfunction getReplicatedIndexes(db: Database): ReplicatedIndex[] {\n return listIndexes(db).map(({tableName: table, columns, unique}) => ({\n table,\n unique,\n columns: Object.entries(columns)\n .sort(byKeys)\n .map(([column, dir]) => ({column, dir})),\n }));\n}\n\nfunction getReplicaSize(db: Database) {\n const [{page_count: pageCount}] = db.pragma<{page_count: number}>(\n 'page_count',\n );\n const [{page_size: pageSize}] = db.pragma<{page_size: number}>('page_size');\n return pageCount * pageSize;\n}\n"],"names":["spec"],"mappings":";;;AAmBA,MAAM,SAAS,CAAC,GAAsB,MACpC,EAAE,CAAC,IAAI,EAAE,CAAC,IAAI,KAAK,EAAE,CAAC,IAAI,EAAE,CAAC,IAAI,IAAI;AAEhC,MAAM,2BAA2B;AAAA,EAC7B;AAAA,EACT;AAAA,EAEA,YAAY,IAAc;AACxB,SAAK,MAAM;AAAA,EACb;AAAA,EAEA,QACE,IACA,OACA,aACA,WAAW,GACL;AACN,SAAK,KAAA;AACL;AAAA,MACE;AAAA,MACA,uBAAuB,IAAI,KAAK,KAAK,OAAO,MAAM,WAAW;AAAA,IAAA;AAG/D,QAAI,UAAU;AACZ,WAAK,SAAS;AAAA,QACZ,MAAM,KAAK,QAAQ,IAAI,OAAO,aAAa,QAAQ;AAAA,QACnD;AAAA,MAAA;AAAA,IAEJ;AACA,WAAO;AAAA,EACT;AAAA,EAEA,MAAM,qBACJ,IACA,OACA,GACgB;AAChB,SAAK,KAAA;AACL,UAAM,QAAQ,uBAAuB,IAAI,OAAO,SAAS,KAAK,GAAG;AACjE,UAAM,qBAAqB,IAAI,KAAK;AACpC,UAAM;AAAA,EACR;AAAA,EAEA,OAAa;AACX,kBAAc,KAAK,MAAM;AACzB,WAAO;AAAA,EACT;AACF;AAEA,eAAsB,wBACpB,IACA,OACA,aACA,cACA,MAAM,oBAAI,QACV;AACA,QAAM,QAAgC;AAAA,IACpC,MAAM;AAAA,IACN,WAAW;AAAA,IACX,QAAQ;AAAA,IACR;AAAA,IACA;AAAA,IACA;AAAA,IACA,MAAM,IAAI,YAAA;AAAA,EAAY;AAExB,QAAM,qBAAqB,IAAI,KAAK;AACtC;AAEO,SAAS,uBACd,IACA,OACA,GACA,IACA,MAAM,oBAAI,QACV;AACA,QAAM,QAAQ,uBAAuB,IAAI,IAAI,OAAO,SAAS,OAAO,CAAC,GAAG,GAAG;AAC3E,QAAM,eAAe,iBAAiB,CAAC;AACvC,SAAO;AACT;AAGO,SAAS,uBACd,IACA,IACA,OACA,QACA,aACA,MAAM,oBAAI,QACc;AACxB,MAAI;AACF,WAAO;AAAA,MACL,MAAM;AAAA,MACN,WAAW;AAAA,MACX;AAAA,MACA;AAAA,MACA;AAAA,MACA,MAAM,IAAI,YAAA;AAAA,MACV,OAAO;AAAA,QACL,QAAQ,KAAK,oBAAoB,EAAE,IAAI,CAAA;AAAA,QACvC,SAAS,KAAK,qBAAqB,EAAE,IAAI,CAAA;AAAA,QACzC,aAAa,KAAK,eAAe,EAAE,IAAI;AAAA,MAAA;AAAA,IACzC;AAAA,EAEJ,SAAS,GAAG;AACV,OAAG,OAAO
,gDAAgD,CAAC;AAC3D,WAAO;AAAA,MACL,MAAM;AAAA,MACN,WAAW;AAAA,MACX;AAAA,MACA;AAAA,MACA;AAAA,MACA,MAAM,IAAI,YAAA;AAAA,MACV,OAAO;AAAA,QACL,QAAQ,CAAA;AAAA,QACR,SAAS,CAAA;AAAA,QACT,aAAa;AAAA,MAAA;AAAA,IACf;AAAA,EAEJ;AACF;AAEA,SAAS,oBAAoB,IAAiC;AAC5D,QAAM,iCAAiB,IAAA;AACvB,QAAM,eAAe;AAAA,IACnB,uBAAA;AAAA;AAAA,IACA;AAAA;AAAA;AAAA,IAGA,EAAC,2BAA2B,MAAA;AAAA,wBACxB,IAAA;AAAA,IACJ;AAAA,EAAA;AAGF,SAAO,CAAC,GAAG,WAAW,QAAA,CAAS,EAAE,KAAK,MAAM,EAAE,IAAI,CAAC,CAAC,OAAO,IAAI,OAAO;AAAA,IACpE;AAAA,IACA,SAAS,OAAO,QAAQ,KAAK,OAAO,EACjC,KAAK,MAAM,EACX,IAAI,CAAC,CAAC,QAAQA,KAAI,OAAO;AAAA,MACxB;AAAA,MACA,cAAcA,MAAK,SAAS,MAAM,GAAG,EAAE,CAAC;AAAA,MACxC,YAAY,aAAa,IAAI,KAAK,GAAG,QAAQ,MAAM,GAAG,QAAQ;AAAA,IAAA,EAC9D;AAAA,EAAA,EACJ;AACJ;AAEA,SAAS,qBAAqB,IAAiC;AAC7D,SAAO,YAAY,EAAE,EAAE,IAAI,CAAC,EAAC,WAAW,OAAO,SAAS,cAAa;AAAA,IACnE;AAAA,IACA;AAAA,IACA,SAAS,OAAO,QAAQ,OAAO,EAC5B,KAAK,MAAM,EACX,IAAI,CAAC,CAAC,QAAQ,GAAG,OAAO,EAAC,QAAQ,MAAK;AAAA,EAAA,EACzC;AACJ;AAEA,SAAS,eAAe,IAAc;AACpC,QAAM,CAAC,EAAC,YAAY,UAAA,CAAU,IAAI,GAAG;AAAA,IACnC;AAAA,EAAA;AAEF,QAAM,CAAC,EAAC,WAAW,SAAA,CAAS,IAAI,GAAG,OAA4B,WAAW;AAC1E,SAAO,YAAY;AACrB;"}
1
+ {"version":3,"file":"replication-status.js","sources":["../../../../../../zero-cache/src/services/replicator/replication-status.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {createSilentLogContext} from '../../../../shared/src/logging-test-utils.ts';\nimport type {JSONObject} from '../../../../zero-events/src/json.ts';\nimport type {\n ReplicatedIndex,\n ReplicatedTable,\n ReplicationStage,\n ReplicationState,\n ReplicationStatusEvent,\n Status,\n} from '../../../../zero-events/src/status.ts';\nimport type {Database} from '../../../../zqlite/src/db.ts';\nimport {computeZqlSpecs, listIndexes} from '../../db/lite-tables.ts';\nimport type {LiteTableSpec} from '../../db/specs.ts';\nimport {\n makeErrorDetails,\n publishCriticalEvent,\n} from '../../observability/events.ts';\n\nconst byKeys = (a: [string, unknown], b: [string, unknown]) =>\n a[0] < b[0] ? -1 : a[0] > b[0] ? 1 : 0;\n\nexport class ReplicationStatusPublisher {\n readonly #db: Database;\n readonly #publish: typeof publishCriticalEvent;\n #timer: NodeJS.Timeout | undefined;\n\n constructor(db: Database, publishFn = publishCriticalEvent) {\n this.#db = db;\n this.#publish = publishFn;\n }\n\n publish(\n lc: LogContext,\n stage: ReplicationStage,\n description?: string,\n interval = 0,\n extraState?: () => Partial<ReplicationState>,\n now = new Date(),\n ): this {\n this.stop();\n const event = replicationStatusEvent(\n lc,\n this.#db,\n stage,\n 'OK',\n description,\n now,\n );\n if (event.state) {\n event.state = {\n ...event.state,\n ...extraState?.(),\n };\n }\n void this.#publish(lc, event);\n\n if (interval) {\n this.#timer = setInterval(\n () => this.publish(lc, stage, description, interval, extraState),\n interval,\n );\n }\n return this;\n }\n\n async publishAndThrowError(\n lc: LogContext,\n stage: ReplicationStage,\n e: unknown,\n ): Promise<never> {\n this.stop();\n const event = replicationStatusError(lc, stage, e, this.#db);\n await this.#publish(lc, 
event);\n throw e;\n }\n\n stop(): this {\n clearInterval(this.#timer);\n return this;\n }\n}\n\nexport async function publishReplicationError(\n lc: LogContext,\n stage: ReplicationStage,\n description: string,\n errorDetails?: JSONObject,\n now = new Date(),\n) {\n const event: ReplicationStatusEvent = {\n type: 'zero/events/status/replication/v1',\n component: 'replication',\n status: 'ERROR',\n stage,\n description,\n errorDetails,\n time: now.toISOString(),\n };\n await publishCriticalEvent(lc, event);\n}\n\nexport function replicationStatusError(\n lc: LogContext,\n stage: ReplicationStage,\n e: unknown,\n db?: Database,\n now = new Date(),\n) {\n const event = replicationStatusEvent(lc, db, stage, 'ERROR', String(e), now);\n event.errorDetails = makeErrorDetails(e);\n return event;\n}\n\n// Exported for testing.\nexport function replicationStatusEvent(\n lc: LogContext,\n db: Database | undefined,\n stage: ReplicationStage,\n status: Status,\n description?: string,\n now = new Date(),\n): ReplicationStatusEvent {\n const start = performance.now();\n try {\n return {\n type: 'zero/events/status/replication/v1',\n component: 'replication',\n status,\n stage,\n description,\n time: now.toISOString(),\n state: {\n tables: db ? getReplicatedTables(db) : [],\n indexes: db ? getReplicatedIndexes(db) : [],\n replicaSize: db ? 
getReplicaSize(db) : undefined,\n },\n };\n } catch (e) {\n lc.warn?.(`Unable to create full ReplicationStatusEvent`, e);\n return {\n type: 'zero/events/status/replication/v1',\n component: 'replication',\n status,\n stage,\n description,\n time: now.toISOString(),\n state: {\n tables: [],\n indexes: [],\n replicaSize: 0,\n },\n };\n } finally {\n const elapsed = (performance.now() - start).toFixed(3);\n lc.debug?.(`computed schema for replication event (${elapsed} ms)`);\n }\n}\n\nfunction getReplicatedTables(db: Database): ReplicatedTable[] {\n const fullTables = new Map<string, LiteTableSpec>();\n const clientSchema = computeZqlSpecs(\n createSilentLogContext(), // avoid logging warnings about indexes\n db,\n {includeBackfillingColumns: false},\n new Map(),\n fullTables,\n );\n\n return [...fullTables.entries()].sort(byKeys).map(([table, spec]) => ({\n table,\n columns: Object.entries(spec.columns)\n .sort(byKeys)\n .map(([column, spec]) => ({\n column,\n upstreamType: spec.dataType.split('|')[0],\n clientType: clientSchema.get(table)?.zqlSpec[column]?.type ?? 
null,\n })),\n }));\n}\n\nfunction getReplicatedIndexes(db: Database): ReplicatedIndex[] {\n return listIndexes(db).map(({tableName: table, columns, unique}) => ({\n table,\n unique,\n columns: Object.entries(columns)\n .sort(byKeys)\n .map(([column, dir]) => ({column, dir})),\n }));\n}\n\nfunction getReplicaSize(db: Database) {\n const [{page_count: pageCount}] = db.pragma<{page_count: number}>(\n 'page_count',\n );\n const [{page_size: pageSize}] = db.pragma<{page_size: number}>('page_size');\n return pageCount * pageSize;\n}\n"],"names":["spec"],"mappings":";;;AAmBA,MAAM,SAAS,CAAC,GAAsB,MACpC,EAAE,CAAC,IAAI,EAAE,CAAC,IAAI,KAAK,EAAE,CAAC,IAAI,EAAE,CAAC,IAAI,IAAI;AAEhC,MAAM,2BAA2B;AAAA,EAC7B;AAAA,EACA;AAAA,EACT;AAAA,EAEA,YAAY,IAAc,YAAY,sBAAsB;AAC1D,SAAK,MAAM;AACX,SAAK,WAAW;AAAA,EAClB;AAAA,EAEA,QACE,IACA,OACA,aACA,WAAW,GACX,YACA,MAAM,oBAAI,QACJ;AACN,SAAK,KAAA;AACL,UAAM,QAAQ;AAAA,MACZ;AAAA,MACA,KAAK;AAAA,MACL;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAEF,QAAI,MAAM,OAAO;AACf,YAAM,QAAQ;AAAA,QACZ,GAAG,MAAM;AAAA,QACT,GAAG,aAAA;AAAA,MAAa;AAAA,IAEpB;AACA,SAAK,KAAK,SAAS,IAAI,KAAK;AAE5B,QAAI,UAAU;AACZ,WAAK,SAAS;AAAA,QACZ,MAAM,KAAK,QAAQ,IAAI,OAAO,aAAa,UAAU,UAAU;AAAA,QAC/D;AAAA,MAAA;AAAA,IAEJ;AACA,WAAO;AAAA,EACT;AAAA,EAEA,MAAM,qBACJ,IACA,OACA,GACgB;AAChB,SAAK,KAAA;AACL,UAAM,QAAQ,uBAAuB,IAAI,OAAO,GAAG,KAAK,GAAG;AAC3D,UAAM,KAAK,SAAS,IAAI,KAAK;AAC7B,UAAM;AAAA,EACR;AAAA,EAEA,OAAa;AACX,kBAAc,KAAK,MAAM;AACzB,WAAO;AAAA,EACT;AACF;AAEA,eAAsB,wBACpB,IACA,OACA,aACA,cACA,MAAM,oBAAI,QACV;AACA,QAAM,QAAgC;AAAA,IACpC,MAAM;AAAA,IACN,WAAW;AAAA,IACX,QAAQ;AAAA,IACR;AAAA,IACA;AAAA,IACA;AAAA,IACA,MAAM,IAAI,YAAA;AAAA,EAAY;AAExB,QAAM,qBAAqB,IAAI,KAAK;AACtC;AAEO,SAAS,uBACd,IACA,OACA,GACA,IACA,MAAM,oBAAI,QACV;AACA,QAAM,QAAQ,uBAAuB,IAAI,IAAI,OAAO,SAAS,OAAO,CAAC,GAAG,GAAG;AAC3E,QAAM,eAAe,iBAAiB,CAAC;AACvC,SAAO;AACT;AAGO,SAAS,uBACd,IACA,IACA,OACA,QACA,aACA,MAAM,oBAAI,QACc;AACxB,QAAM,QAAQ,YAAY,IAAA;AAC1B,MAAI;AACF,WAAO;AAAA,MACL,MAAM;AAAA,MACN,WAAW;AAAA,MACX;AAAA,MACA;AAAA,MACA;AAAA,MACA,MAAM,IAAI,YAAA;AAAA
,MACV,OAAO;AAAA,QACL,QAAQ,KAAK,oBAAoB,EAAE,IAAI,CAAA;AAAA,QACvC,SAAS,KAAK,qBAAqB,EAAE,IAAI,CAAA;AAAA,QACzC,aAAa,KAAK,eAAe,EAAE,IAAI;AAAA,MAAA;AAAA,IACzC;AAAA,EAEJ,SAAS,GAAG;AACV,OAAG,OAAO,gDAAgD,CAAC;AAC3D,WAAO;AAAA,MACL,MAAM;AAAA,MACN,WAAW;AAAA,MACX;AAAA,MACA;AAAA,MACA;AAAA,MACA,MAAM,IAAI,YAAA;AAAA,MACV,OAAO;AAAA,QACL,QAAQ,CAAA;AAAA,QACR,SAAS,CAAA;AAAA,QACT,aAAa;AAAA,MAAA;AAAA,IACf;AAAA,EAEJ,UAAA;AACE,UAAM,WAAW,YAAY,IAAA,IAAQ,OAAO,QAAQ,CAAC;AACrD,OAAG,QAAQ,0CAA0C,OAAO,MAAM;AAAA,EACpE;AACF;AAEA,SAAS,oBAAoB,IAAiC;AAC5D,QAAM,iCAAiB,IAAA;AACvB,QAAM,eAAe;AAAA,IACnB,uBAAA;AAAA;AAAA,IACA;AAAA,IACA,EAAC,2BAA2B,MAAA;AAAA,wBACxB,IAAA;AAAA,IACJ;AAAA,EAAA;AAGF,SAAO,CAAC,GAAG,WAAW,QAAA,CAAS,EAAE,KAAK,MAAM,EAAE,IAAI,CAAC,CAAC,OAAO,IAAI,OAAO;AAAA,IACpE;AAAA,IACA,SAAS,OAAO,QAAQ,KAAK,OAAO,EACjC,KAAK,MAAM,EACX,IAAI,CAAC,CAAC,QAAQA,KAAI,OAAO;AAAA,MACxB;AAAA,MACA,cAAcA,MAAK,SAAS,MAAM,GAAG,EAAE,CAAC;AAAA,MACxC,YAAY,aAAa,IAAI,KAAK,GAAG,QAAQ,MAAM,GAAG,QAAQ;AAAA,IAAA,EAC9D;AAAA,EAAA,EACJ;AACJ;AAEA,SAAS,qBAAqB,IAAiC;AAC7D,SAAO,YAAY,EAAE,EAAE,IAAI,CAAC,EAAC,WAAW,OAAO,SAAS,cAAa;AAAA,IACnE;AAAA,IACA;AAAA,IACA,SAAS,OAAO,QAAQ,OAAO,EAC5B,KAAK,MAAM,EACX,IAAI,CAAC,CAAC,QAAQ,GAAG,OAAO,EAAC,QAAQ,MAAK;AAAA,EAAA,EACzC;AACJ;AAEA,SAAS,eAAe,IAAc;AACpC,QAAM,CAAC,EAAC,YAAY,UAAA,CAAU,IAAI,GAAG;AAAA,IACnC;AAAA,EAAA;AAEF,QAAM,CAAC,EAAC,WAAW,SAAA,CAAS,IAAI,GAAG,OAA4B,WAAW;AAC1E,SAAO,YAAY;AACrB;"}
@@ -1 +1 @@
1
- {"version":3,"file":"run-ast.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/services/run-ast.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAQjD,OAAO,KAAK,EAAC,kBAAkB,EAAC,MAAM,oDAAoD,CAAC;AAC3F,OAAO,KAAK,EAAC,GAAG,EAAC,MAAM,mCAAmC,CAAC;AAE3D,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,6CAA6C,CAAC;AAG9E,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,kDAAkD,CAAC;AACxF,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,yCAAyC,CAAC;AACxE,OAAO,EAEL,KAAK,eAAe,EACrB,MAAM,qCAAqC,CAAC;AAC7C,OAAO,KAAK,EAAC,mBAAmB,EAAC,MAAM,gDAAgD,CAAC;AACxF,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,2CAA2C,CAAC;AAC5E,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,2BAA2B,CAAC;AACxD,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,iBAAiB,CAAC;AAE7C,OAAO,KAAK,EAAC,cAAc,EAAC,MAAM,gBAAgB,CAAC;AAGnD,MAAM,MAAM,aAAa,GAAG;IAC1B,gBAAgB,CAAC,EAAE,OAAO,GAAG,SAAS,CAAC;IACvC,IAAI,CAAC,EAAE,OAAO,GAAG,SAAS,CAAC;IAC3B,oBAAoB,CAAC,EAAE,UAAU,GAAG,SAAS,CAAC;IAC9C,SAAS,CAAC,EAAE,mBAAmB,GAAG,SAAS,CAAC;IAC5C,EAAE,EAAE,QAAQ,CAAC;IACb,IAAI,EAAE,eAAe,CAAC;IACtB,WAAW,CAAC,EAAE,iBAAiB,GAAG,SAAS,CAAC;IAC5C,YAAY,CAAC,EAAE,YAAY,GAAG,SAAS,CAAC;IACxC,UAAU,CAAC,EAAE,OAAO,GAAG,SAAS,CAAC;IACjC,UAAU,EAAE,GAAG,CAAC,MAAM,EAAE,cAAc,CAAC,CAAC;IACxC,UAAU,CAAC,EAAE,OAAO,GAAG,SAAS,CAAC;CAClC,CAAC;AAEF,wBAAsB,MAAM,CAC1B,EAAE,EAAE,UAAU,EACd,YAAY,EAAE,YAAY,EAC1B,GAAG,EAAE,GAAG,EACR,aAAa,EAAE,OAAO,EACtB,OAAO,EAAE,aAAa,EACtB,YAAY,EAAE,MAAM,OAAO,CAAC,IAAI,CAAC,GAChC,OAAO,CAAC,kBAAkB,CAAC,CA0G7B"}
1
+ {"version":3,"file":"run-ast.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/services/run-ast.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAQjD,OAAO,KAAK,EAAC,kBAAkB,EAAC,MAAM,oDAAoD,CAAC;AAC3F,OAAO,KAAK,EAAC,GAAG,EAAe,MAAM,mCAAmC,CAAC;AAEzE,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,6CAA6C,CAAC;AAG9E,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,kDAAkD,CAAC;AACxF,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,yCAAyC,CAAC;AACxE,OAAO,EAEL,KAAK,eAAe,EACrB,MAAM,qCAAqC,CAAC;AAG7C,OAAO,KAAK,EAAC,mBAAmB,EAAC,MAAM,gDAAgD,CAAC;AACxF,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,2CAA2C,CAAC;AAC5E,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,2BAA2B,CAAC;AAExD,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,iBAAiB,CAAC;AAE7C,OAAO,KAAK,EAAC,cAAc,EAAC,MAAM,gBAAgB,CAAC;AAGnD,MAAM,MAAM,aAAa,GAAG;IAC1B,gBAAgB,CAAC,EAAE,OAAO,GAAG,SAAS,CAAC;IACvC,IAAI,CAAC,EAAE,OAAO,GAAG,SAAS,CAAC;IAC3B,oBAAoB,CAAC,EAAE,UAAU,GAAG,SAAS,CAAC;IAC9C,SAAS,CAAC,EAAE,mBAAmB,GAAG,SAAS,CAAC;IAC5C,EAAE,EAAE,QAAQ,CAAC;IACb,IAAI,EAAE,eAAe,CAAC;IACtB,WAAW,CAAC,EAAE,iBAAiB,GAAG,SAAS,CAAC;IAC5C,YAAY,CAAC,EAAE,YAAY,GAAG,SAAS,CAAC;IACxC,UAAU,CAAC,EAAE,OAAO,GAAG,SAAS,CAAC;IACjC,UAAU,EAAE,GAAG,CAAC,MAAM,EAAE,cAAc,CAAC,CAAC;IACxC,UAAU,CAAC,EAAE,OAAO,GAAG,SAAS,CAAC;CAClC,CAAC;AAEF,wBAAsB,MAAM,CAC1B,EAAE,EAAE,UAAU,EACd,YAAY,EAAE,YAAY,EAC1B,GAAG,EAAE,GAAG,EACR,aAAa,EAAE,OAAO,EACtB,OAAO,EAAE,aAAa,EACtB,YAAY,EAAE,MAAM,OAAO,CAAC,IAAI,CAAC,GAChC,OAAO,CAAC,kBAAkB,CAAC,CAwI7B"}
@@ -6,6 +6,8 @@ import { sleep } from "../../../shared/src/sleep.js";
6
6
  import "../../../zero-protocol/src/ast.js";
7
7
  import { hashOfAST } from "../../../zero-protocol/src/query-hash.js";
8
8
  import { buildPipeline } from "../../../zql/src/builder/builder.js";
9
+ import { skipYields } from "../../../zql/src/ivm/operator.js";
10
+ import { resolveSimpleScalarSubqueries } from "../../../zqlite/src/resolve-scalar-subqueries.js";
9
11
  import { transformAndHashQuery } from "../auth/read-authorizer.js";
10
12
  import { hydrate } from "./view-syncer/pipeline-driver.js";
11
13
  async function runAst(lc, clientSchema, ast, isTransformed, options, yieldProcess) {
@@ -39,8 +41,22 @@ async function runAst(lc, clientSchema, ast, isTransformed, options, yieldProces
39
41
  ).transformedAst;
40
42
  result.afterPermissions = await formatOutput(ast.table + astToZQL(ast));
41
43
  }
42
- const pipeline = buildPipeline(
44
+ const executor = (subqueryAST, childField) => {
45
+ const input = buildPipeline(subqueryAST, host, "scalar-subquery");
46
+ let node;
47
+ for (const n of skipYields(input.fetch({}))) {
48
+ node ??= n;
49
+ }
50
+ input.destroy();
51
+ return node ? node.row[childField] ?? null : void 0;
52
+ };
53
+ const { ast: resolvedAst } = resolveSimpleScalarSubqueries(
43
54
  ast,
55
+ options.tableSpecs,
56
+ executor
57
+ );
58
+ const pipeline = buildPipeline(
59
+ resolvedAst,
44
60
  host,
45
61
  "query-id",
46
62
  options.costModel,
@@ -51,7 +67,11 @@ async function runAst(lc, clientSchema, ast, isTransformed, options, yieldProces
51
67
  let syncedRowCount = 0;
52
68
  const rowsByTable = {};
53
69
  const seenByTable = /* @__PURE__ */ new Set();
54
- for (const rowChange of hydrate(pipeline, hashOfAST(ast), clientSchema)) {
70
+ for (const rowChange of hydrate(
71
+ pipeline,
72
+ hashOfAST(resolvedAst),
73
+ clientSchema
74
+ )) {
55
75
  if (rowChange === "yield") {
56
76
  await yieldProcess();
57
77
  continue;
@@ -1 +1 @@
1
- {"version":3,"file":"run-ast.js","sources":["../../../../../zero-cache/src/services/run-ast.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\n// @circular-dep-ignore\nimport {astToZQL} from '../../../ast-to-zql/src/ast-to-zql.ts';\n// @circular-dep-ignore\nimport {formatOutput} from '../../../ast-to-zql/src/format.ts';\nimport {assert} from '../../../shared/src/asserts.ts';\nimport {must} from '../../../shared/src/must.ts';\nimport {sleep} from '../../../shared/src/sleep.ts';\nimport type {AnalyzeQueryResult} from '../../../zero-protocol/src/analyze-query-result.ts';\nimport type {AST} from '../../../zero-protocol/src/ast.ts';\nimport {mapAST} from '../../../zero-protocol/src/ast.ts';\nimport type {ClientSchema} from '../../../zero-protocol/src/client-schema.ts';\nimport type {Row} from '../../../zero-protocol/src/data.ts';\nimport {hashOfAST} from '../../../zero-protocol/src/query-hash.ts';\nimport type {PermissionsConfig} from '../../../zero-schema/src/compiled-permissions.ts';\nimport type {NameMapper} from '../../../zero-schema/src/name-mapper.ts';\nimport {\n buildPipeline,\n type BuilderDelegate,\n} from '../../../zql/src/builder/builder.ts';\nimport type {ConnectionCostModel} from '../../../zql/src/planner/planner-connection.ts';\nimport type {PlanDebugger} from '../../../zql/src/planner/planner-debug.ts';\nimport type {Database} from '../../../zqlite/src/db.ts';\nimport type {JWTAuth} from '../auth/auth.ts';\nimport {transformAndHashQuery} from '../auth/read-authorizer.ts';\nimport type {LiteAndZqlSpec} from '../db/specs.ts';\nimport {hydrate} from './view-syncer/pipeline-driver.ts';\n\nexport type RunAstOptions = {\n applyPermissions?: boolean | undefined;\n auth?: JWTAuth | undefined;\n clientToServerMapper?: NameMapper | undefined;\n costModel?: ConnectionCostModel | undefined;\n db: Database;\n host: BuilderDelegate;\n permissions?: PermissionsConfig | undefined;\n planDebugger?: PlanDebugger | undefined;\n syncedRows?: 
boolean | undefined;\n tableSpecs: Map<string, LiteAndZqlSpec>;\n vendedRows?: boolean | undefined;\n};\n\nexport async function runAst(\n lc: LogContext,\n clientSchema: ClientSchema,\n ast: AST,\n isTransformed: boolean,\n options: RunAstOptions,\n yieldProcess: () => Promise<void>,\n): Promise<AnalyzeQueryResult> {\n const {clientToServerMapper, permissions, host} = options;\n const result: AnalyzeQueryResult = {\n warnings: [],\n syncedRows: undefined,\n syncedRowCount: 0,\n start: 0,\n end: 0,\n elapsed: 0,\n afterPermissions: undefined,\n readRows: undefined,\n readRowCountsByQuery: {},\n readRowCount: undefined,\n };\n\n if (!isTransformed) {\n // map the AST to server names if not already transformed\n ast = mapAST(ast, must(clientToServerMapper));\n }\n if (options.applyPermissions) {\n const auth = options.auth;\n if (!auth) {\n result.warnings.push(\n 'No auth data provided. Permission rules will compare to `NULL` wherever an auth data field is referenced.',\n );\n }\n ast = transformAndHashQuery(\n lc,\n 'clientGroupIDForAnalyze',\n ast,\n must(permissions),\n auth,\n false,\n ).transformedAst;\n result.afterPermissions = await formatOutput(ast.table + astToZQL(ast));\n }\n\n const pipeline = buildPipeline(\n ast,\n host,\n 'query-id',\n options.costModel,\n lc,\n options.planDebugger,\n );\n\n const start = performance.now();\n\n let syncedRowCount = 0;\n const rowsByTable: Record<string, Row[]> = {};\n const seenByTable: Set<string> = new Set();\n for (const rowChange of hydrate(pipeline, hashOfAST(ast), clientSchema)) {\n if (rowChange === 'yield') {\n await yieldProcess();\n continue;\n }\n assert(rowChange.type === 'add', 'Hydration only handles add row changes');\n\n // yield to other tasks to avoid blocking for too long\n if (syncedRowCount % 10 === 0) {\n await Promise.resolve();\n }\n if (syncedRowCount % 100 === 0) {\n await sleep(1);\n }\n\n let rows: Row[] = rowsByTable[rowChange.table];\n const s = rowChange.table + '.' 
+ JSON.stringify(rowChange.row);\n if (seenByTable.has(s)) {\n continue; // skip duplicates\n }\n syncedRowCount++;\n seenByTable.add(s);\n if (options.syncedRows) {\n if (!rows) {\n rows = [];\n rowsByTable[rowChange.table] = rows;\n }\n rows.push(rowChange.row);\n }\n }\n\n const end = performance.now();\n if (options.syncedRows) {\n result.syncedRows = rowsByTable;\n }\n result.start = start;\n result.end = end;\n result.elapsed = end - start;\n\n // Always include the count of synced and vended rows.\n result.syncedRowCount = syncedRowCount;\n result.readRowCountsByQuery = host.debug?.getVendedRowCounts() ?? {};\n let readRowCount = 0;\n for (const c of Object.values(result.readRowCountsByQuery)) {\n for (const v of Object.values(c)) {\n readRowCount += v;\n }\n }\n result.readRowCount = readRowCount;\n result.dbScansByQuery = host.debug?.getNVisitCounts() ?? {};\n\n if (options.vendedRows) {\n result.readRows = host.debug?.getVendedRows();\n }\n return result;\n}\n"],"names":[],"mappings":";;;;;;;;;;AA0CA,eAAsB,OACpB,IACA,cACA,KACA,eACA,SACA,cAC6B;AAC7B,QAAM,EAAuB,aAAa,KAAA,IAAQ;AAClD,QAAM,SAA6B;AAAA,IACjC,UAAU,CAAA;AAAA,IACV,YAAY;AAAA,IACZ,gBAAgB;AAAA,IAChB,OAAO;AAAA,IACP,KAAK;AAAA,IACL,SAAS;AAAA,IACT,kBAAkB;AAAA,IAClB,UAAU;AAAA,IACV,sBAAsB,CAAA;AAAA,IACtB,cAAc;AAAA,EAAA;AAOhB,MAAI,QAAQ,kBAAkB;AAC5B,UAAM,OAAO,QAAQ;AACrB,QAAI,CAAC,MAAM;AACT,aAAO,SAAS;AAAA,QACd;AAAA,MAAA;AAAA,IAEJ;AACA,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA,KAAK,WAAW;AAAA,MAChB;AAAA,MACA;AAAA,IAAA,EACA;AACF,WAAO,mBAAmB,MAAM,aAAa,IAAI,QAAQ,SAAS,GAAG,CAAC;AAAA,EACxE;AAEA,QAAM,WAAW;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,IACA,QAAQ;AAAA,IACR;AAAA,IACA,QAAQ;AAAA,EAAA;AAGV,QAAM,QAAQ,YAAY,IAAA;AAE1B,MAAI,iBAAiB;AACrB,QAAM,cAAqC,CAAA;AAC3C,QAAM,kCAA+B,IAAA;AACrC,aAAW,aAAa,QAAQ,UAAU,UAAU,GAAG,GAAG,YAAY,GAAG;AACvE,QAAI,cAAc,SAAS;AACzB,YAAM,aAAA;AACN;AAAA,IACF;AACA,WAAO,UAAU,SAAS,OAAO,wCAAwC;AAGzE,QAAI,iBAAiB,OAAO,GAAG;AAC7B,YAAM,QAAQ,QAAA;AAAA,IAChB;AACA,QAAI,iBAAiB,QAAQ,GAAG;AAC9B,YAAM,MAAM,CAAC;AAAA,IA
Cf;AAEA,QAAI,OAAc,YAAY,UAAU,KAAK;AAC7C,UAAM,IAAI,UAAU,QAAQ,MAAM,KAAK,UAAU,UAAU,GAAG;AAC9D,QAAI,YAAY,IAAI,CAAC,GAAG;AACtB;AAAA,IACF;AACA;AACA,gBAAY,IAAI,CAAC;AACjB,QAAI,QAAQ,YAAY;AACtB,UAAI,CAAC,MAAM;AACT,eAAO,CAAA;AACP,oBAAY,UAAU,KAAK,IAAI;AAAA,MACjC;AACA,WAAK,KAAK,UAAU,GAAG;AAAA,IACzB;AAAA,EACF;AAEA,QAAM,MAAM,YAAY,IAAA;AACxB,MAAI,QAAQ,YAAY;AACtB,WAAO,aAAa;AAAA,EACtB;AACA,SAAO,QAAQ;AACf,SAAO,MAAM;AACb,SAAO,UAAU,MAAM;AAGvB,SAAO,iBAAiB;AACxB,SAAO,uBAAuB,KAAK,OAAO,mBAAA,KAAwB,CAAA;AAClE,MAAI,eAAe;AACnB,aAAW,KAAK,OAAO,OAAO,OAAO,oBAAoB,GAAG;AAC1D,eAAW,KAAK,OAAO,OAAO,CAAC,GAAG;AAChC,sBAAgB;AAAA,IAClB;AAAA,EACF;AACA,SAAO,eAAe;AACtB,SAAO,iBAAiB,KAAK,OAAO,gBAAA,KAAqB,CAAA;AAEzD,MAAI,QAAQ,YAAY;AACtB,WAAO,WAAW,KAAK,OAAO,cAAA;AAAA,EAChC;AACA,SAAO;AACT;"}
1
+ {"version":3,"file":"run-ast.js","sources":["../../../../../zero-cache/src/services/run-ast.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\n// @circular-dep-ignore\nimport {astToZQL} from '../../../ast-to-zql/src/ast-to-zql.ts';\n// @circular-dep-ignore\nimport {formatOutput} from '../../../ast-to-zql/src/format.ts';\nimport {assert} from '../../../shared/src/asserts.ts';\nimport {must} from '../../../shared/src/must.ts';\nimport {sleep} from '../../../shared/src/sleep.ts';\nimport type {AnalyzeQueryResult} from '../../../zero-protocol/src/analyze-query-result.ts';\nimport type {AST, LiteralValue} from '../../../zero-protocol/src/ast.ts';\nimport {mapAST} from '../../../zero-protocol/src/ast.ts';\nimport type {ClientSchema} from '../../../zero-protocol/src/client-schema.ts';\nimport type {Row} from '../../../zero-protocol/src/data.ts';\nimport {hashOfAST} from '../../../zero-protocol/src/query-hash.ts';\nimport type {PermissionsConfig} from '../../../zero-schema/src/compiled-permissions.ts';\nimport type {NameMapper} from '../../../zero-schema/src/name-mapper.ts';\nimport {\n buildPipeline,\n type BuilderDelegate,\n} from '../../../zql/src/builder/builder.ts';\nimport type {Node} from '../../../zql/src/ivm/data.ts';\nimport {skipYields} from '../../../zql/src/ivm/operator.ts';\nimport type {ConnectionCostModel} from '../../../zql/src/planner/planner-connection.ts';\nimport type {PlanDebugger} from '../../../zql/src/planner/planner-debug.ts';\nimport type {Database} from '../../../zqlite/src/db.ts';\nimport {resolveSimpleScalarSubqueries} from '../../../zqlite/src/resolve-scalar-subqueries.ts';\nimport type {JWTAuth} from '../auth/auth.ts';\nimport {transformAndHashQuery} from '../auth/read-authorizer.ts';\nimport type {LiteAndZqlSpec} from '../db/specs.ts';\nimport {hydrate} from './view-syncer/pipeline-driver.ts';\n\nexport type RunAstOptions = {\n applyPermissions?: boolean | undefined;\n auth?: JWTAuth | undefined;\n 
clientToServerMapper?: NameMapper | undefined;\n costModel?: ConnectionCostModel | undefined;\n db: Database;\n host: BuilderDelegate;\n permissions?: PermissionsConfig | undefined;\n planDebugger?: PlanDebugger | undefined;\n syncedRows?: boolean | undefined;\n tableSpecs: Map<string, LiteAndZqlSpec>;\n vendedRows?: boolean | undefined;\n};\n\nexport async function runAst(\n lc: LogContext,\n clientSchema: ClientSchema,\n ast: AST,\n isTransformed: boolean,\n options: RunAstOptions,\n yieldProcess: () => Promise<void>,\n): Promise<AnalyzeQueryResult> {\n const {clientToServerMapper, permissions, host} = options;\n const result: AnalyzeQueryResult = {\n warnings: [],\n syncedRows: undefined,\n syncedRowCount: 0,\n start: 0,\n end: 0,\n elapsed: 0,\n afterPermissions: undefined,\n readRows: undefined,\n readRowCountsByQuery: {},\n readRowCount: undefined,\n };\n\n if (!isTransformed) {\n // map the AST to server names if not already transformed\n ast = mapAST(ast, must(clientToServerMapper));\n }\n if (options.applyPermissions) {\n const auth = options.auth;\n if (!auth) {\n result.warnings.push(\n 'No auth data provided. Permission rules will compare to `NULL` wherever an auth data field is referenced.',\n );\n }\n ast = transformAndHashQuery(\n lc,\n 'clientGroupIDForAnalyze',\n ast,\n must(permissions),\n auth,\n false,\n ).transformedAst;\n result.afterPermissions = await formatOutput(ast.table + astToZQL(ast));\n }\n\n // Resolve scalar subqueries (e.g. 
whereExists with {scalar: true}) to\n // literal equality conditions so that SQLite can use indexes effectively.\n // Without this, correlated subqueries get stripped from SQL filters and\n // queries on large tables fall back to full table scans.\n const executor = (\n subqueryAST: AST,\n childField: string,\n ): LiteralValue | null | undefined => {\n const input = buildPipeline(subqueryAST, host, 'scalar-subquery');\n // Consume the full stream rather than using first() to avoid\n // triggering early return on Take's #initialFetch assertion.\n // The subquery AST already has limit: 1, so at most one row is produced.\n let node: Node | undefined;\n for (const n of skipYields(input.fetch({}))) {\n node ??= n;\n }\n input.destroy();\n return node ? ((node.row[childField] as LiteralValue) ?? null) : undefined;\n };\n\n const {ast: resolvedAst} = resolveSimpleScalarSubqueries(\n ast,\n options.tableSpecs,\n executor,\n );\n\n const pipeline = buildPipeline(\n resolvedAst,\n host,\n 'query-id',\n options.costModel,\n lc,\n options.planDebugger,\n );\n\n const start = performance.now();\n\n let syncedRowCount = 0;\n const rowsByTable: Record<string, Row[]> = {};\n const seenByTable: Set<string> = new Set();\n for (const rowChange of hydrate(\n pipeline,\n hashOfAST(resolvedAst),\n clientSchema,\n )) {\n if (rowChange === 'yield') {\n await yieldProcess();\n continue;\n }\n assert(rowChange.type === 'add', 'Hydration only handles add row changes');\n\n // yield to other tasks to avoid blocking for too long\n if (syncedRowCount % 10 === 0) {\n await Promise.resolve();\n }\n if (syncedRowCount % 100 === 0) {\n await sleep(1);\n }\n\n let rows: Row[] = rowsByTable[rowChange.table];\n const s = rowChange.table + '.' 
+ JSON.stringify(rowChange.row);\n if (seenByTable.has(s)) {\n continue; // skip duplicates\n }\n syncedRowCount++;\n seenByTable.add(s);\n if (options.syncedRows) {\n if (!rows) {\n rows = [];\n rowsByTable[rowChange.table] = rows;\n }\n rows.push(rowChange.row);\n }\n }\n\n const end = performance.now();\n if (options.syncedRows) {\n result.syncedRows = rowsByTable;\n }\n result.start = start;\n result.end = end;\n result.elapsed = end - start;\n\n // Always include the count of synced and vended rows.\n result.syncedRowCount = syncedRowCount;\n result.readRowCountsByQuery = host.debug?.getVendedRowCounts() ?? {};\n let readRowCount = 0;\n for (const c of Object.values(result.readRowCountsByQuery)) {\n for (const v of Object.values(c)) {\n readRowCount += v;\n }\n }\n result.readRowCount = readRowCount;\n result.dbScansByQuery = host.debug?.getNVisitCounts() ?? {};\n\n if (options.vendedRows) {\n result.readRows = host.debug?.getVendedRows();\n }\n return result;\n}\n"],"names":[],"mappings":";;;;;;;;;;;;AA6CA,eAAsB,OACpB,IACA,cACA,KACA,eACA,SACA,cAC6B;AAC7B,QAAM,EAAuB,aAAa,KAAA,IAAQ;AAClD,QAAM,SAA6B;AAAA,IACjC,UAAU,CAAA;AAAA,IACV,YAAY;AAAA,IACZ,gBAAgB;AAAA,IAChB,OAAO;AAAA,IACP,KAAK;AAAA,IACL,SAAS;AAAA,IACT,kBAAkB;AAAA,IAClB,UAAU;AAAA,IACV,sBAAsB,CAAA;AAAA,IACtB,cAAc;AAAA,EAAA;AAOhB,MAAI,QAAQ,kBAAkB;AAC5B,UAAM,OAAO,QAAQ;AACrB,QAAI,CAAC,MAAM;AACT,aAAO,SAAS;AAAA,QACd;AAAA,MAAA;AAAA,IAEJ;AACA,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA,KAAK,WAAW;AAAA,MAChB;AAAA,MACA;AAAA,IAAA,EACA;AACF,WAAO,mBAAmB,MAAM,aAAa,IAAI,QAAQ,SAAS,GAAG,CAAC;AAAA,EACxE;AAMA,QAAM,WAAW,CACf,aACA,eACoC;AACpC,UAAM,QAAQ,cAAc,aAAa,MAAM,iBAAiB;AAIhE,QAAI;AACJ,eAAW,KAAK,WAAW,MAAM,MAAM,CAAA,CAAE,CAAC,GAAG;AAC3C,eAAS;AAAA,IACX;AACA,UAAM,QAAA;AACN,WAAO,OAAS,KAAK,IAAI,UAAU,KAAsB,OAAQ;AAAA,EACnE;AAEA,QAAM,EAAC,KAAK,YAAA,IAAe;AAAA,IACzB;AAAA,IACA,QAAQ;AAAA,IACR;AAAA,EAAA;AAGF,QAAM,WAAW;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,IACA,QAAQ;AAAA,IACR;AAAA,IACA,QAAQ;AAAA,EAAA;AAGV,QAAM,QAAQ,YAAY,IAAA;AAE1B,MAAI,iBAAiB
;AACrB,QAAM,cAAqC,CAAA;AAC3C,QAAM,kCAA+B,IAAA;AACrC,aAAW,aAAa;AAAA,IACtB;AAAA,IACA,UAAU,WAAW;AAAA,IACrB;AAAA,EAAA,GACC;AACD,QAAI,cAAc,SAAS;AACzB,YAAM,aAAA;AACN;AAAA,IACF;AACA,WAAO,UAAU,SAAS,OAAO,wCAAwC;AAGzE,QAAI,iBAAiB,OAAO,GAAG;AAC7B,YAAM,QAAQ,QAAA;AAAA,IAChB;AACA,QAAI,iBAAiB,QAAQ,GAAG;AAC9B,YAAM,MAAM,CAAC;AAAA,IACf;AAEA,QAAI,OAAc,YAAY,UAAU,KAAK;AAC7C,UAAM,IAAI,UAAU,QAAQ,MAAM,KAAK,UAAU,UAAU,GAAG;AAC9D,QAAI,YAAY,IAAI,CAAC,GAAG;AACtB;AAAA,IACF;AACA;AACA,gBAAY,IAAI,CAAC;AACjB,QAAI,QAAQ,YAAY;AACtB,UAAI,CAAC,MAAM;AACT,eAAO,CAAA;AACP,oBAAY,UAAU,KAAK,IAAI;AAAA,MACjC;AACA,WAAK,KAAK,UAAU,GAAG;AAAA,IACzB;AAAA,EACF;AAEA,QAAM,MAAM,YAAY,IAAA;AACxB,MAAI,QAAQ,YAAY;AACtB,WAAO,aAAa;AAAA,EACtB;AACA,SAAO,QAAQ;AACf,SAAO,MAAM;AACb,SAAO,UAAU,MAAM;AAGvB,SAAO,iBAAiB;AACxB,SAAO,uBAAuB,KAAK,OAAO,mBAAA,KAAwB,CAAA;AAClE,MAAI,eAAe;AACnB,aAAW,KAAK,OAAO,OAAO,OAAO,oBAAoB,GAAG;AAC1D,eAAW,KAAK,OAAO,OAAO,CAAC,GAAG;AAChC,sBAAgB;AAAA,IAClB;AAAA,EACF;AACA,SAAO,eAAe;AACtB,SAAO,iBAAiB,KAAK,OAAO,gBAAA,KAAqB,CAAA;AAEzD,MAAI,QAAQ,YAAY;AACtB,WAAO,WAAW,KAAK,OAAO,cAAA;AAAA,EAChC;AACA,SAAO;AACT;"}
@@ -16,6 +16,7 @@ export declare class RunningState {
16
16
  #private;
17
17
  constructor(serviceName: string, retryConfig?: RetryConfig, setTimeoutFn?: typeof setTimeout, sleeper?: typeof sleepWithAbort);
18
18
  get signal(): AbortSignal;
19
+ get retryDelay(): number;
19
20
  /**
20
21
  * Returns `true` until {@link stop()} has been called.
21
22
  *
@@ -1 +1 @@
1
- {"version":3,"file":"running-state.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/services/running-state.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAGjD,OAAO,EAAC,cAAc,EAAC,MAAM,8BAA8B,CAAC;AAG5D,eAAO,MAAM,0BAA0B,QAAQ,CAAC;AAEhD,MAAM,MAAM,WAAW,GAAG;IACxB,iBAAiB,CAAC,EAAE,MAAM,CAAC;IAC3B,aAAa,CAAC,EAAE,MAAM,CAAC;CACxB,CAAC;AAEF,MAAM,WAAW,UAAU;IACzB,MAAM,IAAI,IAAI,CAAC;CAChB;AAED,MAAM,MAAM,YAAY,GAAG,MAAM,IAAI,CAAC;AAEtC;;GAEG;AACH,qBAAa,YAAY;;gBAarB,WAAW,EAAE,MAAM,EACnB,WAAW,CAAC,EAAE,WAAW,EACzB,YAAY,oBAAa,EACzB,OAAO,wBAAiB;IA+B1B,IAAI,MAAM,IAAI,WAAW,CAExB;IAED;;;;;OAKG;IACH,SAAS,IAAI,OAAO;IAIpB;;;OAGG;IACH,YAAY,CAAC,CAAC,EAAE,UAAU,GAAG,YAAY;IAMzC;;OAEG;IACH,UAAU,CAAC,KAAK,SAAS,OAAO,EAAE,EAChC,EAAE,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,KAAK,IAAI,EAC5B,SAAS,EAAE,MAAM,EACjB,GAAG,IAAI,EAAE,KAAK;IAWhB;;;OAGG;IACG,KAAK,CAAC,EAAE,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;IAItC;;;OAGG;IACH,IAAI,CAAC,EAAE,EAAE,UAAU,EAAE,GAAG,CAAC,EAAE,OAAO,GAAG,IAAI;IAQzC;;OAEG;IACH,OAAO,IAAI,OAAO,CAAC,IAAI,CAAC;IAIxB;;;;;;OAMG;IACG,OAAO,CAAC,EAAE,EAAE,UAAU,EAAE,GAAG,EAAE,OAAO,GAAG,OAAO,CAAC,IAAI,CAAC;IAgB1D;;;;OAIG;IACH,YAAY;CAGb;AAED;;;GAGG;AACH,qBAAa,kBAAmB,SAAQ,KAAK;IAC3C,QAAQ,CAAC,IAAI,wBAAwB;CACtC"}
1
+ {"version":3,"file":"running-state.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/services/running-state.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAGjD,OAAO,EAAC,cAAc,EAAC,MAAM,8BAA8B,CAAC;AAG5D,eAAO,MAAM,0BAA0B,QAAQ,CAAC;AAEhD,MAAM,MAAM,WAAW,GAAG;IACxB,iBAAiB,CAAC,EAAE,MAAM,CAAC;IAC3B,aAAa,CAAC,EAAE,MAAM,CAAC;CACxB,CAAC;AAEF,MAAM,WAAW,UAAU;IACzB,MAAM,IAAI,IAAI,CAAC;CAChB;AAED,MAAM,MAAM,YAAY,GAAG,MAAM,IAAI,CAAC;AAEtC;;GAEG;AACH,qBAAa,YAAY;;gBAarB,WAAW,EAAE,MAAM,EACnB,WAAW,CAAC,EAAE,WAAW,EACzB,YAAY,oBAAa,EACzB,OAAO,wBAAiB;IA+B1B,IAAI,MAAM,IAAI,WAAW,CAExB;IAED,IAAI,UAAU,WAEb;IAED;;;;;OAKG;IACH,SAAS,IAAI,OAAO;IAIpB;;;OAGG;IACH,YAAY,CAAC,CAAC,EAAE,UAAU,GAAG,YAAY;IAMzC;;OAEG;IACH,UAAU,CAAC,KAAK,SAAS,OAAO,EAAE,EAChC,EAAE,EAAE,CAAC,GAAG,IAAI,EAAE,KAAK,KAAK,IAAI,EAC5B,SAAS,EAAE,MAAM,EACjB,GAAG,IAAI,EAAE,KAAK;IAWhB;;;OAGG;IACG,KAAK,CAAC,EAAE,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;IAItC;;;OAGG;IACH,IAAI,CAAC,EAAE,EAAE,UAAU,EAAE,GAAG,CAAC,EAAE,OAAO,GAAG,IAAI;IAQzC;;OAEG;IACH,OAAO,IAAI,OAAO,CAAC,IAAI,CAAC;IAIxB;;;;;;OAMG;IACG,OAAO,CAAC,EAAE,EAAE,UAAU,EAAE,GAAG,EAAE,OAAO,GAAG,OAAO,CAAC,IAAI,CAAC;IAiB1D;;;;OAIG;IACH,YAAY;CAGb;AAED;;;GAGG;AACH,qBAAa,kBAAmB,SAAQ,KAAK;IAC3C,QAAQ,CAAC,IAAI,wBAAwB;CACtC"}