@rocicorp/zero 1.2.0-canary.4 → 1.2.0-canary.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/out/ast-to-zql/src/format.d.ts.map +1 -1
  2. package/out/ast-to-zql/src/format.js +6 -6
  3. package/out/ast-to-zql/src/format.js.map +1 -1
  4. package/out/shared/src/btree-set.d.ts.map +1 -1
  5. package/out/shared/src/btree-set.js +73 -41
  6. package/out/shared/src/btree-set.js.map +1 -1
  7. package/out/zero/package.js +5 -5
  8. package/out/zero/package.js.map +1 -1
  9. package/out/zero-cache/src/db/transaction-pool.d.ts.map +1 -1
  10. package/out/zero-cache/src/db/transaction-pool.js.map +1 -1
  11. package/out/zero-cache/src/observability/metrics.d.ts +1 -1
  12. package/out/zero-cache/src/observability/metrics.d.ts.map +1 -1
  13. package/out/zero-cache/src/observability/metrics.js.map +1 -1
  14. package/out/zero-cache/src/server/anonymous-otel-start.d.ts.map +1 -1
  15. package/out/zero-cache/src/server/anonymous-otel-start.js +6 -1
  16. package/out/zero-cache/src/server/anonymous-otel-start.js.map +1 -1
  17. package/out/zero-cache/src/server/change-streamer.d.ts.map +1 -1
  18. package/out/zero-cache/src/server/change-streamer.js +3 -1
  19. package/out/zero-cache/src/server/change-streamer.js.map +1 -1
  20. package/out/zero-cache/src/server/replicator.d.ts.map +1 -1
  21. package/out/zero-cache/src/server/replicator.js +28 -1
  22. package/out/zero-cache/src/server/replicator.js.map +1 -1
  23. package/out/zero-cache/src/services/change-source/common/replica-schema.d.ts.map +1 -1
  24. package/out/zero-cache/src/services/change-source/common/replica-schema.js +13 -1
  25. package/out/zero-cache/src/services/change-source/common/replica-schema.js.map +1 -1
  26. package/out/zero-cache/src/services/change-source/custom/change-source.js +2 -2
  27. package/out/zero-cache/src/services/change-source/pg/change-source.d.ts.map +1 -1
  28. package/out/zero-cache/src/services/change-source/pg/change-source.js.map +1 -1
  29. package/out/zero-cache/src/services/change-source/pg/logical-replication/stream.js +1 -1
  30. package/out/zero-cache/src/services/change-streamer/backup-monitor.d.ts +1 -1
  31. package/out/zero-cache/src/services/change-streamer/backup-monitor.d.ts.map +1 -1
  32. package/out/zero-cache/src/services/change-streamer/backup-monitor.js +31 -1
  33. package/out/zero-cache/src/services/change-streamer/backup-monitor.js.map +1 -1
  34. package/out/zero-cache/src/services/change-streamer/change-streamer-http.js +1 -1
  35. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js +1 -1
  36. package/out/zero-cache/src/services/change-streamer/storer.d.ts.map +1 -1
  37. package/out/zero-cache/src/services/change-streamer/storer.js +1 -1
  38. package/out/zero-cache/src/services/change-streamer/storer.js.map +1 -1
  39. package/out/zero-cache/src/services/replicator/schema/replication-state.d.ts.map +1 -1
  40. package/out/zero-cache/src/services/replicator/schema/replication-state.js +6 -3
  41. package/out/zero-cache/src/services/replicator/schema/replication-state.js.map +1 -1
  42. package/out/zero-cache/src/types/pg.d.ts.map +1 -1
  43. package/out/zero-cache/src/types/pg.js +16 -8
  44. package/out/zero-cache/src/types/pg.js.map +1 -1
  45. package/out/zero-cache/src/workers/replicator.d.ts +5 -2
  46. package/out/zero-cache/src/workers/replicator.d.ts.map +1 -1
  47. package/out/zero-cache/src/workers/replicator.js +10 -6
  48. package/out/zero-cache/src/workers/replicator.js.map +1 -1
  49. package/out/zero-client/src/client/version.js +1 -1
  50. package/out/zql/src/ivm/view-apply-change.d.ts.map +1 -1
  51. package/out/zql/src/ivm/view-apply-change.js +34 -26
  52. package/out/zql/src/ivm/view-apply-change.js.map +1 -1
  53. package/out/zql/src/query/query.d.ts +1 -2
  54. package/out/zql/src/query/query.d.ts.map +1 -1
  55. package/package.json +5 -5
@@ -1 +1 @@
1
- {"version":3,"file":"storer.js","names":["#lc","#shard","#taskID","#discoveryAddress","#discoveryProtocol","#db","#replicaVersion","#onConsumed","#onFatal","#queue","#backPressureThresholdBytes","#cdc","#approximateQueuedBytes","#running","#readyForMore","#stopped","#processQueue","#cancelQueueEntries","#startCatchup","#trackBackfillMetadata","#maybeReleaseBackPressure","#catchup","#upsertTableMetadataStmt","#upsertColumnBackfillStmt"],"sources":["../../../../../../zero-cache/src/services/change-streamer/storer.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {resolver, type Resolver} from '@rocicorp/resolver';\nimport {getHeapStatistics} from 'node:v8';\nimport {type PendingQuery, type Row} from 'postgres';\nimport {AbortError} from '../../../../shared/src/abort-error.ts';\nimport {assert} from '../../../../shared/src/asserts.ts';\nimport {BigIntJSON} from '../../../../shared/src/bigint-json.ts';\nimport {Queue} from '../../../../shared/src/queue.ts';\nimport {promiseVoid} from '../../../../shared/src/resolved-promises.ts';\nimport * as v from '../../../../shared/src/valita.ts';\nimport * as Mode from '../../db/mode-enum.ts';\nimport {runTx} from '../../db/run-transaction.ts';\nimport {TransactionPool} from '../../db/transaction-pool.ts';\nimport {type PostgresDB, type PostgresTransaction} from '../../types/pg.ts';\nimport {cdcSchema, type ShardID} from '../../types/shards.ts';\nimport {\n backfillRequestSchema,\n isDataChange,\n isSchemaChange,\n type BackfillID,\n type BackfillRequest,\n type Change,\n type DataChange,\n type Identifier,\n type SchemaChange,\n type TableMetadata,\n} from '../change-source/protocol/current.ts';\nimport {type Commit} from '../change-source/protocol/current/downstream.ts';\nimport type {\n DownstreamStatusMessage,\n UpstreamStatusMessage,\n} from '../change-source/protocol/current/status.ts';\nimport type {ReplicatorMode} from '../replicator/replicator.ts';\nimport type {Service} from 
'../service.ts';\nimport type {WatermarkedChange} from './change-streamer-service.ts';\nimport {type ChangeEntry} from './change-streamer.ts';\nimport * as ErrorType from './error-type-enum.ts';\nimport {\n AutoResetSignal,\n markResetRequired,\n type BackfillingColumn,\n type ReplicationState,\n type TableMetadataRow,\n} from './schema/tables.ts';\nimport type {Subscriber} from './subscriber.ts';\n\ntype SubscriberAndMode = {\n subscriber: Subscriber;\n mode: ReplicatorMode;\n};\n\ntype QueueEntry =\n | [\n 'change',\n watermark: string,\n json: string,\n orig: Exclude<Change, DataChange> | null, // null for DataChanges\n ]\n | ['ready', callback: () => void]\n | ['subscriber', SubscriberAndMode]\n | DownstreamStatusMessage\n | ['abort']\n | 'stop';\n\ntype PendingTransaction = {\n pool: TransactionPool;\n preCommitWatermark: string;\n pos: number;\n startingReplicationState: Promise<ReplicationState>;\n ack: boolean;\n};\n\nconst backfillRequestsSchema = v.array(backfillRequestSchema);\n\n/**\n * Handles the storage of changes and the catchup of subscribers\n * that are behind.\n *\n * In the context of catchup and cleanup, it is the responsibility of the\n * Storer to decide whether a client can be caught up, or whether the\n * changes needed to catch a client up have been purged.\n *\n * **Maintained invariant**: The Change DB is only empty for a\n * completely new replica (i.e. initial-sync with no changes from the\n * replication stream).\n * * In this case, all new subscribers are expected start from the\n * `replicaVersion`, which is the version at which initial sync\n * was performed, and any attempts to catchup from a different\n * point fail.\n *\n * Conversely, if non-initial changes have flowed through the system\n * (i.e. 
via the replication stream), the ChangeDB must *not* be empty,\n * and the earliest change in the `changeLog` represents the earliest\n * \"commit\" from (after) which a subscriber can be caught up.\n * * Any attempts to catchup from an earlier point must fail with\n * a `WatermarkTooOld` error.\n * * Failure to do so could result in streaming changes to the\n * subscriber such that there is a gap in its replication history.\n *\n * Note: Subscribers (i.e. `incremental-syncer`) consider an \"error\" signal\n * an unrecoverable error and shut down in response. This allows the\n * production system to replace it with a new task and fresh copy of the\n * replica backup.\n */\nexport class Storer implements Service {\n readonly id = 'storer';\n readonly #lc: LogContext;\n readonly #shard: ShardID;\n readonly #taskID: string;\n readonly #discoveryAddress: string;\n readonly #discoveryProtocol: string;\n readonly #db: PostgresDB;\n readonly #replicaVersion: string;\n readonly #onConsumed: (c: Commit | UpstreamStatusMessage) => void;\n readonly #onFatal: (err: Error) => void;\n readonly #queue = new Queue<QueueEntry>();\n readonly #backPressureThresholdBytes: number;\n\n #approximateQueuedBytes = 0;\n #running = false;\n\n constructor(\n lc: LogContext,\n shard: ShardID,\n taskID: string,\n discoveryAddress: string,\n discoveryProtocol: string,\n db: PostgresDB,\n replicaVersion: string,\n onConsumed: (c: Commit | UpstreamStatusMessage) => void,\n onFatal: (err: Error) => void,\n backPressureLimitHeapProportion: number,\n ) {\n this.#lc = lc.withContext('component', 'change-log');\n this.#shard = shard;\n this.#taskID = taskID;\n this.#discoveryAddress = discoveryAddress;\n this.#discoveryProtocol = discoveryProtocol;\n this.#db = db;\n this.#replicaVersion = replicaVersion;\n this.#onConsumed = onConsumed;\n this.#onFatal = onFatal;\n\n const heapStats = getHeapStatistics();\n this.#backPressureThresholdBytes =\n (heapStats.heap_size_limit - heapStats.used_heap_size) *\n 
backPressureLimitHeapProportion;\n\n this.#lc.info?.(\n `Using up to ${(this.#backPressureThresholdBytes / 1024 ** 2).toFixed(2)} MB of ` +\n `--max-old-space-size (~${(heapStats.heap_size_limit / 1024 ** 2).toFixed(2)} MB) ` +\n `to absorb upstream spikes`,\n {heapStats},\n );\n }\n\n // For readability in SQL statements.\n #cdc(table: string) {\n return this.#db(`${cdcSchema(this.#shard)}.${table}`);\n }\n\n async assumeOwnership() {\n const db = this.#db;\n const owner = this.#taskID;\n const ownerAddress = this.#discoveryAddress;\n const ownerProtocol = this.#discoveryProtocol;\n // we omit `ws://` so that old view syncer versions that are not expecting the protocol continue to not get it\n const addressWithProtocol =\n ownerProtocol === 'ws'\n ? ownerAddress\n : `${ownerProtocol}://${ownerAddress}`;\n this.#lc.info?.(`assuming ownership at ${addressWithProtocol}`);\n const start = performance.now();\n await db`UPDATE ${this.#cdc('replicationState')} SET ${db({owner, ownerAddress: addressWithProtocol})}`;\n const elapsed = (performance.now() - start).toFixed(2);\n this.#lc.info?.(\n `assumed ownership at ${addressWithProtocol} (${elapsed} ms)`,\n );\n }\n\n async getStartStreamInitializationParameters(): Promise<{\n lastWatermark: string;\n backfillRequests: BackfillRequest[];\n }> {\n const [[{lastWatermark}], result] = await runTx(\n this.#db,\n sql => [\n sql<{lastWatermark: string}[]>`\n SELECT \"lastWatermark\" FROM ${this.#cdc('replicationState')}`,\n\n // Formats a BackfillRequest using json_object_agg() to construct the\n // `columns` object. 
It is LEFT JOIN'ed with the `tableMetadata` table\n // to make it optional and possibly `null`.\n sql`\n SELECT \n json_build_object(\n 'schema', b.\"schema\",\n 'name', b.\"table\",\n 'metadata', t.\"metadata\"\n ) as \"table\",\n json_object_agg(b.\"column\", b.\"backfill\") \n as \"columns\"\n FROM ${this.#cdc('backfilling')} as b\n LEFT JOIN ${this.#cdc('tableMetadata')} as t\n ON (b.\"schema\" = t.\"schema\" AND b.\"table\" = t.\"table\")\n GROUP BY b.\"schema\", b.\"table\", t.\"metadata\"\n `,\n ],\n {mode: Mode.READONLY},\n );\n\n return {\n lastWatermark,\n backfillRequests: v.parse(result, backfillRequestsSchema),\n };\n }\n\n async getMinWatermarkForCatchup(): Promise<string | null> {\n const [{minWatermark}] = await this.#db<\n {minWatermark: string | null}[]\n > /*sql*/ `\n SELECT min(watermark) as \"minWatermark\" FROM ${this.#cdc('changeLog')}`;\n return minWatermark;\n }\n\n purgeRecordsBefore(watermark: string): Promise<number> {\n return runTx(this.#db, async sql => {\n const [{deleted}] = await sql<{deleted: bigint}[]>`\n WITH purged AS (\n DELETE FROM ${this.#cdc('changeLog')} WHERE watermark < ${watermark} \n RETURNING watermark, pos\n ) SELECT COUNT(*) as deleted FROM purged;`;\n\n // Before committing the purge, check that this process is still the\n // owner. This is done after the DELETE to minimize the amount of time\n // that writes to the changeLog are delayed.\n const [{owner}] = await sql<ReplicationState[]>`\n SELECT * FROM ${this.#cdc('replicationState')} FOR SHARE`;\n if (owner !== this.#taskID) {\n throw new AbortError(\n `aborting changeLog purge to ${watermark} because ownership has been taken by ${owner}`,\n );\n }\n return Number(deleted);\n });\n }\n\n /**\n * @returns The size of the serialized entry, for memory / I/O estimations.\n */\n store(entry: WatermarkedChange) {\n const [watermark, [_tag, change]] = entry;\n // Eagerly stringify the JSON object so that the memory usage can be\n // more accurately measured (i.e. 
without an extra object traversal and\n // ad hoc memory counting heuristics).\n //\n // This essentially moves the stringify() computation out of the pg client,\n // which is instead configured to pass `string` objects directly as JSON\n // strings for JSON-valued columns (see TypeOptions.sendStringAsJson).\n const json = BigIntJSON.stringify(change);\n this.#approximateQueuedBytes += json.length;\n\n this.#queue.enqueue([\n 'change',\n watermark,\n json,\n isDataChange(change) ? null : change, // drop DataChanges to save memory\n ]);\n\n return json.length;\n }\n\n abort() {\n this.#queue.enqueue(['abort']);\n }\n\n status(s: DownstreamStatusMessage) {\n this.#queue.enqueue(s);\n }\n\n catchup(subscriber: Subscriber, mode: ReplicatorMode) {\n this.#queue.enqueue(['subscriber', {subscriber, mode}]);\n }\n\n #readyForMore: Resolver<void> | null = null;\n\n readyForMore(): Promise<void> | undefined {\n if (!this.#running) {\n return undefined;\n }\n if (\n this.#readyForMore === null &&\n this.#approximateQueuedBytes > this.#backPressureThresholdBytes\n ) {\n this.#lc.warn?.(\n `applying back pressure with ${this.#queue.size()} queued changes (~${(this.#approximateQueuedBytes / 1024 ** 2).toFixed(2)} MB)\\n` +\n `\\n` +\n `To inspect changeLog backlog in your change DB:\\n` +\n ` SELECT\\n` +\n ` (change->'relation'->>'schema') || '.' 
|| (change->'relation'->>'name') AS table_name,\\n` +\n ` change->>'tag' AS operation,\\n` +\n ` COUNT(*) AS count\\n` +\n ` FROM \"<app_id>/cdc\".\"changeLog\"\\n` +\n ` GROUP BY 1, 2\\n` +\n ` ORDER BY 3 DESC\\n` +\n ` LIMIT 20;`,\n );\n this.#readyForMore = resolver();\n }\n return this.#readyForMore?.promise;\n }\n\n #maybeReleaseBackPressure() {\n if (\n this.#readyForMore !== null &&\n // Wait for at least 20% of the threshold to free up.\n this.#approximateQueuedBytes < this.#backPressureThresholdBytes * 0.8\n ) {\n this.#lc.info?.(\n `releasing back pressure with ${this.#queue.size()} queued changes (~${(this.#approximateQueuedBytes / 1024 ** 2).toFixed(2)} MB)`,\n );\n this.#readyForMore.resolve();\n this.#readyForMore = null;\n }\n }\n\n #stopped = promiseVoid;\n\n /**\n * Runs the storer loop until {@link stop()} is called, or an error is thrown.\n * Once {@link run()} completes, it can be called again.\n */\n async run() {\n assert(!this.#running, `storer is already running`);\n\n const {promise: stopped, resolve: signalStopped} = resolver();\n this.#running = true;\n this.#stopped = stopped;\n\n this.#lc.info?.('starting storer');\n let err: unknown;\n try {\n await this.#processQueue();\n } catch (e) {\n err = e; // used in finally\n throw e;\n } finally {\n // Release any pending backpressure so the upstream can proceed\n if (this.#readyForMore !== null) {\n this.#readyForMore.resolve();\n this.#readyForMore = null;\n }\n this.#cancelQueueEntries(\n this.#queue.drain().filter(entry => entry !== undefined),\n err,\n );\n this.#running = false;\n signalStopped();\n this.#lc.info?.('storer stopped');\n }\n }\n\n #cancelQueueEntries(queue: QueueEntry[], e: unknown) {\n if (queue.length === 0) {\n return;\n }\n this.#lc.info?.(\n `canceling ${queue.length} entries from the changeLog queue`,\n );\n const err = e instanceof Error ? 
e : new AbortError('server shutting down');\n for (const entry of queue) {\n if (entry === 'stop') {\n continue;\n }\n const type = entry[0];\n switch (type) {\n case 'subscriber': {\n // Disconnect subscribers waiting to be caught up so that they can\n // reconnect and try again.\n const {subscriber} = entry[1];\n this.#lc.info?.(`disconnecting ${subscriber.id}`);\n subscriber.fail(err);\n break;\n }\n }\n }\n }\n\n async #processQueue() {\n let tx: PendingTransaction | null = null;\n let msg: QueueEntry | false;\n\n const catchupQueue: SubscriberAndMode[] = [];\n try {\n while ((msg = await this.#queue.dequeue()) !== 'stop') {\n const [msgType] = msg;\n switch (msgType) {\n case 'ready': {\n const signalReady = msg[1];\n signalReady();\n continue;\n }\n case 'subscriber': {\n const subscriber = msg[1];\n if (tx) {\n catchupQueue.push(subscriber); // Wait for the current tx to complete.\n } else {\n await this.#startCatchup([subscriber]); // Catch up immediately.\n }\n continue;\n }\n case 'status':\n this.#onConsumed(msg);\n continue;\n case 'abort': {\n if (tx) {\n tx.pool.abort();\n await tx.pool.done();\n tx = null;\n }\n continue;\n }\n }\n // msgType === 'change'\n const [_, watermark, json, change] = msg;\n const tag = change?.tag;\n this.#approximateQueuedBytes -= json.length;\n\n if (tag === 'begin') {\n assert(!tx, 'received BEGIN in the middle of a transaction');\n const {promise, resolve, reject} = resolver<ReplicationState>();\n void promise.catch(() => {}); // handle rejections before the await\n tx = {\n pool: new TransactionPool(\n this.#lc.withContext('watermark', watermark),\n Mode.READ_COMMITTED,\n ),\n preCommitWatermark: watermark,\n pos: 0,\n startingReplicationState: promise,\n ack: !change.skipAck,\n };\n tx.pool.run(this.#db);\n // Acquire a lock on the replicationState row to detect and/or prevent\n // a concurrent ownership change.\n void tx.pool.process(tx => {\n tx<ReplicationState[]> /*sql*/ `\n SELECT * FROM 
${this.#cdc('replicationState')} FOR UPDATE`.then(\n ([result]) => resolve(result),\n reject,\n );\n return [];\n });\n } else {\n assert(tx, () => `received change outside of transaction: ${json}`);\n tx.pos++;\n }\n\n const entry = {\n watermark: tag === 'commit' ? watermark : tx.preCommitWatermark,\n precommit: tag === 'commit' ? tx.preCommitWatermark : null,\n pos: tx.pos,\n change: json,\n };\n\n const processed = tx.pool.process(sql => [\n sql`INSERT INTO ${this.#cdc('changeLog')} ${sql(entry)}`,\n ...(change !== null && isSchemaChange(change)\n ? this.#trackBackfillMetadata(sql, change)\n : []),\n ]);\n\n if (tx.pos % 100 === 0) {\n // Backpressure is exerted on commit when awaiting tx.pool.done().\n // However, backpressure checks need to be regularly done for\n // very large transactions in order to avoid memory blowup.\n await processed;\n }\n this.#maybeReleaseBackPressure();\n\n if (tag === 'commit') {\n const {owner} = await tx.startingReplicationState;\n if (owner !== this.#taskID) {\n // Ownership change reflected in the replicationState read in 'begin'.\n tx.pool.fail(\n new AbortError(\n `changeLog ownership has been assumed by ${owner}`,\n ),\n );\n } else {\n // Update the replication state.\n const lastWatermark = watermark;\n void tx.pool.process(tx => [\n tx`\n UPDATE ${this.#cdc('replicationState')} SET ${tx({lastWatermark})}`,\n ]);\n tx.pool.setDone();\n }\n\n await tx.pool.done();\n\n // ACK the LSN to the upstream Postgres.\n if (tx.ack) {\n this.#onConsumed(['commit', change, {watermark}]);\n }\n tx = null;\n\n // Before beginning the next transaction, open a READONLY snapshot to\n // concurrently catchup any queued subscribers.\n await this.#startCatchup(catchupQueue.splice(0));\n } else if (tag === 'rollback') {\n // Aborted transactions are not stored in the changeLog. 
Abort the current tx\n // and process catchup of subscribers that were waiting for it to end.\n tx.pool.abort();\n await tx.pool.done();\n tx = null;\n\n await this.#startCatchup(catchupQueue.splice(0));\n }\n }\n } catch (e) {\n catchupQueue.forEach(({subscriber}) => subscriber.fail(e));\n throw e;\n }\n }\n\n async #startCatchup(subs: SubscriberAndMode[]) {\n if (subs.length === 0) {\n return;\n }\n\n const reader = new TransactionPool(\n this.#lc.withContext('pool', 'catchup'),\n Mode.READONLY,\n );\n reader.run(this.#db);\n\n let lastWatermark: string | undefined;\n try {\n // Ensure that the transaction has started (and is thus holding a snapshot\n // of the database) before continuing on to commit more changes. This is\n // done by performing a single read on the db, which determines the\n // snapshot for the REPEATABLE_READ transaction.\n [{lastWatermark}] = await reader.processReadTask(\n sql => sql<ReplicationState[]>`\n SELECT * FROM ${this.#cdc('replicationState')}\n `,\n );\n } catch (e) {\n subs.map(({subscriber}) => subscriber.fail(e));\n throw e;\n }\n\n // Run the actual catchup queries in the background. Errors are handled in\n // #catchup() by disconnecting the associated subscriber.\n void Promise.all(\n subs.map(sub => this.#catchup(sub, lastWatermark, reader)),\n ).finally(() => reader.setDone());\n }\n\n async #catchup(\n {subscriber: sub, mode}: SubscriberAndMode,\n lastWatermark: string,\n reader: TransactionPool,\n ) {\n try {\n await reader.processReadTask(async tx => {\n const start = Date.now();\n\n // When starting from initial-sync, there won't be a change with a watermark\n // equal to the replica version. 
This is the empty changeLog scenario.\n let watermarkFound = sub.watermark === this.#replicaVersion;\n let count = 0;\n let lastBatchConsumed: Promise<unknown> | undefined;\n\n for await (const entries of tx<ChangeEntry[]> /*sql*/ `\n SELECT watermark, change FROM ${this.#cdc('changeLog')}\n WHERE watermark >= ${sub.watermark}\n AND watermark <= ${lastWatermark}\n ORDER BY watermark, pos`.cursor(2000)) {\n // Wait for the last batch of entries to be consumed by the\n // subscriber before sending down the current batch. This pipelining\n // allows one batch of changes to be received from the change-db\n // while the previous batch of changes are sent to the subscriber,\n // resulting in flow control that caps the number of changes\n // referenced in memory to 2 * batch-size.\n const start = performance.now();\n await lastBatchConsumed;\n const elapsed = performance.now() - start;\n if (lastBatchConsumed) {\n (elapsed > 100 ? this.#lc.info : this.#lc.debug)?.(\n `waited ${elapsed.toFixed(3)} ms for ${sub.id} to consume last batch of catchup entries`,\n );\n }\n\n for (const entry of entries) {\n if (entry.watermark === sub.watermark) {\n // This should be the first entry.\n // Catchup starts from *after* the watermark.\n watermarkFound = true;\n } else if (watermarkFound) {\n lastBatchConsumed = sub.catchup(toDownstream(entry));\n count++;\n } else if (mode === 'backup') {\n throw new AutoResetSignal(\n `backup replica at watermark ${sub.watermark} is behind change db: ${entry.watermark})`,\n );\n } else {\n this.#lc.warn?.(\n `rejecting subscriber at watermark ${sub.watermark} (earliest watermark: ${entry.watermark})`,\n );\n sub.close(\n ErrorType.WatermarkTooOld,\n `earliest supported watermark is ${entry.watermark} (requested ${sub.watermark})`,\n );\n return;\n }\n }\n }\n if (watermarkFound) {\n await lastBatchConsumed;\n this.#lc.info?.(\n `caught up ${sub.id} with ${count} changes (${\n Date.now() - start\n } ms)`,\n );\n } else {\n this.#lc.warn?.(\n 
`subscriber at watermark ${sub.watermark} is ahead of latest watermark`,\n );\n }\n // Flushes the backlog of messages buffered during catchup and\n // allows the subscription to forward subsequent messages immediately.\n sub.setCaughtUp();\n });\n } catch (err) {\n this.#lc.error?.(`error while catching up subscriber ${sub.id}`, err);\n if (err instanceof AutoResetSignal) {\n await markResetRequired(this.#db, this.#shard);\n this.#onFatal(err);\n }\n sub.fail(err);\n }\n }\n\n /**\n * Returns the db statements necessary to track backfill and table metadata\n * presented in the `change`, if any.\n */\n #trackBackfillMetadata(sql: PostgresTransaction, change: SchemaChange) {\n const stmts: PendingQuery<Row[]>[] = [];\n\n switch (change.tag) {\n case 'update-table-metadata': {\n const {table, new: metadata} = change;\n stmts.push(this.#upsertTableMetadataStmt(sql, table, metadata));\n break;\n }\n\n case 'create-table': {\n const {spec, metadata, backfill} = change;\n if (metadata) {\n stmts.push(this.#upsertTableMetadataStmt(sql, spec, metadata));\n }\n if (backfill) {\n Object.entries(backfill).forEach(([col, backfill]) => {\n stmts.push(\n this.#upsertColumnBackfillStmt(sql, spec, col, backfill),\n );\n });\n }\n break;\n }\n\n case 'rename-table': {\n const {old} = change;\n const row = {schema: change.new.schema, table: change.new.name};\n stmts.push(\n sql`UPDATE ${this.#cdc('tableMetadata')} SET ${sql(row)}\n WHERE \"schema\" = ${old.schema} AND \"table\" = ${old.name}`,\n sql`UPDATE ${this.#cdc('backfilling')} SET ${sql(row)}\n WHERE \"schema\" = ${old.schema} AND \"table\" = ${old.name}`,\n );\n break;\n }\n\n case 'drop-table': {\n const {\n id: {schema, name},\n } = change;\n stmts.push(\n sql`DELETE FROM ${this.#cdc('tableMetadata')}\n WHERE \"schema\" = ${schema} AND \"table\" = ${name}`,\n sql`DELETE FROM ${this.#cdc('backfilling')}\n WHERE \"schema\" = ${schema} AND \"table\" = ${name}`,\n );\n break;\n }\n\n case 'add-column': {\n const {table, 
tableMetadata, column, backfill} = change;\n if (tableMetadata) {\n stmts.push(this.#upsertTableMetadataStmt(sql, table, tableMetadata));\n }\n if (backfill) {\n stmts.push(\n this.#upsertColumnBackfillStmt(sql, table, column.name, backfill),\n );\n }\n break;\n }\n\n case 'update-column': {\n const {\n table: {schema, name: table},\n old: {name: oldName},\n new: {name: newName},\n } = change;\n if (oldName !== newName) {\n stmts.push(\n sql`UPDATE ${this.#cdc('backfilling')} SET \"column\" = ${newName}\n WHERE \"schema\" = ${schema} AND \"table\" = ${table} AND \"column\" = ${oldName}`,\n );\n }\n break;\n }\n\n case 'drop-column': {\n const {\n table: {schema, name},\n column,\n } = change;\n stmts.push(\n sql`DELETE FROM ${this.#cdc('backfilling')}\n WHERE \"schema\" = ${schema} AND \"table\" = ${name} AND \"column\" = ${column}`,\n );\n break;\n }\n\n case 'backfill-completed': {\n const {\n relation: {schema, name: table, rowKey},\n columns,\n } = change;\n const cols = [...rowKey.columns, ...columns];\n stmts.push(\n sql`DELETE FROM ${this.#cdc('backfilling')}\n WHERE \"schema\" = ${schema} AND \"table\" = ${table} AND \"column\" IN ${sql(cols)}`,\n );\n }\n }\n return stmts;\n }\n\n #upsertTableMetadataStmt(\n sql: PostgresTransaction,\n {schema, name: table}: Identifier,\n metadata: TableMetadata,\n ) {\n const row: TableMetadataRow = {schema, table, metadata};\n return sql`\n INSERT INTO ${this.#cdc('tableMetadata')} ${sql(row)}\n ON CONFLICT (\"schema\", \"table\") \n DO UPDATE SET ${sql(row)};\n `;\n }\n\n #upsertColumnBackfillStmt(\n sql: PostgresTransaction,\n {schema, name: table}: Identifier,\n column: string,\n backfill: BackfillID,\n ) {\n const row: BackfillingColumn = {schema, table, column, backfill};\n return sql`\n INSERT INTO ${this.#cdc('backfilling')} ${sql(row)}\n ON CONFLICT (\"schema\", \"table\", \"column\") \n DO UPDATE SET ${sql(row)};\n `;\n }\n\n /**\n * Waits until all currently queued entries have been processed.\n * This is only 
used in tests.\n */\n async allProcessed() {\n if (this.#running) {\n const {promise, resolve} = resolver();\n this.#queue.enqueue(['ready', resolve]);\n await promise;\n }\n }\n\n stop() {\n if (this.#running) {\n this.#lc.info?.(`draining ${this.#queue.size()} changeLog entries`);\n this.#queue.enqueue('stop');\n }\n return this.#stopped;\n }\n}\n\nfunction toDownstream(entry: ChangeEntry): WatermarkedChange {\n const {watermark, change} = entry;\n switch (change.tag) {\n case 'begin':\n return [watermark, ['begin', change, {commitWatermark: watermark}]];\n case 'commit':\n return [watermark, ['commit', change, {watermark}]];\n case 'rollback':\n return [watermark, ['rollback', change]];\n default:\n return [watermark, ['data', change]];\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;AAwEA,IAAM,yBAAyB,eAAE,MAAM,sBAAsB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAgC7D,IAAa,SAAb,MAAuC;CACrC,KAAc;CACd;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA,SAAkB,IAAI,OAAmB;CACzC;CAEA,0BAA0B;CAC1B,WAAW;CAEX,YACE,IACA,OACA,QACA,kBACA,mBACA,IACA,gBACA,YACA,SACA,iCACA;AACA,QAAA,KAAW,GAAG,YAAY,aAAa,aAAa;AACpD,QAAA,QAAc;AACd,QAAA,SAAe;AACf,QAAA,mBAAyB;AACzB,QAAA,oBAA0B;AAC1B,QAAA,KAAW;AACX,QAAA,iBAAuB;AACvB,QAAA,aAAmB;AACnB,QAAA,UAAgB;EAEhB,MAAM,YAAY,mBAAmB;AACrC,QAAA,8BACG,UAAU,kBAAkB,UAAU,kBACvC;AAEF,QAAA,GAAS,OACP,gBAAgB,MAAA,6BAAmC,QAAQ,GAAG,QAAQ,EAAE,CAAC,iCAC5C,UAAU,kBAAkB,QAAQ,GAAG,QAAQ,EAAE,CAAC,iCAE/E,EAAC,WAAU,CACZ;;CAIH,KAAK,OAAe;AAClB,SAAO,MAAA,GAAS,GAAG,UAAU,MAAA,MAAY,CAAC,GAAG,QAAQ;;CAGvD,MAAM,kBAAkB;EACtB,MAAM,KAAK,MAAA;EACX,MAAM,QAAQ,MAAA;EACd,MAAM,eAAe,MAAA;EACrB,MAAM,gBAAgB,MAAA;EAEtB,MAAM,sBACJ,kBAAkB,OACd,eACA,GAAG,cAAc,KAAK;AAC5B,QAAA,GAAS,OAAO,yBAAyB,sBAAsB;EAC/D,MAAM,QAAQ,YAAY,KAAK;AAC/B,QAAM,EAAE,UAAU,MAAA,IAAU,mBAAmB,CAAC,OAAO,GAAG;GAAC;GAAO,cAAc;GAAoB,CAAC;EACrG,MAAM,WAAW,YAAY,KAAK,GAAG,OAAO,QAAQ,EAAE;AACtD,QAAA,GAAS,OACP,wBAAwB,oBAAoB,IAAI,QAAQ,MACzD;;CAGH,MAAM,yCAGH;EACD,MAAM,CAAC,CAAC,EAAC,kBAAiB,UAAU,MAAM,MACxC,MAAA,KACA,QAAO,CACL,GAA8B;sCACA,MAAA,IAAU,mBAAmB,IAK3D,GAAG
;;;;;;;;;iBASM,MAAA,IAAU,cAAc,CAAC;sBACpB,MAAA,IAAU,gBAAgB,CAAC;;;UAI1C,EACD,EAAC,MAAM,UAAc,CACtB;AAED,SAAO;GACL;GACA,kBAAkB,MAAQ,QAAQ,uBAAuB;GAC1D;;CAGH,MAAM,4BAAoD;EACxD,MAAM,CAAC,EAAC,kBAAiB,MAAM,MAAA,EAErB;qDACuC,MAAA,IAAU,YAAY;AACvE,SAAO;;CAGT,mBAAmB,WAAoC;AACrD,SAAO,MAAM,MAAA,IAAU,OAAM,QAAO;GAClC,MAAM,CAAC,EAAC,aAAY,MAAM,GAAwB;;wBAEhC,MAAA,IAAU,YAAY,CAAC,qBAAqB,UAAU;;;GAOxE,MAAM,CAAC,EAAC,WAAU,MAAM,GAAuB;wBAC7B,MAAA,IAAU,mBAAmB,CAAC;AAChD,OAAI,UAAU,MAAA,OACZ,OAAM,IAAI,WACR,+BAA+B,UAAU,uCAAuC,QACjF;AAEH,UAAO,OAAO,QAAQ;IACtB;;;;;CAMJ,MAAM,OAA0B;EAC9B,MAAM,CAAC,WAAW,CAAC,MAAM,WAAW;EAQpC,MAAM,OAAO,WAAW,UAAU,OAAO;AACzC,QAAA,0BAAgC,KAAK;AAErC,QAAA,MAAY,QAAQ;GAClB;GACA;GACA;GACA,aAAa,OAAO,GAAG,OAAO;GAC/B,CAAC;AAEF,SAAO,KAAK;;CAGd,QAAQ;AACN,QAAA,MAAY,QAAQ,CAAC,QAAQ,CAAC;;CAGhC,OAAO,GAA4B;AACjC,QAAA,MAAY,QAAQ,EAAE;;CAGxB,QAAQ,YAAwB,MAAsB;AACpD,QAAA,MAAY,QAAQ,CAAC,cAAc;GAAC;GAAY;GAAK,CAAC,CAAC;;CAGzD,gBAAuC;CAEvC,eAA0C;AACxC,MAAI,CAAC,MAAA,QACH;AAEF,MACE,MAAA,iBAAuB,QACvB,MAAA,yBAA+B,MAAA,4BAC/B;AACA,SAAA,GAAS,OACP,+BAA+B,MAAA,MAAY,MAAM,CAAC,qBAAqB,MAAA,yBAA+B,QAAQ,GAAG,QAAQ,EAAE,CAAC,4SAW7H;AACD,SAAA,eAAqB,UAAU;;AAEjC,SAAO,MAAA,cAAoB;;CAG7B,4BAA4B;AAC1B,MACE,MAAA,iBAAuB,QAEvB,MAAA,yBAA+B,MAAA,6BAAmC,IAClE;AACA,SAAA,GAAS,OACP,gCAAgC,MAAA,MAAY,MAAM,CAAC,qBAAqB,MAAA,yBAA+B,QAAQ,GAAG,QAAQ,EAAE,CAAC,MAC9H;AACD,SAAA,aAAmB,SAAS;AAC5B,SAAA,eAAqB;;;CAIzB,WAAW;;;;;CAMX,MAAM,MAAM;AACV,SAAO,CAAC,MAAA,SAAe,4BAA4B;EAEnD,MAAM,EAAC,SAAS,SAAS,SAAS,kBAAiB,UAAU;AAC7D,QAAA,UAAgB;AAChB,QAAA,UAAgB;AAEhB,QAAA,GAAS,OAAO,kBAAkB;EAClC,IAAI;AACJ,MAAI;AACF,SAAM,MAAA,cAAoB;WACnB,GAAG;AACV,SAAM;AACN,SAAM;YACE;AAER,OAAI,MAAA,iBAAuB,MAAM;AAC/B,UAAA,aAAmB,SAAS;AAC5B,UAAA,eAAqB;;AAEvB,SAAA,mBACE,MAAA,MAAY,OAAO,CAAC,QAAO,UAAS,UAAU,KAAA,EAAU,EACxD,IACD;AACD,SAAA,UAAgB;AAChB,kBAAe;AACf,SAAA,GAAS,OAAO,iBAAiB;;;CAIrC,oBAAoB,OAAqB,GAAY;AACnD,MAAI,MAAM,WAAW,EACnB;AAEF,QAAA,GAAS,OACP,aAAa,MAAM,OAAO,mCAC3B;EACD,MAAM,MAAM,aAAa,QAAQ,IAAI,IAAI,WAAW,uBAAuB;AAC3E,OAAK,MAAM,SAAS,OAAO;AACzB,OAAI,UAAU,OACZ;AAGF,WADa,MA
AM,IACnB;IACE,KAAK,cAAc;KAGjB,MAAM,EAAC,eAAc,MAAM;AAC3B,WAAA,GAAS,OAAO,iBAAiB,WAAW,KAAK;AACjD,gBAAW,KAAK,IAAI;AACpB;;;;;CAMR,OAAA,eAAsB;EACpB,IAAI,KAAgC;EACpC,IAAI;EAEJ,MAAM,eAAoC,EAAE;AAC5C,MAAI;AACF,WAAQ,MAAM,MAAM,MAAA,MAAY,SAAS,MAAM,QAAQ;IACrD,MAAM,CAAC,WAAW;AAClB,YAAQ,SAAR;KACE,KAAK,SAAS;MACZ,MAAM,cAAc,IAAI;AACxB,mBAAa;AACb;;KAEF,KAAK,cAAc;MACjB,MAAM,aAAa,IAAI;AACvB,UAAI,GACF,cAAa,KAAK,WAAW;UAE7B,OAAM,MAAA,aAAmB,CAAC,WAAW,CAAC;AAExC;;KAEF,KAAK;AACH,YAAA,WAAiB,IAAI;AACrB;KACF,KAAK;AACH,UAAI,IAAI;AACN,UAAG,KAAK,OAAO;AACf,aAAM,GAAG,KAAK,MAAM;AACpB,YAAK;;AAEP;;IAIJ,MAAM,CAAC,GAAG,WAAW,MAAM,UAAU;IACrC,MAAM,MAAM,QAAQ;AACpB,UAAA,0BAAgC,KAAK;AAErC,QAAI,QAAQ,SAAS;AACnB,YAAO,CAAC,IAAI,gDAAgD;KAC5D,MAAM,EAAC,SAAS,SAAS,WAAU,UAA4B;AAC1D,aAAQ,YAAY,GAAG;AAC5B,UAAK;MACH,MAAM,IAAI,gBACR,MAAA,GAAS,YAAY,aAAa,UAAU,EAC5C,eACD;MACD,oBAAoB;MACpB,KAAK;MACL,0BAA0B;MAC1B,KAAK,CAAC,OAAO;MACd;AACD,QAAG,KAAK,IAAI,MAAA,GAAS;AAGhB,QAAG,KAAK,SAAQ,OAAM;AACzB,QAA+B;0BACjB,MAAA,IAAU,mBAAmB,CAAC,aAAa,MACtD,CAAC,YAAY,QAAQ,OAAO,EAC7B,OACD;AACD,aAAO,EAAE;OACT;WACG;AACL,YAAO,UAAU,2CAA2C,OAAO;AACnE,QAAG;;IAGL,MAAM,QAAQ;KACZ,WAAW,QAAQ,WAAW,YAAY,GAAG;KAC7C,WAAW,QAAQ,WAAW,GAAG,qBAAqB;KACtD,KAAK,GAAG;KACR,QAAQ;KACT;IAED,MAAM,YAAY,GAAG,KAAK,SAAQ,QAAO,CACvC,GAAG,eAAe,MAAA,IAAU,YAAY,CAAC,GAAG,IAAI,MAAM,IACtD,GAAI,WAAW,QAAQ,eAAe,OAAO,GACzC,MAAA,sBAA4B,KAAK,OAAO,GACxC,EAAE,CACP,CAAC;AAEF,QAAI,GAAG,MAAM,QAAQ,EAInB,OAAM;AAER,UAAA,0BAAgC;AAEhC,QAAI,QAAQ,UAAU;KACpB,MAAM,EAAC,UAAS,MAAM,GAAG;AACzB,SAAI,UAAU,MAAA,OAEZ,IAAG,KAAK,KACN,IAAI,WACF,2CAA2C,QAC5C,CACF;UACI;MAEL,MAAM,gBAAgB;AACjB,SAAG,KAAK,SAAQ,OAAM,CACzB,EAAE;qBACK,MAAA,IAAU,mBAAmB,CAAC,OAAO,GAAG,EAAC,eAAc,CAAC,GAChE,CAAC;AACF,SAAG,KAAK,SAAS;;AAGnB,WAAM,GAAG,KAAK,MAAM;AAGpB,SAAI,GAAG,IACL,OAAA,WAAiB;MAAC;MAAU;MAAQ,EAAC,WAAU;MAAC,CAAC;AAEnD,UAAK;AAIL,WAAM,MAAA,aAAmB,aAAa,OAAO,EAAE,CAAC;eACvC,QAAQ,YAAY;AAG7B,QAAG,KAAK,OAAO;AACf,WAAM,GAAG,KAAK,MAAM;AACpB,UAAK;AAEL,WAAM,MAAA,aAAmB,aAAa,OAAO,EAAE,CAAC;;;WAG7C,GAAG;AACV,gBAAa,SAAS,EAAC,iBAAgB,WAAW,KAAK,EAAE,CAAC;AA
C1D,SAAM;;;CAIV,OAAA,aAAoB,MAA2B;AAC7C,MAAI,KAAK,WAAW,EAClB;EAGF,MAAM,SAAS,IAAI,gBACjB,MAAA,GAAS,YAAY,QAAQ,UAAU,EACvC,SACD;AACD,SAAO,IAAI,MAAA,GAAS;EAEpB,IAAI;AACJ,MAAI;AAKF,IAAC,CAAC,kBAAkB,MAAM,OAAO,iBAC/B,QAAO,GAAuB;wBACd,MAAA,IAAU,mBAAmB,CAAC;QAE/C;WACM,GAAG;AACV,QAAK,KAAK,EAAC,iBAAgB,WAAW,KAAK,EAAE,CAAC;AAC9C,SAAM;;AAKH,UAAQ,IACX,KAAK,KAAI,QAAO,MAAA,QAAc,KAAK,eAAe,OAAO,CAAC,CAC3D,CAAC,cAAc,OAAO,SAAS,CAAC;;CAGnC,OAAA,QACE,EAAC,YAAY,KAAK,QAClB,eACA,QACA;AACA,MAAI;AACF,SAAM,OAAO,gBAAgB,OAAM,OAAM;IACvC,MAAM,QAAQ,KAAK,KAAK;IAIxB,IAAI,iBAAiB,IAAI,cAAc,MAAA;IACvC,IAAI,QAAQ;IACZ,IAAI;AAEJ,eAAW,MAAM,WAAW,EAA0B;0CACpB,MAAA,IAAU,YAAY,CAAC;gCACjC,IAAI,UAAU;gCACd,cAAc;oCACV,OAAO,IAAK,EAAE;KAOxC,MAAM,QAAQ,YAAY,KAAK;AAC/B,WAAM;KACN,MAAM,UAAU,YAAY,KAAK,GAAG;AACpC,SAAI,kBACF,EAAC,UAAU,MAAM,MAAA,GAAS,OAAO,MAAA,GAAS,SACxC,UAAU,QAAQ,QAAQ,EAAE,CAAC,UAAU,IAAI,GAAG,2CAC/C;AAGH,UAAK,MAAM,SAAS,QAClB,KAAI,MAAM,cAAc,IAAI,UAG1B,kBAAiB;cACR,gBAAgB;AACzB,0BAAoB,IAAI,QAAQ,aAAa,MAAM,CAAC;AACpD;gBACS,SAAS,SAClB,OAAM,IAAI,gBACR,+BAA+B,IAAI,UAAU,wBAAwB,MAAM,UAAU,GACtF;UACI;AACL,YAAA,GAAS,OACP,qCAAqC,IAAI,UAAU,wBAAwB,MAAM,UAAU,GAC5F;AACD,UAAI,MACF,GACA,mCAAmC,MAAM,UAAU,cAAc,IAAI,UAAU,GAChF;AACD;;;AAIN,QAAI,gBAAgB;AAClB,WAAM;AACN,WAAA,GAAS,OACP,aAAa,IAAI,GAAG,QAAQ,MAAM,YAChC,KAAK,KAAK,GAAG,MACd,MACF;UAED,OAAA,GAAS,OACP,2BAA2B,IAAI,UAAU,+BAC1C;AAIH,QAAI,aAAa;KACjB;WACK,KAAK;AACZ,SAAA,GAAS,QAAQ,sCAAsC,IAAI,MAAM,IAAI;AACrE,OAAI,eAAe,iBAAiB;AAClC,UAAM,kBAAkB,MAAA,IAAU,MAAA,MAAY;AAC9C,UAAA,QAAc,IAAI;;AAEpB,OAAI,KAAK,IAAI;;;;;;;CAQjB,uBAAuB,KAA0B,QAAsB;EACrE,MAAM,QAA+B,EAAE;AAEvC,UAAQ,OAAO,KAAf;GACE,KAAK,yBAAyB;IAC5B,MAAM,EAAC,OAAO,KAAK,aAAY;AAC/B,UAAM,KAAK,MAAA,wBAA8B,KAAK,OAAO,SAAS,CAAC;AAC/D;;GAGF,KAAK,gBAAgB;IACnB,MAAM,EAAC,MAAM,UAAU,aAAY;AACnC,QAAI,SACF,OAAM,KAAK,MAAA,wBAA8B,KAAK,MAAM,SAAS,CAAC;AAEhE,QAAI,SACF,QAAO,QAAQ,SAAS,CAAC,SAAS,CAAC,KAAK,cAAc;AACpD,WAAM,KACJ,MAAA,yBAA+B,KAAK,MAAM,KAAK,SAAS,CACzD;MACD;AAEJ;;GAGF,KAAK,gBAAgB;IACnB,MAAM,EAAC,QAAO;IACd,MAAM,MAAM;KAAC,QAAQ,OAAO,IAAI;KAAQ,OAAO,OAA
O,IAAI;KAAK;AAC/D,UAAM,KACJ,GAAG,UAAU,MAAA,IAAU,gBAAgB,CAAC,OAAO,IAAI,IAAI,CAAC;mCAC/B,IAAI,OAAO,iBAAiB,IAAI,QACzD,GAAG,UAAU,MAAA,IAAU,cAAc,CAAC,OAAO,IAAI,IAAI,CAAC;mCAC7B,IAAI,OAAO,iBAAiB,IAAI,OAC1D;AACD;;GAGF,KAAK,cAAc;IACjB,MAAM,EACJ,IAAI,EAAC,QAAQ,WACX;AACJ,UAAM,KACJ,GAAG,eAAe,MAAA,IAAU,gBAAgB,CAAC;mCACpB,OAAO,iBAAiB,QACjD,GAAG,eAAe,MAAA,IAAU,cAAc,CAAC;mCAClB,OAAO,iBAAiB,OAClD;AACD;;GAGF,KAAK,cAAc;IACjB,MAAM,EAAC,OAAO,eAAe,QAAQ,aAAY;AACjD,QAAI,cACF,OAAM,KAAK,MAAA,wBAA8B,KAAK,OAAO,cAAc,CAAC;AAEtE,QAAI,SACF,OAAM,KACJ,MAAA,yBAA+B,KAAK,OAAO,OAAO,MAAM,SAAS,CAClE;AAEH;;GAGF,KAAK,iBAAiB;IACpB,MAAM,EACJ,OAAO,EAAC,QAAQ,MAAM,SACtB,KAAK,EAAC,MAAM,WACZ,KAAK,EAAC,MAAM,cACV;AACJ,QAAI,YAAY,QACd,OAAM,KACJ,GAAG,UAAU,MAAA,IAAU,cAAc,CAAC,kBAAkB,QAAQ;mCACzC,OAAO,iBAAiB,MAAM,kBAAkB,UACxE;AAEH;;GAGF,KAAK,eAAe;IAClB,MAAM,EACJ,OAAO,EAAC,QAAQ,QAChB,WACE;AACJ,UAAM,KACJ,GAAG,eAAe,MAAA,IAAU,cAAc,CAAC;mCAClB,OAAO,iBAAiB,KAAK,kBAAkB,SACzE;AACD;;GAGF,KAAK,sBAAsB;IACzB,MAAM,EACJ,UAAU,EAAC,QAAQ,MAAM,OAAO,UAChC,YACE;IACJ,MAAM,OAAO,CAAC,GAAG,OAAO,SAAS,GAAG,QAAQ;AAC5C,UAAM,KACJ,GAAG,eAAe,MAAA,IAAU,cAAc,CAAC;mCAClB,OAAO,iBAAiB,MAAM,mBAAmB,IAAI,KAAK,GACpF;;;AAGL,SAAO;;CAGT,yBACE,KACA,EAAC,QAAQ,MAAM,SACf,UACA;EACA,MAAM,MAAwB;GAAC;GAAQ;GAAO;GAAS;AACvD,SAAO,GAAG;sBACQ,MAAA,IAAU,gBAAgB,CAAC,GAAG,IAAI,IAAI,CAAC;;0BAEnC,IAAI,IAAI,CAAC;;;CAIjC,0BACE,KACA,EAAC,QAAQ,MAAM,SACf,QACA,UACA;EACA,MAAM,MAAyB;GAAC;GAAQ;GAAO;GAAQ;GAAS;AAChE,SAAO,GAAG;sBACQ,MAAA,IAAU,cAAc,CAAC,GAAG,IAAI,IAAI,CAAC;;0BAEjC,IAAI,IAAI,CAAC;;;;;;;CAQjC,MAAM,eAAe;AACnB,MAAI,MAAA,SAAe;GACjB,MAAM,EAAC,SAAS,YAAW,UAAU;AACrC,SAAA,MAAY,QAAQ,CAAC,SAAS,QAAQ,CAAC;AACvC,SAAM;;;CAIV,OAAO;AACL,MAAI,MAAA,SAAe;AACjB,SAAA,GAAS,OAAO,YAAY,MAAA,MAAY,MAAM,CAAC,oBAAoB;AACnE,SAAA,MAAY,QAAQ,OAAO;;AAE7B,SAAO,MAAA;;;AAIX,SAAS,aAAa,OAAuC;CAC3D,MAAM,EAAC,WAAW,WAAU;AAC5B,SAAQ,OAAO,KAAf;EACE,KAAK,QACH,QAAO,CAAC,WAAW;GAAC;GAAS;GAAQ,EAAC,iBAAiB,WAAU;GAAC,CAAC;EACrE,KAAK,SACH,QAAO,CAAC,WAAW;GAAC;GAAU;GAAQ,EAAC,WAAU;GAAC,CAAC;EACrD,KAAK,WACH,QAAO,CAAC,WAAW,CAAC,YAAY,OAAO,
CAAC;EAC1C,QACE,QAAO,CAAC,WAAW,CAAC,QAAQ,OAAO,CAAC"}
1
+ {"version":3,"file":"storer.js","names":["#lc","#shard","#taskID","#discoveryAddress","#discoveryProtocol","#db","#replicaVersion","#onConsumed","#onFatal","#queue","#backPressureThresholdBytes","#cdc","#approximateQueuedBytes","#running","#readyForMore","#stopped","#processQueue","#cancelQueueEntries","#startCatchup","#trackBackfillMetadata","#maybeReleaseBackPressure","#catchup","#upsertTableMetadataStmt","#upsertColumnBackfillStmt"],"sources":["../../../../../../zero-cache/src/services/change-streamer/storer.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {resolver, type Resolver} from '@rocicorp/resolver';\nimport {getHeapStatistics} from 'node:v8';\nimport {type PendingQuery, type Row} from 'postgres';\nimport {AbortError} from '../../../../shared/src/abort-error.ts';\nimport {assert} from '../../../../shared/src/asserts.ts';\nimport {BigIntJSON} from '../../../../shared/src/bigint-json.ts';\nimport {Queue} from '../../../../shared/src/queue.ts';\nimport {promiseVoid} from '../../../../shared/src/resolved-promises.ts';\nimport * as v from '../../../../shared/src/valita.ts';\nimport * as Mode from '../../db/mode-enum.ts';\nimport {runTx} from '../../db/run-transaction.ts';\nimport {TransactionPool} from '../../db/transaction-pool.ts';\nimport {type PostgresDB, type PostgresTransaction} from '../../types/pg.ts';\nimport {cdcSchema, type ShardID} from '../../types/shards.ts';\nimport {\n backfillRequestSchema,\n isDataChange,\n isSchemaChange,\n type BackfillID,\n type BackfillRequest,\n type Change,\n type DataChange,\n type Identifier,\n type SchemaChange,\n type TableMetadata,\n} from '../change-source/protocol/current.ts';\nimport {type Commit} from '../change-source/protocol/current/downstream.ts';\nimport type {\n DownstreamStatusMessage,\n UpstreamStatusMessage,\n} from '../change-source/protocol/current/status.ts';\nimport type {ReplicatorMode} from '../replicator/replicator.ts';\nimport type {Service} from 
'../service.ts';\nimport type {WatermarkedChange} from './change-streamer-service.ts';\nimport {type ChangeEntry} from './change-streamer.ts';\nimport * as ErrorType from './error-type-enum.ts';\nimport {\n AutoResetSignal,\n markResetRequired,\n type BackfillingColumn,\n type ReplicationState,\n type TableMetadataRow,\n} from './schema/tables.ts';\nimport type {Subscriber} from './subscriber.ts';\n\ntype SubscriberAndMode = {\n subscriber: Subscriber;\n mode: ReplicatorMode;\n};\n\ntype QueueEntry =\n | [\n 'change',\n watermark: string,\n json: string,\n orig: Exclude<Change, DataChange> | null, // null for DataChanges\n ]\n | ['ready', callback: () => void]\n | ['subscriber', SubscriberAndMode]\n | DownstreamStatusMessage\n | ['abort']\n | 'stop';\n\ntype PendingTransaction = {\n pool: TransactionPool;\n preCommitWatermark: string;\n pos: number;\n startingReplicationState: Promise<ReplicationState>;\n ack: boolean;\n};\n\nconst backfillRequestsSchema = v.array(backfillRequestSchema);\n\n/**\n * Handles the storage of changes and the catchup of subscribers\n * that are behind.\n *\n * In the context of catchup and cleanup, it is the responsibility of the\n * Storer to decide whether a client can be caught up, or whether the\n * changes needed to catch a client up have been purged.\n *\n * **Maintained invariant**: The Change DB is only empty for a\n * completely new replica (i.e. initial-sync with no changes from the\n * replication stream).\n * * In this case, all new subscribers are expected start from the\n * `replicaVersion`, which is the version at which initial sync\n * was performed, and any attempts to catchup from a different\n * point fail.\n *\n * Conversely, if non-initial changes have flowed through the system\n * (i.e. 
via the replication stream), the ChangeDB must *not* be empty,\n * and the earliest change in the `changeLog` represents the earliest\n * \"commit\" from (after) which a subscriber can be caught up.\n * * Any attempts to catchup from an earlier point must fail with\n * a `WatermarkTooOld` error.\n * * Failure to do so could result in streaming changes to the\n * subscriber such that there is a gap in its replication history.\n *\n * Note: Subscribers (i.e. `incremental-syncer`) consider an \"error\" signal\n * an unrecoverable error and shut down in response. This allows the\n * production system to replace it with a new task and fresh copy of the\n * replica backup.\n */\nexport class Storer implements Service {\n readonly id = 'storer';\n readonly #lc: LogContext;\n readonly #shard: ShardID;\n readonly #taskID: string;\n readonly #discoveryAddress: string;\n readonly #discoveryProtocol: string;\n readonly #db: PostgresDB;\n readonly #replicaVersion: string;\n readonly #onConsumed: (c: Commit | UpstreamStatusMessage) => void;\n readonly #onFatal: (err: Error) => void;\n readonly #queue = new Queue<QueueEntry>();\n readonly #backPressureThresholdBytes: number;\n\n #approximateQueuedBytes = 0;\n #running = false;\n\n constructor(\n lc: LogContext,\n shard: ShardID,\n taskID: string,\n discoveryAddress: string,\n discoveryProtocol: string,\n db: PostgresDB,\n replicaVersion: string,\n onConsumed: (c: Commit | UpstreamStatusMessage) => void,\n onFatal: (err: Error) => void,\n backPressureLimitHeapProportion: number,\n ) {\n this.#lc = lc.withContext('component', 'change-log');\n this.#shard = shard;\n this.#taskID = taskID;\n this.#discoveryAddress = discoveryAddress;\n this.#discoveryProtocol = discoveryProtocol;\n this.#db = db;\n this.#replicaVersion = replicaVersion;\n this.#onConsumed = onConsumed;\n this.#onFatal = onFatal;\n\n const heapStats = getHeapStatistics();\n this.#backPressureThresholdBytes =\n (heapStats.heap_size_limit - heapStats.used_heap_size) *\n 
backPressureLimitHeapProportion;\n\n this.#lc.info?.(\n `Using up to ${(this.#backPressureThresholdBytes / 1024 ** 2).toFixed(2)} MB of ` +\n `--max-old-space-size (~${(heapStats.heap_size_limit / 1024 ** 2).toFixed(2)} MB) ` +\n `to absorb upstream spikes`,\n {heapStats},\n );\n }\n\n // For readability in SQL statements.\n #cdc(table: string) {\n return this.#db(`${cdcSchema(this.#shard)}.${table}`);\n }\n\n async assumeOwnership() {\n const db = this.#db;\n const owner = this.#taskID;\n const ownerAddress = this.#discoveryAddress;\n const ownerProtocol = this.#discoveryProtocol;\n // we omit `ws://` so that old view syncer versions that are not expecting the protocol continue to not get it\n const addressWithProtocol =\n ownerProtocol === 'ws'\n ? ownerAddress\n : `${ownerProtocol}://${ownerAddress}`;\n this.#lc.info?.(`assuming ownership at ${addressWithProtocol}`);\n const start = performance.now();\n await db`UPDATE ${this.#cdc('replicationState')} SET ${db({owner, ownerAddress: addressWithProtocol})}`;\n const elapsed = (performance.now() - start).toFixed(2);\n this.#lc.info?.(\n `assumed ownership at ${addressWithProtocol} (${elapsed} ms)`,\n );\n }\n\n async getStartStreamInitializationParameters(): Promise<{\n lastWatermark: string;\n backfillRequests: BackfillRequest[];\n }> {\n const [[{lastWatermark}], result] = await runTx(\n this.#db,\n sql => [\n sql<{lastWatermark: string}[]>`\n SELECT \"lastWatermark\" FROM ${this.#cdc('replicationState')}`,\n\n // Formats a BackfillRequest using json_object_agg() to construct the\n // `columns` object. 
It is LEFT JOIN'ed with the `tableMetadata` table\n // to make it optional and possibly `null`.\n sql`\n SELECT \n json_build_object(\n 'schema', b.\"schema\",\n 'name', b.\"table\",\n 'metadata', t.\"metadata\"\n ) as \"table\",\n json_object_agg(b.\"column\", b.\"backfill\") \n as \"columns\"\n FROM ${this.#cdc('backfilling')} as b\n LEFT JOIN ${this.#cdc('tableMetadata')} as t\n ON (b.\"schema\" = t.\"schema\" AND b.\"table\" = t.\"table\")\n GROUP BY b.\"schema\", b.\"table\", t.\"metadata\"\n `,\n ],\n {mode: Mode.READONLY},\n );\n\n return {\n lastWatermark,\n backfillRequests: v.parse(result, backfillRequestsSchema),\n };\n }\n\n async getMinWatermarkForCatchup(): Promise<string | null> {\n const [{minWatermark}] = await this.#db<{minWatermark: string | null}[]>\n /*sql*/ `\n SELECT min(watermark) as \"minWatermark\" FROM ${this.#cdc('changeLog')}`;\n return minWatermark;\n }\n\n purgeRecordsBefore(watermark: string): Promise<number> {\n return runTx(this.#db, async sql => {\n const [{deleted}] = await sql<{deleted: bigint}[]>`\n WITH purged AS (\n DELETE FROM ${this.#cdc('changeLog')} WHERE watermark < ${watermark} \n RETURNING watermark, pos\n ) SELECT COUNT(*) as deleted FROM purged;`;\n\n // Before committing the purge, check that this process is still the\n // owner. This is done after the DELETE to minimize the amount of time\n // that writes to the changeLog are delayed.\n const [{owner}] = await sql<ReplicationState[]>`\n SELECT * FROM ${this.#cdc('replicationState')} FOR SHARE`;\n if (owner !== this.#taskID) {\n throw new AbortError(\n `aborting changeLog purge to ${watermark} because ownership has been taken by ${owner}`,\n );\n }\n return Number(deleted);\n });\n }\n\n /**\n * @returns The size of the serialized entry, for memory / I/O estimations.\n */\n store(entry: WatermarkedChange) {\n const [watermark, [_tag, change]] = entry;\n // Eagerly stringify the JSON object so that the memory usage can be\n // more accurately measured (i.e. 
without an extra object traversal and\n // ad hoc memory counting heuristics).\n //\n // This essentially moves the stringify() computation out of the pg client,\n // which is instead configured to pass `string` objects directly as JSON\n // strings for JSON-valued columns (see TypeOptions.sendStringAsJson).\n const json = BigIntJSON.stringify(change);\n this.#approximateQueuedBytes += json.length;\n\n this.#queue.enqueue([\n 'change',\n watermark,\n json,\n isDataChange(change) ? null : change, // drop DataChanges to save memory\n ]);\n\n return json.length;\n }\n\n abort() {\n this.#queue.enqueue(['abort']);\n }\n\n status(s: DownstreamStatusMessage) {\n this.#queue.enqueue(s);\n }\n\n catchup(subscriber: Subscriber, mode: ReplicatorMode) {\n this.#queue.enqueue(['subscriber', {subscriber, mode}]);\n }\n\n #readyForMore: Resolver<void> | null = null;\n\n readyForMore(): Promise<void> | undefined {\n if (!this.#running) {\n return undefined;\n }\n if (\n this.#readyForMore === null &&\n this.#approximateQueuedBytes > this.#backPressureThresholdBytes\n ) {\n this.#lc.warn?.(\n `applying back pressure with ${this.#queue.size()} queued changes (~${(this.#approximateQueuedBytes / 1024 ** 2).toFixed(2)} MB)\\n` +\n `\\n` +\n `To inspect changeLog backlog in your change DB:\\n` +\n ` SELECT\\n` +\n ` (change->'relation'->>'schema') || '.' 
|| (change->'relation'->>'name') AS table_name,\\n` +\n ` change->>'tag' AS operation,\\n` +\n ` COUNT(*) AS count\\n` +\n ` FROM \"<app_id>/cdc\".\"changeLog\"\\n` +\n ` GROUP BY 1, 2\\n` +\n ` ORDER BY 3 DESC\\n` +\n ` LIMIT 20;`,\n );\n this.#readyForMore = resolver();\n }\n return this.#readyForMore?.promise;\n }\n\n #maybeReleaseBackPressure() {\n if (\n this.#readyForMore !== null &&\n // Wait for at least 20% of the threshold to free up.\n this.#approximateQueuedBytes < this.#backPressureThresholdBytes * 0.8\n ) {\n this.#lc.info?.(\n `releasing back pressure with ${this.#queue.size()} queued changes (~${(this.#approximateQueuedBytes / 1024 ** 2).toFixed(2)} MB)`,\n );\n this.#readyForMore.resolve();\n this.#readyForMore = null;\n }\n }\n\n #stopped = promiseVoid;\n\n /**\n * Runs the storer loop until {@link stop()} is called, or an error is thrown.\n * Once {@link run()} completes, it can be called again.\n */\n async run() {\n assert(!this.#running, `storer is already running`);\n\n const {promise: stopped, resolve: signalStopped} = resolver();\n this.#running = true;\n this.#stopped = stopped;\n\n this.#lc.info?.('starting storer');\n let err: unknown;\n try {\n await this.#processQueue();\n } catch (e) {\n err = e; // used in finally\n throw e;\n } finally {\n // Release any pending backpressure so the upstream can proceed\n if (this.#readyForMore !== null) {\n this.#readyForMore.resolve();\n this.#readyForMore = null;\n }\n this.#cancelQueueEntries(\n this.#queue.drain().filter(entry => entry !== undefined),\n err,\n );\n this.#running = false;\n signalStopped();\n this.#lc.info?.('storer stopped');\n }\n }\n\n #cancelQueueEntries(queue: QueueEntry[], e: unknown) {\n if (queue.length === 0) {\n return;\n }\n this.#lc.info?.(\n `canceling ${queue.length} entries from the changeLog queue`,\n );\n const err = e instanceof Error ? 
e : new AbortError('server shutting down');\n for (const entry of queue) {\n if (entry === 'stop') {\n continue;\n }\n const type = entry[0];\n switch (type) {\n case 'subscriber': {\n // Disconnect subscribers waiting to be caught up so that they can\n // reconnect and try again.\n const {subscriber} = entry[1];\n this.#lc.info?.(`disconnecting ${subscriber.id}`);\n subscriber.fail(err);\n break;\n }\n }\n }\n }\n\n async #processQueue() {\n let tx: PendingTransaction | null = null;\n let msg: QueueEntry | false;\n\n const catchupQueue: SubscriberAndMode[] = [];\n try {\n while ((msg = await this.#queue.dequeue()) !== 'stop') {\n const [msgType] = msg;\n switch (msgType) {\n case 'ready': {\n const signalReady = msg[1];\n signalReady();\n continue;\n }\n case 'subscriber': {\n const subscriber = msg[1];\n if (tx) {\n catchupQueue.push(subscriber); // Wait for the current tx to complete.\n } else {\n await this.#startCatchup([subscriber]); // Catch up immediately.\n }\n continue;\n }\n case 'status':\n this.#onConsumed(msg);\n continue;\n case 'abort': {\n if (tx) {\n tx.pool.abort();\n await tx.pool.done();\n tx = null;\n }\n continue;\n }\n }\n // msgType === 'change'\n const [_, watermark, json, change] = msg;\n const tag = change?.tag;\n this.#approximateQueuedBytes -= json.length;\n\n if (tag === 'begin') {\n assert(!tx, 'received BEGIN in the middle of a transaction');\n const {promise, resolve, reject} = resolver<ReplicationState>();\n void promise.catch(() => {}); // handle rejections before the await\n tx = {\n pool: new TransactionPool(\n this.#lc.withContext('watermark', watermark),\n Mode.READ_COMMITTED,\n ),\n preCommitWatermark: watermark,\n pos: 0,\n startingReplicationState: promise,\n ack: !change.skipAck,\n };\n tx.pool.run(this.#db);\n // Acquire a lock on the replicationState row to detect and/or prevent\n // a concurrent ownership change.\n void tx.pool.process(tx => {\n tx<ReplicationState[]> /*sql*/ `\n SELECT * FROM 
${this.#cdc('replicationState')} FOR UPDATE`.then(\n ([result]) => resolve(result),\n reject,\n );\n return [];\n });\n } else {\n assert(tx, () => `received change outside of transaction: ${json}`);\n tx.pos++;\n }\n\n const entry = {\n watermark: tag === 'commit' ? watermark : tx.preCommitWatermark,\n precommit: tag === 'commit' ? tx.preCommitWatermark : null,\n pos: tx.pos,\n change: json,\n };\n\n const processed = tx.pool.process(sql => [\n sql`INSERT INTO ${this.#cdc('changeLog')} ${sql(entry)}`,\n ...(change !== null && isSchemaChange(change)\n ? this.#trackBackfillMetadata(sql, change)\n : []),\n ]);\n\n if (tx.pos % 100 === 0) {\n // Backpressure is exerted on commit when awaiting tx.pool.done().\n // However, backpressure checks need to be regularly done for\n // very large transactions in order to avoid memory blowup.\n await processed;\n }\n this.#maybeReleaseBackPressure();\n\n if (tag === 'commit') {\n const {owner} = await tx.startingReplicationState;\n if (owner !== this.#taskID) {\n // Ownership change reflected in the replicationState read in 'begin'.\n tx.pool.fail(\n new AbortError(\n `changeLog ownership has been assumed by ${owner}`,\n ),\n );\n } else {\n // Update the replication state.\n const lastWatermark = watermark;\n void tx.pool.process(tx => [\n tx`\n UPDATE ${this.#cdc('replicationState')} SET ${tx({lastWatermark})}`,\n ]);\n tx.pool.setDone();\n }\n\n await tx.pool.done();\n\n // ACK the LSN to the upstream Postgres.\n if (tx.ack) {\n this.#onConsumed(['commit', change, {watermark}]);\n }\n tx = null;\n\n // Before beginning the next transaction, open a READONLY snapshot to\n // concurrently catchup any queued subscribers.\n await this.#startCatchup(catchupQueue.splice(0));\n } else if (tag === 'rollback') {\n // Aborted transactions are not stored in the changeLog. 
Abort the current tx\n // and process catchup of subscribers that were waiting for it to end.\n tx.pool.abort();\n await tx.pool.done();\n tx = null;\n\n await this.#startCatchup(catchupQueue.splice(0));\n }\n }\n } catch (e) {\n catchupQueue.forEach(({subscriber}) => subscriber.fail(e));\n throw e;\n }\n }\n\n async #startCatchup(subs: SubscriberAndMode[]) {\n if (subs.length === 0) {\n return;\n }\n\n const reader = new TransactionPool(\n this.#lc.withContext('pool', 'catchup'),\n Mode.READONLY,\n );\n reader.run(this.#db);\n\n let lastWatermark: string | undefined;\n try {\n // Ensure that the transaction has started (and is thus holding a snapshot\n // of the database) before continuing on to commit more changes. This is\n // done by performing a single read on the db, which determines the\n // snapshot for the REPEATABLE_READ transaction.\n [{lastWatermark}] = await reader.processReadTask(\n sql => sql<ReplicationState[]>`\n SELECT * FROM ${this.#cdc('replicationState')}\n `,\n );\n } catch (e) {\n subs.map(({subscriber}) => subscriber.fail(e));\n throw e;\n }\n\n // Run the actual catchup queries in the background. Errors are handled in\n // #catchup() by disconnecting the associated subscriber.\n void Promise.all(\n subs.map(sub => this.#catchup(sub, lastWatermark, reader)),\n ).finally(() => reader.setDone());\n }\n\n async #catchup(\n {subscriber: sub, mode}: SubscriberAndMode,\n lastWatermark: string,\n reader: TransactionPool,\n ) {\n try {\n await reader.processReadTask(async tx => {\n const start = Date.now();\n\n // When starting from initial-sync, there won't be a change with a watermark\n // equal to the replica version. 
This is the empty changeLog scenario.\n let watermarkFound = sub.watermark === this.#replicaVersion;\n let count = 0;\n let lastBatchConsumed: Promise<unknown> | undefined;\n\n for await (const entries of tx<ChangeEntry[]> /*sql*/ `\n SELECT watermark, change FROM ${this.#cdc('changeLog')}\n WHERE watermark >= ${sub.watermark}\n AND watermark <= ${lastWatermark}\n ORDER BY watermark, pos`.cursor(2000)) {\n // Wait for the last batch of entries to be consumed by the\n // subscriber before sending down the current batch. This pipelining\n // allows one batch of changes to be received from the change-db\n // while the previous batch of changes are sent to the subscriber,\n // resulting in flow control that caps the number of changes\n // referenced in memory to 2 * batch-size.\n const start = performance.now();\n await lastBatchConsumed;\n const elapsed = performance.now() - start;\n if (lastBatchConsumed) {\n (elapsed > 100 ? this.#lc.info : this.#lc.debug)?.(\n `waited ${elapsed.toFixed(3)} ms for ${sub.id} to consume last batch of catchup entries`,\n );\n }\n\n for (const entry of entries) {\n if (entry.watermark === sub.watermark) {\n // This should be the first entry.\n // Catchup starts from *after* the watermark.\n watermarkFound = true;\n } else if (watermarkFound) {\n lastBatchConsumed = sub.catchup(toDownstream(entry));\n count++;\n } else if (mode === 'backup') {\n throw new AutoResetSignal(\n `backup replica at watermark ${sub.watermark} is behind change db: ${entry.watermark})`,\n );\n } else {\n this.#lc.warn?.(\n `rejecting subscriber at watermark ${sub.watermark} (earliest watermark: ${entry.watermark})`,\n );\n sub.close(\n ErrorType.WatermarkTooOld,\n `earliest supported watermark is ${entry.watermark} (requested ${sub.watermark})`,\n );\n return;\n }\n }\n }\n if (watermarkFound) {\n await lastBatchConsumed;\n this.#lc.info?.(\n `caught up ${sub.id} with ${count} changes (${\n Date.now() - start\n } ms)`,\n );\n } else {\n this.#lc.warn?.(\n 
`subscriber at watermark ${sub.watermark} is ahead of latest watermark`,\n );\n }\n // Flushes the backlog of messages buffered during catchup and\n // allows the subscription to forward subsequent messages immediately.\n sub.setCaughtUp();\n });\n } catch (err) {\n this.#lc.error?.(`error while catching up subscriber ${sub.id}`, err);\n if (err instanceof AutoResetSignal) {\n await markResetRequired(this.#db, this.#shard);\n this.#onFatal(err);\n }\n sub.fail(err);\n }\n }\n\n /**\n * Returns the db statements necessary to track backfill and table metadata\n * presented in the `change`, if any.\n */\n #trackBackfillMetadata(sql: PostgresTransaction, change: SchemaChange) {\n const stmts: PendingQuery<Row[]>[] = [];\n\n switch (change.tag) {\n case 'update-table-metadata': {\n const {table, new: metadata} = change;\n stmts.push(this.#upsertTableMetadataStmt(sql, table, metadata));\n break;\n }\n\n case 'create-table': {\n const {spec, metadata, backfill} = change;\n if (metadata) {\n stmts.push(this.#upsertTableMetadataStmt(sql, spec, metadata));\n }\n if (backfill) {\n Object.entries(backfill).forEach(([col, backfill]) => {\n stmts.push(\n this.#upsertColumnBackfillStmt(sql, spec, col, backfill),\n );\n });\n }\n break;\n }\n\n case 'rename-table': {\n const {old} = change;\n const row = {schema: change.new.schema, table: change.new.name};\n stmts.push(\n sql`UPDATE ${this.#cdc('tableMetadata')} SET ${sql(row)}\n WHERE \"schema\" = ${old.schema} AND \"table\" = ${old.name}`,\n sql`UPDATE ${this.#cdc('backfilling')} SET ${sql(row)}\n WHERE \"schema\" = ${old.schema} AND \"table\" = ${old.name}`,\n );\n break;\n }\n\n case 'drop-table': {\n const {\n id: {schema, name},\n } = change;\n stmts.push(\n sql`DELETE FROM ${this.#cdc('tableMetadata')}\n WHERE \"schema\" = ${schema} AND \"table\" = ${name}`,\n sql`DELETE FROM ${this.#cdc('backfilling')}\n WHERE \"schema\" = ${schema} AND \"table\" = ${name}`,\n );\n break;\n }\n\n case 'add-column': {\n const {table, 
tableMetadata, column, backfill} = change;\n if (tableMetadata) {\n stmts.push(this.#upsertTableMetadataStmt(sql, table, tableMetadata));\n }\n if (backfill) {\n stmts.push(\n this.#upsertColumnBackfillStmt(sql, table, column.name, backfill),\n );\n }\n break;\n }\n\n case 'update-column': {\n const {\n table: {schema, name: table},\n old: {name: oldName},\n new: {name: newName},\n } = change;\n if (oldName !== newName) {\n stmts.push(\n sql`UPDATE ${this.#cdc('backfilling')} SET \"column\" = ${newName}\n WHERE \"schema\" = ${schema} AND \"table\" = ${table} AND \"column\" = ${oldName}`,\n );\n }\n break;\n }\n\n case 'drop-column': {\n const {\n table: {schema, name},\n column,\n } = change;\n stmts.push(\n sql`DELETE FROM ${this.#cdc('backfilling')}\n WHERE \"schema\" = ${schema} AND \"table\" = ${name} AND \"column\" = ${column}`,\n );\n break;\n }\n\n case 'backfill-completed': {\n const {\n relation: {schema, name: table, rowKey},\n columns,\n } = change;\n const cols = [...rowKey.columns, ...columns];\n stmts.push(\n sql`DELETE FROM ${this.#cdc('backfilling')}\n WHERE \"schema\" = ${schema} AND \"table\" = ${table} AND \"column\" IN ${sql(cols)}`,\n );\n }\n }\n return stmts;\n }\n\n #upsertTableMetadataStmt(\n sql: PostgresTransaction,\n {schema, name: table}: Identifier,\n metadata: TableMetadata,\n ) {\n const row: TableMetadataRow = {schema, table, metadata};\n return sql`\n INSERT INTO ${this.#cdc('tableMetadata')} ${sql(row)}\n ON CONFLICT (\"schema\", \"table\") \n DO UPDATE SET ${sql(row)};\n `;\n }\n\n #upsertColumnBackfillStmt(\n sql: PostgresTransaction,\n {schema, name: table}: Identifier,\n column: string,\n backfill: BackfillID,\n ) {\n const row: BackfillingColumn = {schema, table, column, backfill};\n return sql`\n INSERT INTO ${this.#cdc('backfilling')} ${sql(row)}\n ON CONFLICT (\"schema\", \"table\", \"column\") \n DO UPDATE SET ${sql(row)};\n `;\n }\n\n /**\n * Waits until all currently queued entries have been processed.\n * This is only 
used in tests.\n */\n async allProcessed() {\n if (this.#running) {\n const {promise, resolve} = resolver();\n this.#queue.enqueue(['ready', resolve]);\n await promise;\n }\n }\n\n stop() {\n if (this.#running) {\n this.#lc.info?.(`draining ${this.#queue.size()} changeLog entries`);\n this.#queue.enqueue('stop');\n }\n return this.#stopped;\n }\n}\n\nfunction toDownstream(entry: ChangeEntry): WatermarkedChange {\n const {watermark, change} = entry;\n switch (change.tag) {\n case 'begin':\n return [watermark, ['begin', change, {commitWatermark: watermark}]];\n case 'commit':\n return [watermark, ['commit', change, {watermark}]];\n case 'rollback':\n return [watermark, ['rollback', change]];\n default:\n return [watermark, ['data', change]];\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;AAwEA,IAAM,yBAAyB,eAAE,MAAM,sBAAsB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAgC7D,IAAa,SAAb,MAAuC;CACrC,KAAc;CACd;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA,SAAkB,IAAI,OAAmB;CACzC;CAEA,0BAA0B;CAC1B,WAAW;CAEX,YACE,IACA,OACA,QACA,kBACA,mBACA,IACA,gBACA,YACA,SACA,iCACA;AACA,QAAA,KAAW,GAAG,YAAY,aAAa,aAAa;AACpD,QAAA,QAAc;AACd,QAAA,SAAe;AACf,QAAA,mBAAyB;AACzB,QAAA,oBAA0B;AAC1B,QAAA,KAAW;AACX,QAAA,iBAAuB;AACvB,QAAA,aAAmB;AACnB,QAAA,UAAgB;EAEhB,MAAM,YAAY,mBAAmB;AACrC,QAAA,8BACG,UAAU,kBAAkB,UAAU,kBACvC;AAEF,QAAA,GAAS,OACP,gBAAgB,MAAA,6BAAmC,QAAQ,GAAG,QAAQ,EAAE,CAAC,iCAC5C,UAAU,kBAAkB,QAAQ,GAAG,QAAQ,EAAE,CAAC,iCAE/E,EAAC,WAAU,CACZ;;CAIH,KAAK,OAAe;AAClB,SAAO,MAAA,GAAS,GAAG,UAAU,MAAA,MAAY,CAAC,GAAG,QAAQ;;CAGvD,MAAM,kBAAkB;EACtB,MAAM,KAAK,MAAA;EACX,MAAM,QAAQ,MAAA;EACd,MAAM,eAAe,MAAA;EACrB,MAAM,gBAAgB,MAAA;EAEtB,MAAM,sBACJ,kBAAkB,OACd,eACA,GAAG,cAAc,KAAK;AAC5B,QAAA,GAAS,OAAO,yBAAyB,sBAAsB;EAC/D,MAAM,QAAQ,YAAY,KAAK;AAC/B,QAAM,EAAE,UAAU,MAAA,IAAU,mBAAmB,CAAC,OAAO,GAAG;GAAC;GAAO,cAAc;GAAoB,CAAC;EACrG,MAAM,WAAW,YAAY,KAAK,GAAG,OAAO,QAAQ,EAAE;AACtD,QAAA,GAAS,OACP,wBAAwB,oBAAoB,IAAI,QAAQ,MACzD;;CAGH,MAAM,yCAGH;EACD,MAAM,CAAC,CAAC,EAAC,kBAAiB,UAAU,MAAM,MACxC,MAAA,KACA,QAAO,CACL,GAA8B;sCACA,MAAA,IAAU,mBAAmB,IAK3D,GAAG
;;;;;;;;;iBASM,MAAA,IAAU,cAAc,CAAC;sBACpB,MAAA,IAAU,gBAAgB,CAAC;;;UAI1C,EACD,EAAC,MAAM,UAAc,CACtB;AAED,SAAO;GACL;GACA,kBAAkB,MAAQ,QAAQ,uBAAuB;GAC1D;;CAGH,MAAM,4BAAoD;EACxD,MAAM,CAAC,EAAC,kBAAiB,MAAM,MAAA,EACvB;qDACyC,MAAA,IAAU,YAAY;AACvE,SAAO;;CAGT,mBAAmB,WAAoC;AACrD,SAAO,MAAM,MAAA,IAAU,OAAM,QAAO;GAClC,MAAM,CAAC,EAAC,aAAY,MAAM,GAAwB;;wBAEhC,MAAA,IAAU,YAAY,CAAC,qBAAqB,UAAU;;;GAOxE,MAAM,CAAC,EAAC,WAAU,MAAM,GAAuB;wBAC7B,MAAA,IAAU,mBAAmB,CAAC;AAChD,OAAI,UAAU,MAAA,OACZ,OAAM,IAAI,WACR,+BAA+B,UAAU,uCAAuC,QACjF;AAEH,UAAO,OAAO,QAAQ;IACtB;;;;;CAMJ,MAAM,OAA0B;EAC9B,MAAM,CAAC,WAAW,CAAC,MAAM,WAAW;EAQpC,MAAM,OAAO,WAAW,UAAU,OAAO;AACzC,QAAA,0BAAgC,KAAK;AAErC,QAAA,MAAY,QAAQ;GAClB;GACA;GACA;GACA,aAAa,OAAO,GAAG,OAAO;GAC/B,CAAC;AAEF,SAAO,KAAK;;CAGd,QAAQ;AACN,QAAA,MAAY,QAAQ,CAAC,QAAQ,CAAC;;CAGhC,OAAO,GAA4B;AACjC,QAAA,MAAY,QAAQ,EAAE;;CAGxB,QAAQ,YAAwB,MAAsB;AACpD,QAAA,MAAY,QAAQ,CAAC,cAAc;GAAC;GAAY;GAAK,CAAC,CAAC;;CAGzD,gBAAuC;CAEvC,eAA0C;AACxC,MAAI,CAAC,MAAA,QACH;AAEF,MACE,MAAA,iBAAuB,QACvB,MAAA,yBAA+B,MAAA,4BAC/B;AACA,SAAA,GAAS,OACP,+BAA+B,MAAA,MAAY,MAAM,CAAC,qBAAqB,MAAA,yBAA+B,QAAQ,GAAG,QAAQ,EAAE,CAAC,4SAW7H;AACD,SAAA,eAAqB,UAAU;;AAEjC,SAAO,MAAA,cAAoB;;CAG7B,4BAA4B;AAC1B,MACE,MAAA,iBAAuB,QAEvB,MAAA,yBAA+B,MAAA,6BAAmC,IAClE;AACA,SAAA,GAAS,OACP,gCAAgC,MAAA,MAAY,MAAM,CAAC,qBAAqB,MAAA,yBAA+B,QAAQ,GAAG,QAAQ,EAAE,CAAC,MAC9H;AACD,SAAA,aAAmB,SAAS;AAC5B,SAAA,eAAqB;;;CAIzB,WAAW;;;;;CAMX,MAAM,MAAM;AACV,SAAO,CAAC,MAAA,SAAe,4BAA4B;EAEnD,MAAM,EAAC,SAAS,SAAS,SAAS,kBAAiB,UAAU;AAC7D,QAAA,UAAgB;AAChB,QAAA,UAAgB;AAEhB,QAAA,GAAS,OAAO,kBAAkB;EAClC,IAAI;AACJ,MAAI;AACF,SAAM,MAAA,cAAoB;WACnB,GAAG;AACV,SAAM;AACN,SAAM;YACE;AAER,OAAI,MAAA,iBAAuB,MAAM;AAC/B,UAAA,aAAmB,SAAS;AAC5B,UAAA,eAAqB;;AAEvB,SAAA,mBACE,MAAA,MAAY,OAAO,CAAC,QAAO,UAAS,UAAU,KAAA,EAAU,EACxD,IACD;AACD,SAAA,UAAgB;AAChB,kBAAe;AACf,SAAA,GAAS,OAAO,iBAAiB;;;CAIrC,oBAAoB,OAAqB,GAAY;AACnD,MAAI,MAAM,WAAW,EACnB;AAEF,QAAA,GAAS,OACP,aAAa,MAAM,OAAO,mCAC3B;EACD,MAAM,MAAM,aAAa,QAAQ,IAAI,IAAI,WAAW,uBAAuB;AAC3E,OAAK,MAAM,SAAS,OAAO;AACzB,OAAI,UAAU,OACZ;AAGF,WADa,MA
AM,IACnB;IACE,KAAK,cAAc;KAGjB,MAAM,EAAC,eAAc,MAAM;AAC3B,WAAA,GAAS,OAAO,iBAAiB,WAAW,KAAK;AACjD,gBAAW,KAAK,IAAI;AACpB;;;;;CAMR,OAAA,eAAsB;EACpB,IAAI,KAAgC;EACpC,IAAI;EAEJ,MAAM,eAAoC,EAAE;AAC5C,MAAI;AACF,WAAQ,MAAM,MAAM,MAAA,MAAY,SAAS,MAAM,QAAQ;IACrD,MAAM,CAAC,WAAW;AAClB,YAAQ,SAAR;KACE,KAAK,SAAS;MACZ,MAAM,cAAc,IAAI;AACxB,mBAAa;AACb;;KAEF,KAAK,cAAc;MACjB,MAAM,aAAa,IAAI;AACvB,UAAI,GACF,cAAa,KAAK,WAAW;UAE7B,OAAM,MAAA,aAAmB,CAAC,WAAW,CAAC;AAExC;;KAEF,KAAK;AACH,YAAA,WAAiB,IAAI;AACrB;KACF,KAAK;AACH,UAAI,IAAI;AACN,UAAG,KAAK,OAAO;AACf,aAAM,GAAG,KAAK,MAAM;AACpB,YAAK;;AAEP;;IAIJ,MAAM,CAAC,GAAG,WAAW,MAAM,UAAU;IACrC,MAAM,MAAM,QAAQ;AACpB,UAAA,0BAAgC,KAAK;AAErC,QAAI,QAAQ,SAAS;AACnB,YAAO,CAAC,IAAI,gDAAgD;KAC5D,MAAM,EAAC,SAAS,SAAS,WAAU,UAA4B;AAC1D,aAAQ,YAAY,GAAG;AAC5B,UAAK;MACH,MAAM,IAAI,gBACR,MAAA,GAAS,YAAY,aAAa,UAAU,EAC5C,eACD;MACD,oBAAoB;MACpB,KAAK;MACL,0BAA0B;MAC1B,KAAK,CAAC,OAAO;MACd;AACD,QAAG,KAAK,IAAI,MAAA,GAAS;AAGhB,QAAG,KAAK,SAAQ,OAAM;AACzB,QAA+B;0BACjB,MAAA,IAAU,mBAAmB,CAAC,aAAa,MACtD,CAAC,YAAY,QAAQ,OAAO,EAC7B,OACD;AACD,aAAO,EAAE;OACT;WACG;AACL,YAAO,UAAU,2CAA2C,OAAO;AACnE,QAAG;;IAGL,MAAM,QAAQ;KACZ,WAAW,QAAQ,WAAW,YAAY,GAAG;KAC7C,WAAW,QAAQ,WAAW,GAAG,qBAAqB;KACtD,KAAK,GAAG;KACR,QAAQ;KACT;IAED,MAAM,YAAY,GAAG,KAAK,SAAQ,QAAO,CACvC,GAAG,eAAe,MAAA,IAAU,YAAY,CAAC,GAAG,IAAI,MAAM,IACtD,GAAI,WAAW,QAAQ,eAAe,OAAO,GACzC,MAAA,sBAA4B,KAAK,OAAO,GACxC,EAAE,CACP,CAAC;AAEF,QAAI,GAAG,MAAM,QAAQ,EAInB,OAAM;AAER,UAAA,0BAAgC;AAEhC,QAAI,QAAQ,UAAU;KACpB,MAAM,EAAC,UAAS,MAAM,GAAG;AACzB,SAAI,UAAU,MAAA,OAEZ,IAAG,KAAK,KACN,IAAI,WACF,2CAA2C,QAC5C,CACF;UACI;MAEL,MAAM,gBAAgB;AACjB,SAAG,KAAK,SAAQ,OAAM,CACzB,EAAE;qBACK,MAAA,IAAU,mBAAmB,CAAC,OAAO,GAAG,EAAC,eAAc,CAAC,GAChE,CAAC;AACF,SAAG,KAAK,SAAS;;AAGnB,WAAM,GAAG,KAAK,MAAM;AAGpB,SAAI,GAAG,IACL,OAAA,WAAiB;MAAC;MAAU;MAAQ,EAAC,WAAU;MAAC,CAAC;AAEnD,UAAK;AAIL,WAAM,MAAA,aAAmB,aAAa,OAAO,EAAE,CAAC;eACvC,QAAQ,YAAY;AAG7B,QAAG,KAAK,OAAO;AACf,WAAM,GAAG,KAAK,MAAM;AACpB,UAAK;AAEL,WAAM,MAAA,aAAmB,aAAa,OAAO,EAAE,CAAC;;;WAG7C,GAAG;AACV,gBAAa,SAAS,EAAC,iBAAgB,WAAW,KAAK,EAAE,CAAC;AA
C1D,SAAM;;;CAIV,OAAA,aAAoB,MAA2B;AAC7C,MAAI,KAAK,WAAW,EAClB;EAGF,MAAM,SAAS,IAAI,gBACjB,MAAA,GAAS,YAAY,QAAQ,UAAU,EACvC,SACD;AACD,SAAO,IAAI,MAAA,GAAS;EAEpB,IAAI;AACJ,MAAI;AAKF,IAAC,CAAC,kBAAkB,MAAM,OAAO,iBAC/B,QAAO,GAAuB;wBACd,MAAA,IAAU,mBAAmB,CAAC;QAE/C;WACM,GAAG;AACV,QAAK,KAAK,EAAC,iBAAgB,WAAW,KAAK,EAAE,CAAC;AAC9C,SAAM;;AAKH,UAAQ,IACX,KAAK,KAAI,QAAO,MAAA,QAAc,KAAK,eAAe,OAAO,CAAC,CAC3D,CAAC,cAAc,OAAO,SAAS,CAAC;;CAGnC,OAAA,QACE,EAAC,YAAY,KAAK,QAClB,eACA,QACA;AACA,MAAI;AACF,SAAM,OAAO,gBAAgB,OAAM,OAAM;IACvC,MAAM,QAAQ,KAAK,KAAK;IAIxB,IAAI,iBAAiB,IAAI,cAAc,MAAA;IACvC,IAAI,QAAQ;IACZ,IAAI;AAEJ,eAAW,MAAM,WAAW,EAA0B;0CACpB,MAAA,IAAU,YAAY,CAAC;gCACjC,IAAI,UAAU;gCACd,cAAc;oCACV,OAAO,IAAK,EAAE;KAOxC,MAAM,QAAQ,YAAY,KAAK;AAC/B,WAAM;KACN,MAAM,UAAU,YAAY,KAAK,GAAG;AACpC,SAAI,kBACF,EAAC,UAAU,MAAM,MAAA,GAAS,OAAO,MAAA,GAAS,SACxC,UAAU,QAAQ,QAAQ,EAAE,CAAC,UAAU,IAAI,GAAG,2CAC/C;AAGH,UAAK,MAAM,SAAS,QAClB,KAAI,MAAM,cAAc,IAAI,UAG1B,kBAAiB;cACR,gBAAgB;AACzB,0BAAoB,IAAI,QAAQ,aAAa,MAAM,CAAC;AACpD;gBACS,SAAS,SAClB,OAAM,IAAI,gBACR,+BAA+B,IAAI,UAAU,wBAAwB,MAAM,UAAU,GACtF;UACI;AACL,YAAA,GAAS,OACP,qCAAqC,IAAI,UAAU,wBAAwB,MAAM,UAAU,GAC5F;AACD,UAAI,MACF,GACA,mCAAmC,MAAM,UAAU,cAAc,IAAI,UAAU,GAChF;AACD;;;AAIN,QAAI,gBAAgB;AAClB,WAAM;AACN,WAAA,GAAS,OACP,aAAa,IAAI,GAAG,QAAQ,MAAM,YAChC,KAAK,KAAK,GAAG,MACd,MACF;UAED,OAAA,GAAS,OACP,2BAA2B,IAAI,UAAU,+BAC1C;AAIH,QAAI,aAAa;KACjB;WACK,KAAK;AACZ,SAAA,GAAS,QAAQ,sCAAsC,IAAI,MAAM,IAAI;AACrE,OAAI,eAAe,iBAAiB;AAClC,UAAM,kBAAkB,MAAA,IAAU,MAAA,MAAY;AAC9C,UAAA,QAAc,IAAI;;AAEpB,OAAI,KAAK,IAAI;;;;;;;CAQjB,uBAAuB,KAA0B,QAAsB;EACrE,MAAM,QAA+B,EAAE;AAEvC,UAAQ,OAAO,KAAf;GACE,KAAK,yBAAyB;IAC5B,MAAM,EAAC,OAAO,KAAK,aAAY;AAC/B,UAAM,KAAK,MAAA,wBAA8B,KAAK,OAAO,SAAS,CAAC;AAC/D;;GAGF,KAAK,gBAAgB;IACnB,MAAM,EAAC,MAAM,UAAU,aAAY;AACnC,QAAI,SACF,OAAM,KAAK,MAAA,wBAA8B,KAAK,MAAM,SAAS,CAAC;AAEhE,QAAI,SACF,QAAO,QAAQ,SAAS,CAAC,SAAS,CAAC,KAAK,cAAc;AACpD,WAAM,KACJ,MAAA,yBAA+B,KAAK,MAAM,KAAK,SAAS,CACzD;MACD;AAEJ;;GAGF,KAAK,gBAAgB;IACnB,MAAM,EAAC,QAAO;IACd,MAAM,MAAM;KAAC,QAAQ,OAAO,IAAI;KAAQ,OAAO,OAA
O,IAAI;KAAK;AAC/D,UAAM,KACJ,GAAG,UAAU,MAAA,IAAU,gBAAgB,CAAC,OAAO,IAAI,IAAI,CAAC;mCAC/B,IAAI,OAAO,iBAAiB,IAAI,QACzD,GAAG,UAAU,MAAA,IAAU,cAAc,CAAC,OAAO,IAAI,IAAI,CAAC;mCAC7B,IAAI,OAAO,iBAAiB,IAAI,OAC1D;AACD;;GAGF,KAAK,cAAc;IACjB,MAAM,EACJ,IAAI,EAAC,QAAQ,WACX;AACJ,UAAM,KACJ,GAAG,eAAe,MAAA,IAAU,gBAAgB,CAAC;mCACpB,OAAO,iBAAiB,QACjD,GAAG,eAAe,MAAA,IAAU,cAAc,CAAC;mCAClB,OAAO,iBAAiB,OAClD;AACD;;GAGF,KAAK,cAAc;IACjB,MAAM,EAAC,OAAO,eAAe,QAAQ,aAAY;AACjD,QAAI,cACF,OAAM,KAAK,MAAA,wBAA8B,KAAK,OAAO,cAAc,CAAC;AAEtE,QAAI,SACF,OAAM,KACJ,MAAA,yBAA+B,KAAK,OAAO,OAAO,MAAM,SAAS,CAClE;AAEH;;GAGF,KAAK,iBAAiB;IACpB,MAAM,EACJ,OAAO,EAAC,QAAQ,MAAM,SACtB,KAAK,EAAC,MAAM,WACZ,KAAK,EAAC,MAAM,cACV;AACJ,QAAI,YAAY,QACd,OAAM,KACJ,GAAG,UAAU,MAAA,IAAU,cAAc,CAAC,kBAAkB,QAAQ;mCACzC,OAAO,iBAAiB,MAAM,kBAAkB,UACxE;AAEH;;GAGF,KAAK,eAAe;IAClB,MAAM,EACJ,OAAO,EAAC,QAAQ,QAChB,WACE;AACJ,UAAM,KACJ,GAAG,eAAe,MAAA,IAAU,cAAc,CAAC;mCAClB,OAAO,iBAAiB,KAAK,kBAAkB,SACzE;AACD;;GAGF,KAAK,sBAAsB;IACzB,MAAM,EACJ,UAAU,EAAC,QAAQ,MAAM,OAAO,UAChC,YACE;IACJ,MAAM,OAAO,CAAC,GAAG,OAAO,SAAS,GAAG,QAAQ;AAC5C,UAAM,KACJ,GAAG,eAAe,MAAA,IAAU,cAAc,CAAC;mCAClB,OAAO,iBAAiB,MAAM,mBAAmB,IAAI,KAAK,GACpF;;;AAGL,SAAO;;CAGT,yBACE,KACA,EAAC,QAAQ,MAAM,SACf,UACA;EACA,MAAM,MAAwB;GAAC;GAAQ;GAAO;GAAS;AACvD,SAAO,GAAG;sBACQ,MAAA,IAAU,gBAAgB,CAAC,GAAG,IAAI,IAAI,CAAC;;0BAEnC,IAAI,IAAI,CAAC;;;CAIjC,0BACE,KACA,EAAC,QAAQ,MAAM,SACf,QACA,UACA;EACA,MAAM,MAAyB;GAAC;GAAQ;GAAO;GAAQ;GAAS;AAChE,SAAO,GAAG;sBACQ,MAAA,IAAU,cAAc,CAAC,GAAG,IAAI,IAAI,CAAC;;0BAEjC,IAAI,IAAI,CAAC;;;;;;;CAQjC,MAAM,eAAe;AACnB,MAAI,MAAA,SAAe;GACjB,MAAM,EAAC,SAAS,YAAW,UAAU;AACrC,SAAA,MAAY,QAAQ,CAAC,SAAS,QAAQ,CAAC;AACvC,SAAM;;;CAIV,OAAO;AACL,MAAI,MAAA,SAAe;AACjB,SAAA,GAAS,OAAO,YAAY,MAAA,MAAY,MAAM,CAAC,oBAAoB;AACnE,SAAA,MAAY,QAAQ,OAAO;;AAE7B,SAAO,MAAA;;;AAIX,SAAS,aAAa,OAAuC;CAC3D,MAAM,EAAC,WAAW,WAAU;AAC5B,SAAQ,OAAO,KAAf;EACE,KAAK,QACH,QAAO,CAAC,WAAW;GAAC;GAAS;GAAQ,EAAC,iBAAiB,WAAU;GAAC,CAAC;EACrE,KAAK,SACH,QAAO,CAAC,WAAW;GAAC;GAAU;GAAQ,EAAC,WAAU;GAAC,CAAC;EACrD,KAAK,WACH,QAAO,CAAC,WAAW,CAAC,YAAY,OAAO,
CAAC;EAC1C,QACE,QAAO,CAAC,WAAW,CAAC,QAAQ,OAAO,CAAC"}
@@ -1 +1 @@
1
- {"version":3,"file":"replication-state.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/replicator/schema/replication-state.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AAEH,OAAO,EAGL,KAAK,UAAU,EAChB,MAAM,0CAA0C,CAAC;AAClD,OAAO,KAAK,CAAC,MAAM,qCAAqC,CAAC;AACzD,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,iCAAiC,CAAC;AAC9D,OAAO,KAAK,EAAC,eAAe,EAAC,MAAM,2BAA2B,CAAC;AAG/D,OAAO,EAAC,wBAAwB,EAAC,MAAM,gBAAgB,CAAC;AAGxD,OAAO,EAAC,wBAAwB,EAAC,CAAC;AAElC,MAAM,MAAM,YAAY,GAAG,MAAM,GAAG,SAAS,GAAG,QAAQ,CAAC;AAMzD,eAAO,MAAM,2BAA2B,iKAKvC,CAAC;AAwCF,QAAA,MAAM,uBAAuB;;;;EASxB,CAAC;AAEN,MAAM,MAAM,iBAAiB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,uBAAuB,CAAC,CAAC;AAExE,QAAA,MAAM,iCAAiC;;;;;EAclC,CAAC;AAEN,MAAM,MAAM,2BAA2B,GAAG,CAAC,CAAC,KAAK,CAC/C,OAAO,iCAAiC,CACzC,CAAC;AAEF,QAAA,MAAM,sBAAsB;;aAE1B,CAAC;AAEH,MAAM,MAAM,gBAAgB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,sBAAsB,CAAC,CAAC;AAEtE,wBAAgB,oBAAoB,CAClC,EAAE,EAAE,QAAQ,EACZ,YAAY,EAAE,MAAM,EAAE,EACtB,SAAS,EAAE,MAAM,EACjB,kBAAkB,GAAE,UAAe,EACnC,YAAY,UAAO,QAqBpB;AAED;;;;GAIG;AACH,wBAAgB,4BAA4B,CAAC,EAAE,EAAE,QAAQ,QAExD;AAED,wBAAgB,WAAW,CAAC,EAAE,EAAE,QAAQ,EAAE,KAAK,EAAE,YAAY,QAM5D;AAED,wBAAgB,kBAAkB,CAAC,EAAE,EAAE,QAAQ;;;IAY9C;AAED,wBAAgB,oBAAoB,CAAC,EAAE,EAAE,eAAe,GAAG,iBAAiB,CAQ3E;AAED,wBAAgB,8BAA8B,CAC5C,EAAE,EAAE,eAAe,GAClB,2BAA2B,CAS7B;AAED,wBAAgB,0BAA0B,CACxC,EAAE,EAAE,eAAe,EACnB,SAAS,EAAE,MAAM,QAGlB;AAED,wBAAgB,mBAAmB,CAAC,EAAE,EAAE,eAAe,GAAG,gBAAgB,CAGzE"}
1
+ {"version":3,"file":"replication-state.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/replicator/schema/replication-state.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AAEH,OAAO,EAGL,KAAK,UAAU,EAChB,MAAM,0CAA0C,CAAC;AAClD,OAAO,KAAK,CAAC,MAAM,qCAAqC,CAAC;AACzD,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,iCAAiC,CAAC;AAC9D,OAAO,KAAK,EAAC,eAAe,EAAC,MAAM,2BAA2B,CAAC;AAG/D,OAAO,EAAC,wBAAwB,EAAC,MAAM,gBAAgB,CAAC;AAGxD,OAAO,EAAC,wBAAwB,EAAC,CAAC;AAElC,MAAM,MAAM,YAAY,GAAG,MAAM,GAAG,SAAS,GAAG,QAAQ,CAAC;AAMzD,eAAO,MAAM,2BAA2B,iKAKvC,CAAC;AA0CF,QAAA,MAAM,uBAAuB;;;;EASxB,CAAC;AAEN,MAAM,MAAM,iBAAiB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,uBAAuB,CAAC,CAAC;AAExE,QAAA,MAAM,iCAAiC;;;;;EAclC,CAAC;AAEN,MAAM,MAAM,2BAA2B,GAAG,CAAC,CAAC,KAAK,CAC/C,OAAO,iCAAiC,CACzC,CAAC;AAEF,QAAA,MAAM,sBAAsB;;aAE1B,CAAC;AAEH,MAAM,MAAM,gBAAgB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,sBAAsB,CAAC,CAAC;AAEtE,wBAAgB,oBAAoB,CAClC,EAAE,EAAE,QAAQ,EACZ,YAAY,EAAE,MAAM,EAAE,EACtB,SAAS,EAAE,MAAM,EACjB,kBAAkB,GAAE,UAAe,EACnC,YAAY,UAAO,QAoBpB;AAED;;;;GAIG;AACH,wBAAgB,4BAA4B,CAAC,EAAE,EAAE,QAAQ,QAExD;AAED,wBAAgB,WAAW,CAAC,EAAE,EAAE,QAAQ,EAAE,KAAK,EAAE,YAAY,QAM5D;AAED,wBAAgB,kBAAkB,CAAC,EAAE,EAAE,QAAQ;;;IAY9C;AAED,wBAAgB,oBAAoB,CAAC,EAAE,EAAE,eAAe,GAAG,iBAAiB,CAQ3E;AAED,wBAAgB,8BAA8B,CAC5C,EAAE,EAAE,eAAe,GAClB,2BAA2B,CAS7B;AAED,wBAAgB,0BAA0B,CACxC,EAAE,EAAE,eAAe,EACnB,SAAS,EAAE,MAAM,QAQlB;AAED,wBAAgB,mBAAmB,CAAC,EAAE,EAAE,eAAe,GAAG,gBAAgB,CAGzE"}
@@ -17,7 +17,7 @@ var CREATE_RUNTIME_EVENTS_TABLE = `
17
17
  timestamp TEXT NOT NULL DEFAULT (current_timestamp)
18
18
  );
19
19
  `;
20
- var CREATE_REPLICATION_STATE_SCHEMA = "\n CREATE TABLE \"_zero.replicationConfig\" (\n replicaVersion TEXT NOT NULL,\n publications TEXT NOT NULL,\n initialSyncContext TEXT DEFAULT '{}',\n lock INTEGER PRIMARY KEY DEFAULT 1 CHECK (lock=1)\n );\n \n CREATE TABLE \"_zero.replicationState\" (\n stateVersion TEXT NOT NULL,\n lock INTEGER PRIMARY KEY DEFAULT 1 CHECK (lock=1)\n );\n " + CREATE_CHANGELOG_SCHEMA + CREATE_RUNTIME_EVENTS_TABLE + CREATE_COLUMN_METADATA_TABLE + CREATE_TABLE_METADATA_TABLE;
20
+ var CREATE_REPLICATION_STATE_SCHEMA = "\n CREATE TABLE \"_zero.replicationConfig\" (\n replicaVersion TEXT NOT NULL,\n publications TEXT NOT NULL,\n initialSyncContext TEXT DEFAULT '{}',\n lock INTEGER PRIMARY KEY DEFAULT 1 CHECK (lock=1)\n );\n \n CREATE TABLE \"_zero.replicationState\" (\n stateVersion TEXT NOT NULL,\n writeTimeMs INTEGER,\n lock INTEGER PRIMARY KEY DEFAULT 1 CHECK (lock=1)\n );\n " + CREATE_CHANGELOG_SCHEMA + CREATE_RUNTIME_EVENTS_TABLE + CREATE_COLUMN_METADATA_TABLE + CREATE_TABLE_METADATA_TABLE;
21
21
  var stringArray = valita_exports.array(valita_exports.string());
22
22
  var subscriptionStateSchema = valita_exports.object({
23
23
  replicaVersion: valita_exports.string(),
@@ -45,7 +45,8 @@ function initReplicationState(db, publications, watermark, initialSyncContext =
45
45
  (replicaVersion, publications, initialSyncContext) VALUES (?, ?, ?)
46
46
  `).run(watermark, JSON.stringify(publications.sort()), stringify(initialSyncContext));
47
47
  db.prepare(`
48
- INSERT INTO "_zero.replicationState" (stateVersion) VALUES (?)
48
+ INSERT INTO "_zero.replicationState" (stateVersion, writeTimeMs)
49
+ VALUES (?, unixepoch('subsec') * 1000)
49
50
  `).run(watermark);
50
51
  recordEvent(db, "sync");
51
52
  }
@@ -88,7 +89,9 @@ function getSubscriptionStateAndContext(db) {
88
89
  `), subscriptionStateAndContextSchema);
89
90
  }
90
91
  function updateReplicationWatermark(db, watermark) {
91
- db.run(`UPDATE "_zero.replicationState" SET stateVersion=?`, watermark);
92
+ db.run(`
93
+ UPDATE "_zero.replicationState"
94
+ SET stateVersion=?, writeTimeMs=unixepoch('subsec') * 1000`, watermark);
92
95
  }
93
96
  function getReplicationState(db) {
94
97
  return parse(db.get(`SELECT stateVersion FROM "_zero.replicationState"`), replicationStateSchema);
@@ -1 +1 @@
1
- {"version":3,"file":"replication-state.js","names":[],"sources":["../../../../../../../zero-cache/src/services/replicator/schema/replication-state.ts"],"sourcesContent":["/**\n * Replication metadata, used for incremental view maintenance and catchup.\n *\n * These tables are created atomically in {@link setupReplicationTables}\n * after the logical replication handoff when initial data synchronization has completed.\n */\n\nimport {\n jsonObjectSchema,\n stringify,\n type JSONObject,\n} from '../../../../../shared/src/bigint-json.ts';\nimport * as v from '../../../../../shared/src/valita.ts';\nimport type {Database} from '../../../../../zqlite/src/db.ts';\nimport type {StatementRunner} from '../../../db/statements.ts';\nimport {CREATE_CHANGELOG_SCHEMA} from './change-log.ts';\nimport {CREATE_COLUMN_METADATA_TABLE} from './column-metadata.ts';\nimport {ZERO_VERSION_COLUMN_NAME} from './constants.ts';\nimport {CREATE_TABLE_METADATA_TABLE} from './table-metadata.ts';\n\nexport {ZERO_VERSION_COLUMN_NAME};\n\nexport type RuntimeEvent = 'sync' | 'upgrade' | 'vacuum';\n\n// event : The RuntimeEvent. Only one row per event is tracked.\n// Inserting an event will REPLACE any row for the same event.\n// timestamp : SQLite timestamp string, e.g. \"2024-04-12 11:37:46\".\n// Append a `Z` when parsing with `new Date(...)`;\nexport const CREATE_RUNTIME_EVENTS_TABLE = `\n CREATE TABLE \"_zero.runtimeEvents\" (\n event TEXT PRIMARY KEY ON CONFLICT REPLACE,\n timestamp TEXT NOT NULL DEFAULT (current_timestamp)\n );\n`;\n\nconst CREATE_REPLICATION_STATE_SCHEMA =\n // replicaVersion : A value identifying the version at which the initial sync happened, i.e.\n // the version at which all rows were copied, and to `_0_version` was set.\n // This value is used to distinguish data from other replicas (e.g. 
if a\n // replica is reset or if there are ever multiple replicas).\n // publications : JSON stringified array of publication names\n // initialSyncContext : Metadata related to the context of when and how the replica was initially\n // synced. This corresponds with the same column stored in upstream and is\n // used for debugging replica version mismatches, which can arise from a number\n // of misconfigurations, such as dueling replication-managers, or restores of\n // stale litestream backups.\n // lock : Auto-magic column for enforcing single-row semantics.\n /*sql*/ `\n CREATE TABLE \"_zero.replicationConfig\" (\n replicaVersion TEXT NOT NULL,\n publications TEXT NOT NULL,\n initialSyncContext TEXT DEFAULT '{}',\n lock INTEGER PRIMARY KEY DEFAULT 1 CHECK (lock=1)\n );\n ` +\n // stateVersion : The latest version replicated from upstream, starting with the initial\n // `replicaVersion` and moving forward to each subsequent commit watermark\n // (e.g. corresponding to a Postgres LSN). Versions are represented as\n // lexicographically sortable watermarks (e.g. 
LexiVersions).\n //\n `\n CREATE TABLE \"_zero.replicationState\" (\n stateVersion TEXT NOT NULL,\n lock INTEGER PRIMARY KEY DEFAULT 1 CHECK (lock=1)\n );\n ` +\n CREATE_CHANGELOG_SCHEMA +\n CREATE_RUNTIME_EVENTS_TABLE +\n CREATE_COLUMN_METADATA_TABLE +\n CREATE_TABLE_METADATA_TABLE;\n\nconst stringArray = v.array(v.string());\n\nconst subscriptionStateSchema = v\n .object({\n replicaVersion: v.string(),\n publications: v.string(),\n watermark: v.string(),\n })\n .map(s => ({\n ...s,\n publications: v.parse(JSON.parse(s.publications), stringArray),\n }));\n\nexport type SubscriptionState = v.Infer<typeof subscriptionStateSchema>;\n\nconst subscriptionStateAndContextSchema = v\n .object({\n replicaVersion: v.string(),\n publications: v.string(),\n initialSyncContext: v.string(),\n watermark: v.string(),\n })\n .map(s => ({\n ...s,\n publications: v.parse(JSON.parse(s.publications), stringArray),\n initialSyncContext: v.parse(\n JSON.parse(s.initialSyncContext),\n jsonObjectSchema,\n ),\n }));\n\nexport type SubscriptionStateAndContext = v.Infer<\n typeof subscriptionStateAndContextSchema\n>;\n\nconst replicationStateSchema = v.object({\n stateVersion: v.string(),\n});\n\nexport type ReplicationState = v.Infer<typeof replicationStateSchema>;\n\nexport function initReplicationState(\n db: Database,\n publications: string[],\n watermark: string,\n initialSyncContext: JSONObject = {},\n createTables = true,\n) {\n if (createTables) {\n createReplicationStateTables(db);\n }\n db.prepare(\n `\n INSERT INTO \"_zero.replicationConfig\" \n (replicaVersion, publications, initialSyncContext) VALUES (?, ?, ?)\n `,\n ).run(\n watermark,\n JSON.stringify(publications.sort()),\n stringify(initialSyncContext),\n );\n db.prepare(\n `\n INSERT INTO \"_zero.replicationState\" (stateVersion) VALUES (?)\n `,\n ).run(watermark);\n recordEvent(db, 'sync');\n}\n\n/**\n * Exposed as a separate function for the custom change source,\n * which needs the tables to be created in order to 
construct\n * ChangeProcessor before it knows the initial watermark.\n */\nexport function createReplicationStateTables(db: Database) {\n db.exec(CREATE_REPLICATION_STATE_SCHEMA);\n}\n\nexport function recordEvent(db: Database, event: RuntimeEvent) {\n db.prepare(\n `\n INSERT INTO \"_zero.runtimeEvents\" (event) VALUES (?) \n `,\n ).run(event);\n}\n\nexport function getAscendingEvents(db: Database) {\n const result = db\n .prepare(\n `SELECT event, timestamp FROM \"_zero.runtimeEvents\" \n ORDER BY timestamp ASC\n `,\n )\n .all<{event: string; timestamp: string}>();\n return result.map(({event, timestamp}) => ({\n event,\n timestamp: new Date(timestamp + 'Z'),\n }));\n}\n\nexport function getSubscriptionState(db: StatementRunner): SubscriptionState {\n const result = db.get(/*sql*/ `\n SELECT c.replicaVersion, c.publications, s.stateVersion as watermark\n FROM \"_zero.replicationConfig\" as c\n JOIN \"_zero.replicationState\" as s\n ON c.lock = s.lock\n `);\n return v.parse(result, subscriptionStateSchema);\n}\n\nexport function getSubscriptionStateAndContext(\n db: StatementRunner,\n): SubscriptionStateAndContext {\n const result = db.get(/*sql*/ `\n SELECT c.replicaVersion, c.publications, c.initialSyncContext,\n s.stateVersion as watermark\n FROM \"_zero.replicationConfig\" as c\n JOIN \"_zero.replicationState\" as s\n ON c.lock = s.lock\n `);\n return v.parse(result, subscriptionStateAndContextSchema);\n}\n\nexport function updateReplicationWatermark(\n db: StatementRunner,\n watermark: string,\n) {\n db.run(`UPDATE \"_zero.replicationState\" SET stateVersion=?`, watermark);\n}\n\nexport function getReplicationState(db: StatementRunner): ReplicationState {\n const result = db.get(`SELECT stateVersion FROM \"_zero.replicationState\"`);\n return v.parse(result, 
replicationStateSchema);\n}\n"],"mappings":";;;;;;;;;;;;;AA4BA,IAAa,8BAA8B;;;;;;AAO3C,IAAM,kCAYI,kXAmBR,0BACA,8BACA,+BACA;AAEF,IAAM,cAAc,eAAE,MAAM,eAAE,QAAQ,CAAC;AAEvC,IAAM,0BAA0B,eAC7B,OAAO;CACN,gBAAgB,eAAE,QAAQ;CAC1B,cAAc,eAAE,QAAQ;CACxB,WAAW,eAAE,QAAQ;CACtB,CAAC,CACD,KAAI,OAAM;CACT,GAAG;CACH,cAAc,MAAQ,KAAK,MAAM,EAAE,aAAa,EAAE,YAAY;CAC/D,EAAE;AAIL,IAAM,oCAAoC,eACvC,OAAO;CACN,gBAAgB,eAAE,QAAQ;CAC1B,cAAc,eAAE,QAAQ;CACxB,oBAAoB,eAAE,QAAQ;CAC9B,WAAW,eAAE,QAAQ;CACtB,CAAC,CACD,KAAI,OAAM;CACT,GAAG;CACH,cAAc,MAAQ,KAAK,MAAM,EAAE,aAAa,EAAE,YAAY;CAC9D,oBAAoB,MAClB,KAAK,MAAM,EAAE,mBAAmB,EAChC,iBACD;CACF,EAAE;AAML,IAAM,yBAAyB,eAAE,OAAO,EACtC,cAAc,eAAE,QAAQ,EACzB,CAAC;AAIF,SAAgB,qBACd,IACA,cACA,WACA,qBAAiC,EAAE,EACnC,eAAe,MACf;AACA,KAAI,aACF,8BAA6B,GAAG;AAElC,IAAG,QACD;;;MAID,CAAC,IACA,WACA,KAAK,UAAU,aAAa,MAAM,CAAC,EACnC,UAAU,mBAAmB,CAC9B;AACD,IAAG,QACD;;MAGD,CAAC,IAAI,UAAU;AAChB,aAAY,IAAI,OAAO;;;;;;;AAQzB,SAAgB,6BAA6B,IAAc;AACzD,IAAG,KAAK,gCAAgC;;AAG1C,SAAgB,YAAY,IAAc,OAAqB;AAC7D,IAAG,QACD;;MAGD,CAAC,IAAI,MAAM;;AAGd,SAAgB,mBAAmB,IAAc;AAQ/C,QAPe,GACZ,QACC;;MAGD,CACA,KAAyC,CAC9B,KAAK,EAAC,OAAO,iBAAgB;EACzC;EACA,2BAAW,IAAI,KAAK,YAAY,IAAI;EACrC,EAAE;;AAGL,SAAgB,qBAAqB,IAAwC;AAO3E,QAAO,MANQ,GAAG,IAAY;;;;;MAK1B,EACmB,wBAAwB;;AAGjD,SAAgB,+BACd,IAC6B;AAQ7B,QAAO,MAPQ,GAAG,IAAY;;;;;;MAM1B,EACmB,kCAAkC;;AAG3D,SAAgB,2BACd,IACA,WACA;AACA,IAAG,IAAI,sDAAsD,UAAU;;AAGzE,SAAgB,oBAAoB,IAAuC;AAEzE,QAAO,MADQ,GAAG,IAAI,oDAAoD,EACnD,uBAAuB"}
1
+ {"version":3,"file":"replication-state.js","names":[],"sources":["../../../../../../../zero-cache/src/services/replicator/schema/replication-state.ts"],"sourcesContent":["/**\n * Replication metadata, used for incremental view maintenance and catchup.\n *\n * These tables are created atomically in {@link setupReplicationTables}\n * after the logical replication handoff when initial data synchronization has completed.\n */\n\nimport {\n jsonObjectSchema,\n stringify,\n type JSONObject,\n} from '../../../../../shared/src/bigint-json.ts';\nimport * as v from '../../../../../shared/src/valita.ts';\nimport type {Database} from '../../../../../zqlite/src/db.ts';\nimport type {StatementRunner} from '../../../db/statements.ts';\nimport {CREATE_CHANGELOG_SCHEMA} from './change-log.ts';\nimport {CREATE_COLUMN_METADATA_TABLE} from './column-metadata.ts';\nimport {ZERO_VERSION_COLUMN_NAME} from './constants.ts';\nimport {CREATE_TABLE_METADATA_TABLE} from './table-metadata.ts';\n\nexport {ZERO_VERSION_COLUMN_NAME};\n\nexport type RuntimeEvent = 'sync' | 'upgrade' | 'vacuum';\n\n// event : The RuntimeEvent. Only one row per event is tracked.\n// Inserting an event will REPLACE any row for the same event.\n// timestamp : SQLite timestamp string, e.g. \"2024-04-12 11:37:46\".\n// Append a `Z` when parsing with `new Date(...)`;\nexport const CREATE_RUNTIME_EVENTS_TABLE = `\n CREATE TABLE \"_zero.runtimeEvents\" (\n event TEXT PRIMARY KEY ON CONFLICT REPLACE,\n timestamp TEXT NOT NULL DEFAULT (current_timestamp)\n );\n`;\n\nconst CREATE_REPLICATION_STATE_SCHEMA =\n // replicaVersion : A value identifying the version at which the initial sync happened, i.e.\n // the version at which all rows were copied, and to `_0_version` was set.\n // This value is used to distinguish data from other replicas (e.g. 
if a\n // replica is reset or if there are ever multiple replicas).\n // publications : JSON stringified array of publication names\n // initialSyncContext : Metadata related to the context of when and how the replica was initially\n // synced. This corresponds with the same column stored in upstream and is\n // used for debugging replica version mismatches, which can arise from a number\n // of misconfigurations, such as dueling replication-managers, or restores of\n // stale litestream backups.\n // lock : Auto-magic column for enforcing single-row semantics.\n /*sql*/ `\n CREATE TABLE \"_zero.replicationConfig\" (\n replicaVersion TEXT NOT NULL,\n publications TEXT NOT NULL,\n initialSyncContext TEXT DEFAULT '{}',\n lock INTEGER PRIMARY KEY DEFAULT 1 CHECK (lock=1)\n );\n ` +\n // stateVersion : The latest version replicated from upstream, starting with the initial\n // `replicaVersion` and moving forward to each subsequent commit watermark\n // (e.g. corresponding to a Postgres LSN). Versions are represented as\n // lexicographically sortable watermarks (e.g. 
LexiVersions).\n // writeTimeMs : The millisecond epoch at which this version was written to the replica.\n //\n /*sql*/ `\n CREATE TABLE \"_zero.replicationState\" (\n stateVersion TEXT NOT NULL,\n writeTimeMs INTEGER,\n lock INTEGER PRIMARY KEY DEFAULT 1 CHECK (lock=1)\n );\n ` +\n CREATE_CHANGELOG_SCHEMA +\n CREATE_RUNTIME_EVENTS_TABLE +\n CREATE_COLUMN_METADATA_TABLE +\n CREATE_TABLE_METADATA_TABLE;\n\nconst stringArray = v.array(v.string());\n\nconst subscriptionStateSchema = v\n .object({\n replicaVersion: v.string(),\n publications: v.string(),\n watermark: v.string(),\n })\n .map(s => ({\n ...s,\n publications: v.parse(JSON.parse(s.publications), stringArray),\n }));\n\nexport type SubscriptionState = v.Infer<typeof subscriptionStateSchema>;\n\nconst subscriptionStateAndContextSchema = v\n .object({\n replicaVersion: v.string(),\n publications: v.string(),\n initialSyncContext: v.string(),\n watermark: v.string(),\n })\n .map(s => ({\n ...s,\n publications: v.parse(JSON.parse(s.publications), stringArray),\n initialSyncContext: v.parse(\n JSON.parse(s.initialSyncContext),\n jsonObjectSchema,\n ),\n }));\n\nexport type SubscriptionStateAndContext = v.Infer<\n typeof subscriptionStateAndContextSchema\n>;\n\nconst replicationStateSchema = v.object({\n stateVersion: v.string(),\n});\n\nexport type ReplicationState = v.Infer<typeof replicationStateSchema>;\n\nexport function initReplicationState(\n db: Database,\n publications: string[],\n watermark: string,\n initialSyncContext: JSONObject = {},\n createTables = true,\n) {\n if (createTables) {\n createReplicationStateTables(db);\n }\n db.prepare(\n `\n INSERT INTO \"_zero.replicationConfig\" \n (replicaVersion, publications, initialSyncContext) VALUES (?, ?, ?)\n `,\n ).run(\n watermark,\n JSON.stringify(publications.sort()),\n stringify(initialSyncContext),\n );\n db.prepare(/*sql*/ `\n INSERT INTO \"_zero.replicationState\" (stateVersion, writeTimeMs) \n VALUES (?, unixepoch('subsec') * 1000)\n 
`).run(watermark);\n recordEvent(db, 'sync');\n}\n\n/**\n * Exposed as a separate function for the custom change source,\n * which needs the tables to be created in order to construct\n * ChangeProcessor before it knows the initial watermark.\n */\nexport function createReplicationStateTables(db: Database) {\n db.exec(CREATE_REPLICATION_STATE_SCHEMA);\n}\n\nexport function recordEvent(db: Database, event: RuntimeEvent) {\n db.prepare(\n `\n INSERT INTO \"_zero.runtimeEvents\" (event) VALUES (?) \n `,\n ).run(event);\n}\n\nexport function getAscendingEvents(db: Database) {\n const result = db\n .prepare(\n `SELECT event, timestamp FROM \"_zero.runtimeEvents\" \n ORDER BY timestamp ASC\n `,\n )\n .all<{event: string; timestamp: string}>();\n return result.map(({event, timestamp}) => ({\n event,\n timestamp: new Date(timestamp + 'Z'),\n }));\n}\n\nexport function getSubscriptionState(db: StatementRunner): SubscriptionState {\n const result = db.get(/*sql*/ `\n SELECT c.replicaVersion, c.publications, s.stateVersion as watermark\n FROM \"_zero.replicationConfig\" as c\n JOIN \"_zero.replicationState\" as s\n ON c.lock = s.lock\n `);\n return v.parse(result, subscriptionStateSchema);\n}\n\nexport function getSubscriptionStateAndContext(\n db: StatementRunner,\n): SubscriptionStateAndContext {\n const result = db.get(/*sql*/ `\n SELECT c.replicaVersion, c.publications, c.initialSyncContext,\n s.stateVersion as watermark\n FROM \"_zero.replicationConfig\" as c\n JOIN \"_zero.replicationState\" as s\n ON c.lock = s.lock\n `);\n return v.parse(result, subscriptionStateAndContextSchema);\n}\n\nexport function updateReplicationWatermark(\n db: StatementRunner,\n watermark: string,\n) {\n db.run(\n /*sql*/ `\n UPDATE \"_zero.replicationState\" \n SET stateVersion=?, writeTimeMs=unixepoch('subsec') * 1000`,\n watermark,\n );\n}\n\nexport function getReplicationState(db: StatementRunner): ReplicationState {\n const result = db.get(`SELECT stateVersion FROM 
\"_zero.replicationState\"`);\n return v.parse(result, replicationStateSchema);\n}\n"],"mappings":";;;;;;;;;;;;;AA4BA,IAAa,8BAA8B;;;;;;AAO3C,IAAM,kCAYI,4YAqBR,0BACA,8BACA,+BACA;AAEF,IAAM,cAAc,eAAE,MAAM,eAAE,QAAQ,CAAC;AAEvC,IAAM,0BAA0B,eAC7B,OAAO;CACN,gBAAgB,eAAE,QAAQ;CAC1B,cAAc,eAAE,QAAQ;CACxB,WAAW,eAAE,QAAQ;CACtB,CAAC,CACD,KAAI,OAAM;CACT,GAAG;CACH,cAAc,MAAQ,KAAK,MAAM,EAAE,aAAa,EAAE,YAAY;CAC/D,EAAE;AAIL,IAAM,oCAAoC,eACvC,OAAO;CACN,gBAAgB,eAAE,QAAQ;CAC1B,cAAc,eAAE,QAAQ;CACxB,oBAAoB,eAAE,QAAQ;CAC9B,WAAW,eAAE,QAAQ;CACtB,CAAC,CACD,KAAI,OAAM;CACT,GAAG;CACH,cAAc,MAAQ,KAAK,MAAM,EAAE,aAAa,EAAE,YAAY;CAC9D,oBAAoB,MAClB,KAAK,MAAM,EAAE,mBAAmB,EAChC,iBACD;CACF,EAAE;AAML,IAAM,yBAAyB,eAAE,OAAO,EACtC,cAAc,eAAE,QAAQ,EACzB,CAAC;AAIF,SAAgB,qBACd,IACA,cACA,WACA,qBAAiC,EAAE,EACnC,eAAe,MACf;AACA,KAAI,aACF,8BAA6B,GAAG;AAElC,IAAG,QACD;;;MAID,CAAC,IACA,WACA,KAAK,UAAU,aAAa,MAAM,CAAC,EACnC,UAAU,mBAAmB,CAC9B;AACD,IAAG,QAAgB;;;MAGf,CAAC,IAAI,UAAU;AACnB,aAAY,IAAI,OAAO;;;;;;;AAQzB,SAAgB,6BAA6B,IAAc;AACzD,IAAG,KAAK,gCAAgC;;AAG1C,SAAgB,YAAY,IAAc,OAAqB;AAC7D,IAAG,QACD;;MAGD,CAAC,IAAI,MAAM;;AAGd,SAAgB,mBAAmB,IAAc;AAQ/C,QAPe,GACZ,QACC;;MAGD,CACA,KAAyC,CAC9B,KAAK,EAAC,OAAO,iBAAgB;EACzC;EACA,2BAAW,IAAI,KAAK,YAAY,IAAI;EACrC,EAAE;;AAGL,SAAgB,qBAAqB,IAAwC;AAO3E,QAAO,MANQ,GAAG,IAAY;;;;;MAK1B,EACmB,wBAAwB;;AAGjD,SAAgB,+BACd,IAC6B;AAQ7B,QAAO,MAPQ,GAAG,IAAY;;;;;;MAM1B,EACmB,kCAAkC;;AAG3D,SAAgB,2BACd,IACA,WACA;AACA,IAAG,IACO;;mEAGR,UACD;;AAGH,SAAgB,oBAAoB,IAAuC;AAEzE,QAAO,MADQ,GAAG,IAAI,oDAAoD,EACnD,uBAAuB"}
@@ -1 +1 @@
1
- {"version":3,"file":"pg.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/types/pg.ts"],"names":[],"mappings":"AAEA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AACjD,OAAO,QAAQ,EAAE,EAAc,KAAK,YAAY,EAAC,MAAM,UAAU,CAAC;AAClE,OAAO,EAAa,KAAK,SAAS,EAAC,MAAM,oCAAoC,CAAC;AAc9E,wBAAgB,mBAAmB,CAAC,SAAS,EAAE,MAAM,GAAG,MAAM,CAqC7D;AAED,iBAAS,kBAAkB,CAAC,GAAG,EAAE,OAAO,GAAG,MAAM,CAuBhD;AAcD,wBAAgB,0BAA0B,CAAC,YAAY,EAAE,MAAM,GAAG,MAAM,CAoBvE;AAED,wBAAgB,0BAA0B,CAAC,UAAU,EAAE,MAAM,GAAG,MAAM,CAkFrE;AAED,iBAAS,iBAAiB,CAAC,IAAI,EAAE,MAAM,GAAG,MAAM,CAG/C;AAED;;;;;;;;;GASG;AACH,MAAM,MAAM,iBAAiB,GAAG,SAAS,GAAG,UAAU,CAAC;AAEvD,MAAM,MAAM,WAAW,GAAG;IACxB;;;;;OAKG;IACH,gBAAgB,CAAC,EAAE,OAAO,CAAC;CAC5B,CAAC;AAEF;;;;GAIG;AACH,eAAO,MAAM,kBAAkB,GAAI,uBAAoB,WAAgB;;;;;;;;;;;;;;;;;;2BAwBlD,OAAO;;;;;;2BAOP,OAAO;;;;;;2BAQP,MAAM,GAAG,IAAI;;;;;;2BAUb,MAAM;uBACV,MAAM,GAAG,MAAM;;;CAG9B,CAAC;AAEH,MAAM,MAAM,UAAU,GAAG,QAAQ,CAAC,GAAG,CAAC;IACpC,MAAM,EAAE,MAAM,CAAC;IACf,IAAI,EAAE,SAAS,CAAC;CACjB,CAAC,CAAC;AAEH,MAAM,MAAM,mBAAmB,GAAG,QAAQ,CAAC,cAAc,CAAC;IACxD,MAAM,EAAE,MAAM,CAAC;IACf,IAAI,EAAE,SAAS,CAAC;CACjB,CAAC,CAAC;AAEH,wBAAgB,QAAQ,CACtB,EAAE,EAAE,UAAU,EACd,aAAa,EAAE,MAAM,EACrB,OAAO,CAAC,EAAE,QAAQ,CAAC,OAAO,CAAC;IACzB,MAAM,EAAE,YAAY,CAAC,MAAM,CAAC,CAAC;IAC7B,IAAI,EAAE,YAAY,CAAC,SAAS,CAAC,CAAC;CAC/B,CAAC,EACF,IAAI,CAAC,EAAE,WAAW,GACjB,UAAU,CA4CZ;AAED;;;;;;;;;;GAUG;AACH,wBAAgB,uBAAuB,CAAC,GAAG,EAAE,mBAAmB,QAE/D;AAED,eAAO,MAAM,aAAa,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAOhD,CAAC;AAEF,wBAAgB,eAAe,CAAC,CAAC,EAAE,OAAO,EAAE,GAAG,KAAK,EAAE,CAAC,MAAM,EAAE,GAAG,MAAM,EAAE,CAAC,WAE1E"}
1
+ {"version":3,"file":"pg.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/types/pg.ts"],"names":[],"mappings":"AAEA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AACjD,OAAO,QAAQ,EAAE,EAAc,KAAK,YAAY,EAAC,MAAM,UAAU,CAAC;AAClE,OAAO,EAAa,KAAK,SAAS,EAAC,MAAM,oCAAoC,CAAC;AAkB9E,wBAAgB,mBAAmB,CAAC,SAAS,EAAE,MAAM,GAAG,MAAM,CAiD7D;AAED,iBAAS,kBAAkB,CAAC,GAAG,EAAE,OAAO,GAAG,MAAM,CAuBhD;AAcD,wBAAgB,0BAA0B,CAAC,YAAY,EAAE,MAAM,GAAG,MAAM,CAoBvE;AAED,wBAAgB,0BAA0B,CAAC,UAAU,EAAE,MAAM,GAAG,MAAM,CAkFrE;AAED,iBAAS,iBAAiB,CAAC,IAAI,EAAE,MAAM,GAAG,MAAM,CAG/C;AAED;;;;;;;;;GASG;AACH,MAAM,MAAM,iBAAiB,GAAG,SAAS,GAAG,UAAU,CAAC;AAEvD,MAAM,MAAM,WAAW,GAAG;IACxB;;;;;OAKG;IACH,gBAAgB,CAAC,EAAE,OAAO,CAAC;CAC5B,CAAC;AAEF;;;;GAIG;AACH,eAAO,MAAM,kBAAkB,GAAI,uBAAoB,WAAgB;;;;;;;;;;;;;;;;;;2BAwBlD,OAAO;;;;;;2BAOP,OAAO;;;;;;2BAQP,MAAM,GAAG,IAAI;;;;;;2BAUb,MAAM;uBACV,MAAM,GAAG,MAAM;;;CAG9B,CAAC;AAEH,MAAM,MAAM,UAAU,GAAG,QAAQ,CAAC,GAAG,CAAC;IACpC,MAAM,EAAE,MAAM,CAAC;IACf,IAAI,EAAE,SAAS,CAAC;CACjB,CAAC,CAAC;AAEH,MAAM,MAAM,mBAAmB,GAAG,QAAQ,CAAC,cAAc,CAAC;IACxD,MAAM,EAAE,MAAM,CAAC;IACf,IAAI,EAAE,SAAS,CAAC;CACjB,CAAC,CAAC;AAEH,wBAAgB,QAAQ,CACtB,EAAE,EAAE,UAAU,EACd,aAAa,EAAE,MAAM,EACrB,OAAO,CAAC,EAAE,QAAQ,CAAC,OAAO,CAAC;IACzB,MAAM,EAAE,YAAY,CAAC,MAAM,CAAC,CAAC;IAC7B,IAAI,EAAE,YAAY,CAAC,SAAS,CAAC,CAAC;CAC/B,CAAC,EACF,IAAI,CAAC,EAAE,WAAW,GACjB,UAAU,CA4CZ;AAED;;;;;;;;;;GAUG;AACH,wBAAgB,uBAAuB,CAAC,GAAG,EAAE,mBAAmB,QAE/D;AAED,eAAO,MAAM,aAAa,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAOhD,CAAC;AAEF,wBAAgB,eAAe,CAAC,CAAC,EAAE,OAAO,EAAE,GAAG,KAAK,EAAE,CAAC,MAAM,EAAE,GAAG,MAAM,EAAE,CAAC,WAE1E"}
@@ -5,17 +5,25 @@ import postgres from "postgres";
5
5
  import { PreciseDate } from "@google-cloud/precise-date";
6
6
  import { OID } from "@postgresql-typed/oids";
7
7
  //#region ../zero-cache/src/types/pg.ts
8
+ var pgTimestampRe = /^(\d+)-(\d{2}-\d{2}) (\d{2}:\d{2}:\d{2}(?:\.\d+)?)([+-]\d{1,2}(?::\d{2})?)?(?: BC)?$/;
8
9
  function timestampToFpMillis(timestamp) {
9
- timestamp = timestamp.replace(" ", "T");
10
- const positiveOffset = timestamp.includes("+");
11
- const tzSplitIndex = positiveOffset ? timestamp.lastIndexOf("+") : timestamp.indexOf("-", timestamp.indexOf("T"));
12
- const timezoneOffset = tzSplitIndex === -1 ? void 0 : timestamp.substring(tzSplitIndex);
13
- const tsWithoutTimezone = (tzSplitIndex === -1 ? timestamp : timestamp.substring(0, tzSplitIndex)) + "Z";
10
+ const match = timestamp.match(pgTimestampRe);
11
+ if (!match) throw new Error(`Error parsing ${timestamp}`);
12
+ const [, yearStr, monthDay, time, tz] = match;
13
+ const bc = timestamp.endsWith(" BC");
14
+ let year = Number(yearStr);
15
+ if (bc) year = -(year - 1);
16
+ let isoYear;
17
+ if (year >= 0 && year <= 9999) isoYear = String(year).padStart(4, "0");
18
+ else if (year >= 0) isoYear = "+" + String(year).padStart(6, "0");
19
+ else isoYear = "-" + String(Math.abs(year)).padStart(6, "0");
20
+ const utcString = `${isoYear}-${monthDay}T${time}Z`;
14
21
  try {
15
- const fullTime = new PreciseDate(tsWithoutTimezone).getFullTime();
22
+ const fullTime = new PreciseDate(utcString).getFullTime();
16
23
  const ret = Number(fullTime / 1000000n) + Number(fullTime % 1000000n) * 1e-6;
17
- if (timezoneOffset) {
18
- const [hours, minutes] = timezoneOffset.split(":");
24
+ if (tz) {
25
+ const positiveOffset = tz.startsWith("+");
26
+ const [hours, minutes] = tz.split(":");
19
27
  const offsetMillis = (Math.abs(Number(hours)) * 60 + (minutes ? Number(minutes) : 0)) * 60 * 1e3;
20
28
  return positiveOffset ? ret - offsetMillis : ret + offsetMillis;
21
29
  }
@@ -1 +1 @@
1
- {"version":3,"file":"pg.js","names":[],"sources":["../../../../../zero-cache/src/types/pg.ts"],"sourcesContent":["import {PreciseDate} from '@google-cloud/precise-date';\nimport {OID} from '@postgresql-typed/oids';\nimport type {LogContext} from '@rocicorp/logger';\nimport postgres, {type Notice, type PostgresType} from 'postgres';\nimport {BigIntJSON, type JSONValue} from '../../../shared/src/bigint-json.ts';\nimport {randInt} from '../../../shared/src/rand.ts';\nimport {\n DATE,\n JSON,\n JSONB,\n NUMERIC,\n TIME,\n TIMESTAMP,\n TIMESTAMPTZ,\n TIMETZ,\n} from './pg-types.ts';\n\n// exported for testing.\nexport function timestampToFpMillis(timestamp: string): number {\n // Convert from PG's time string, e.g. \"1999-01-08 12:05:06+00\" to \"Z\"\n // format expected by PreciseDate.\n timestamp = timestamp.replace(' ', 'T');\n const positiveOffset = timestamp.includes('+');\n const tzSplitIndex = positiveOffset\n ? timestamp.lastIndexOf('+')\n : timestamp.indexOf('-', timestamp.indexOf('T'));\n const timezoneOffset =\n tzSplitIndex === -1 ? undefined : timestamp.substring(tzSplitIndex);\n const tsWithoutTimezone =\n (tzSplitIndex === -1 ? timestamp : timestamp.substring(0, tzSplitIndex)) +\n 'Z';\n\n try {\n // PreciseDate does not return microsecond precision unless the provided\n // timestamp is in UTC time so we need to add the timezone offset back in.\n const fullTime = new PreciseDate(tsWithoutTimezone).getFullTime();\n const millis = Number(fullTime / 1_000_000n);\n const nanos = Number(fullTime % 1_000_000n);\n const ret = millis + nanos * 1e-6; // floating point milliseconds\n\n // add back in the timezone offset\n if (timezoneOffset) {\n const [hours, minutes] = timezoneOffset.split(':');\n const offset =\n Math.abs(Number(hours)) * 60 + (minutes ? 
Number(minutes) : 0);\n const offsetMillis = offset * 60 * 1_000;\n // If it is a positive offset, we subtract the offset from the UTC\n // because we passed in the \"local time\" as if it was UTC.\n // The opposite is true for negative offsets.\n return positiveOffset ? ret - offsetMillis : ret + offsetMillis;\n }\n return ret;\n } catch (e) {\n throw new Error(`Error parsing ${timestamp}`, {cause: e});\n }\n}\n\nfunction serializeTimestamp(val: unknown): string {\n switch (typeof val) {\n case 'string':\n return val; // Let Postgres parse it\n case 'number': {\n if (Number.isInteger(val)) {\n return new PreciseDate(val).toISOString();\n }\n // Convert floating point to bigint nanoseconds.\n const nanoseconds =\n 1_000_000n * BigInt(Math.trunc(val)) +\n BigInt(Math.trunc((val % 1) * 1e6));\n return new PreciseDate(nanoseconds).toISOString();\n }\n // Note: Don't support bigint inputs until we decide what the semantics are (e.g. micros vs nanos)\n // case 'bigint':\n // return new PreciseDate(val).toISOString();\n default:\n if (val instanceof Date) {\n return val.toISOString();\n }\n }\n throw new Error(`Unsupported type \"${typeof val}\" for timestamp: ${val}`);\n}\n\nconst MILLISECONDS_PER_DAY = 24 * 60 * 60 * 1000;\n\nfunction serializeTime(x: unknown, type: 'time' | 'timetz'): string {\n switch (typeof x) {\n case 'string':\n return x; // Let Postgres parse it\n case 'number':\n return millisecondsToPostgresTime(x);\n }\n throw new Error(`Unsupported type \"${typeof x}\" for ${type}: ${x}`);\n}\n\nexport function millisecondsToPostgresTime(milliseconds: number): string {\n if (milliseconds < 0) {\n throw new Error('Milliseconds cannot be negative');\n }\n\n if (milliseconds >= MILLISECONDS_PER_DAY) {\n throw new Error(\n `Milliseconds cannot exceed 24 hours (${MILLISECONDS_PER_DAY}ms)`,\n );\n }\n\n milliseconds = Math.floor(milliseconds); // Ensure it's an integer\n\n const totalSeconds = Math.floor(milliseconds / 1000);\n const hours = 
Math.floor(totalSeconds / 3600);\n const minutes = Math.floor((totalSeconds % 3600) / 60);\n const seconds = totalSeconds % 60;\n const ms = milliseconds % 1000;\n\n return `${hours.toString().padStart(2, '0')}:${minutes.toString().padStart(2, '0')}:${seconds.toString().padStart(2, '0')}.${ms.toString().padStart(3, '0')}+00`;\n}\n\nexport function postgresTimeToMilliseconds(timeString: string): number {\n // Validate basic format\n if (!timeString || typeof timeString !== 'string') {\n throw new Error('Invalid time string: must be a non-empty string');\n }\n\n // Regular expression to match HH:MM:SS, HH:MM:SS.mmm, or HH:MM:SS+00 / HH:MM:SS.mmm+00\n // Supports optional timezone offset\n const timeRegex =\n /^(\\d{1,2}):(\\d{2}):(\\d{2})(?:\\.(\\d{1,6}))?(?:([+-])(\\d{1,2})(?::(\\d{2}))?)?$/;\n const match = timeString.match(timeRegex);\n\n if (!match) {\n throw new Error(\n `Invalid time format: \"${timeString}\". Expected HH:MM:SS[.mmm][+|-HH[:MM]]`,\n );\n }\n\n // Extract components\n const hours = parseInt(match[1], 10);\n const minutes = parseInt(match[2], 10);\n const seconds = parseInt(match[3], 10);\n // Handle optional milliseconds, pad right with zeros if needed\n let milliseconds = 0;\n if (match[4]) {\n // Pad microseconds to 6 digits\n const msString = match[4].padEnd(6, '0');\n // slice milliseconds out of the microseconds\n // e.g. 123456 -> 123, 1234 -> 123,\n milliseconds = parseInt(msString.slice(0, 3), 10);\n }\n\n // Validate ranges\n if (hours < 0 || hours > 24) {\n throw new Error(\n `Invalid hours: ${hours}. Must be between 0 and 24 (24 means end of day)`,\n );\n }\n\n if (minutes < 0 || minutes >= 60) {\n throw new Error(`Invalid minutes: ${minutes}. Must be between 0 and 59`);\n }\n\n if (seconds < 0 || seconds >= 60) {\n throw new Error(`Invalid seconds: ${seconds}. Must be between 0 and 59`);\n }\n\n if (milliseconds < 0 || milliseconds >= 1000) {\n throw new Error(\n `Invalid milliseconds: ${milliseconds}. 
Must be between 0 and 999`,\n );\n }\n\n // Special case: PostgreSQL allows 24:00:00 to represent end of day\n if (hours === 24 && (minutes !== 0 || seconds !== 0 || milliseconds !== 0)) {\n throw new Error(\n 'Invalid time: when hours is 24, minutes, seconds, and milliseconds must be 0',\n );\n }\n\n // Calculate total milliseconds\n let totalMs =\n hours * 3600000 + minutes * 60000 + seconds * 1000 + milliseconds;\n\n // Timezone Offset\n if (match[5]) {\n const sign = match[5] === '+' ? 1 : -1;\n const tzHours = parseInt(match[6], 10);\n const tzMinutes = match[7] ? parseInt(match[7], 10) : 0;\n const offsetMs = sign * (tzHours * 3600000 + tzMinutes * 60000);\n totalMs -= offsetMs;\n }\n\n // Normalize to 0-24h only if outside valid range\n if (totalMs > MILLISECONDS_PER_DAY || totalMs < 0) {\n return (\n ((totalMs % MILLISECONDS_PER_DAY) + MILLISECONDS_PER_DAY) %\n MILLISECONDS_PER_DAY\n );\n }\n\n return totalMs;\n}\n\nfunction dateToUTCMidnight(date: string): number {\n const d = new Date(date);\n return Date.UTC(d.getUTCFullYear(), d.getUTCMonth(), d.getUTCDate());\n}\n\n/**\n * The (javascript) types of objects that can be returned by our configured\n * Postgres clients. For initial-sync, these comes from the postgres.js client:\n *\n * https://github.com/porsager/postgres/blob/master/src/types.js\n *\n * and for the replication stream these come from the the node-postgres client:\n *\n * https://github.com/brianc/node-pg-types/blob/master/lib/textParsers.js\n */\nexport type PostgresValueType = JSONValue | Uint8Array;\n\nexport type TypeOptions = {\n /**\n * Sends strings directly as JSON values (i.e. without JSON stringification).\n * The application is responsible for ensuring that string inputs for JSON\n * columns are already stringified. Other data types (e.g. 
objects) will\n * still be stringified by the pg client.\n */\n sendStringAsJson?: boolean;\n};\n\n/**\n * Configures types for the Postgres.js client library (`postgres`).\n *\n * @param jsonAsString Keep JSON / JSONB values as strings instead of parsing.\n */\nexport const postgresTypeConfig = ({sendStringAsJson}: TypeOptions = {}) => ({\n // Type the type IDs as `number` so that Typescript doesn't complain about\n // referencing external types during type inference.\n types: {\n bigint: postgres.BigInt,\n json: {\n to: JSON,\n from: [JSON, JSONB],\n serialize: sendStringAsJson\n ? (x: unknown) => (typeof x === 'string' ? x : BigIntJSON.stringify(x))\n : BigIntJSON.stringify,\n parse: BigIntJSON.parse,\n },\n // Timestamps are converted to PreciseDate objects.\n timestamp: {\n to: TIMESTAMP,\n from: [TIMESTAMP, TIMESTAMPTZ],\n serialize: serializeTimestamp,\n parse: timestampToFpMillis,\n },\n // Times are converted as strings\n time: {\n to: TIME,\n from: [TIME, TIMETZ],\n serialize: (x: unknown) => serializeTime(x, 'time'),\n parse: postgresTimeToMilliseconds,\n },\n\n timetz: {\n to: TIMETZ,\n from: [TIME, TIMETZ],\n serialize: (x: unknown) => serializeTime(x, 'timetz'),\n parse: postgresTimeToMilliseconds,\n },\n\n // The DATE type is stored directly as the PG normalized date string.\n date: {\n to: DATE,\n from: [DATE],\n serialize: (x: string | Date) =>\n (x instanceof Date ? 
x : new Date(x)).toISOString(),\n parse: dateToUTCMidnight,\n },\n // Returns a `js` number which can lose precision for large numbers.\n // JS number is 53 bits so this should generally not occur.\n // An API will be provided for users to override this type.\n numeric: {\n to: NUMERIC,\n from: [NUMERIC],\n serialize: (x: number) => String(x), // pg expects a string\n parse: (x: string | number) => Number(x),\n },\n },\n});\n\nexport type PostgresDB = postgres.Sql<{\n bigint: bigint;\n json: JSONValue;\n}>;\n\nexport type PostgresTransaction = postgres.TransactionSql<{\n bigint: bigint;\n json: JSONValue;\n}>;\n\nexport function pgClient(\n lc: LogContext,\n connectionURI: string,\n options?: postgres.Options<{\n bigint: PostgresType<bigint>;\n json: PostgresType<JSONValue>;\n }>,\n opts?: TypeOptions,\n): PostgresDB {\n const onnotice = (n: Notice) => {\n // https://www.postgresql.org/docs/current/plpgsql-errors-and-messages.html#PLPGSQL-STATEMENTS-RAISE\n switch (n.severity) {\n case 'NOTICE':\n return; // silenced\n case 'DEBUG':\n lc.debug?.(n);\n return;\n case 'WARNING':\n lc.warn?.(n);\n return;\n case 'EXCEPTION':\n lc.error?.(n);\n return;\n case 'LOG':\n case 'INFO':\n default:\n lc.info?.(n);\n }\n };\n const url = new URL(connectionURI);\n const sslFlag =\n url.searchParams.get('ssl') ?? url.searchParams.get('sslmode') ?? 'prefer';\n\n let ssl: boolean | 'prefer' | {rejectUnauthorized: boolean};\n if (sslFlag === 'disable' || sslFlag === 'false') {\n ssl = false;\n } else if (sslFlag === 'no-verify') {\n ssl = {rejectUnauthorized: false};\n } else {\n ssl = sslFlag as 'prefer';\n }\n\n // Set connections to expire between 5 and 10 minutes to free up state on PG.\n const maxLifetimeSeconds = randInt(5 * 60, 10 * 60);\n\n return postgres(connectionURI, {\n ...postgresTypeConfig(opts),\n onnotice,\n ['max_lifetime']: maxLifetimeSeconds,\n ssl,\n ...options,\n });\n}\n\n/**\n * Disables any statement_timeout for the current transaction. 
By default,\n * Postgres does not impose a statement timeout, but some users and providers\n * set one at the database level (even though it is explicitly discouraged by\n * the Postgres documentation).\n *\n * Zero logic in particular often does not fit into the category of general\n * application logic; for potentially long-running operations like migrations\n * and background cleanup, the statement timeout should be disabled to prevent\n * these operations from timing out.\n */\nexport function disableStatementTimeout(sql: PostgresTransaction) {\n void sql`SET LOCAL statement_timeout = 0;`.execute();\n}\n\nexport const typeNameByOID: Record<number, string> = Object.freeze(\n Object.fromEntries(\n Object.entries(OID).map(([name, oid]) => [\n oid,\n name.startsWith('_') ? `${name.substring(1)}[]` : name,\n ]),\n ),\n);\n\nexport function isPostgresError(e: unknown, ...codes: [string, ...string[]]) {\n return e instanceof postgres.PostgresError && codes.includes(e.code);\n}\n"],"mappings":";;;;;;;AAkBA,SAAgB,oBAAoB,WAA2B;AAG7D,aAAY,UAAU,QAAQ,KAAK,IAAI;CACvC,MAAM,iBAAiB,UAAU,SAAS,IAAI;CAC9C,MAAM,eAAe,iBACjB,UAAU,YAAY,IAAI,GAC1B,UAAU,QAAQ,KAAK,UAAU,QAAQ,IAAI,CAAC;CAClD,MAAM,iBACJ,iBAAiB,KAAK,KAAA,IAAY,UAAU,UAAU,aAAa;CACrE,MAAM,qBACH,iBAAiB,KAAK,YAAY,UAAU,UAAU,GAAG,aAAa,IACvE;AAEF,KAAI;EAGF,MAAM,WAAW,IAAI,YAAY,kBAAkB,CAAC,aAAa;EAGjE,MAAM,MAFS,OAAO,WAAW,SAAW,GAC9B,OAAO,WAAW,SAAW,GACd;AAG7B,MAAI,gBAAgB;GAClB,MAAM,CAAC,OAAO,WAAW,eAAe,MAAM,IAAI;GAGlD,MAAM,gBADJ,KAAK,IAAI,OAAO,MAAM,CAAC,GAAG,MAAM,UAAU,OAAO,QAAQ,GAAG,MAChC,KAAK;AAInC,UAAO,iBAAiB,MAAM,eAAe,MAAM;;AAErD,SAAO;UACA,GAAG;AACV,QAAM,IAAI,MAAM,iBAAiB,aAAa,EAAC,OAAO,GAAE,CAAC;;;AAI7D,SAAS,mBAAmB,KAAsB;AAChD,SAAQ,OAAO,KAAf;EACE,KAAK,SACH,QAAO;EACT,KAAK;AACH,OAAI,OAAO,UAAU,IAAI,CACvB,QAAO,IAAI,YAAY,IAAI,CAAC,aAAa;AAM3C,UAAO,IAAI,YAFT,WAAa,OAAO,KAAK,MAAM,IAAI,CAAC,GACpC,OAAO,KAAK,MAAO,MAAM,IAAK,IAAI,CAAC,CACF,CAAC,aAAa;EAKnD,QACE,KAAI,eAAe,KACjB,QAAO,IAAI,aAAa;;AAG9B,OAAM,IAAI,MAAM,qBAAqB,OAAO,IAAI,mBAAmB,MAAM;;AAG3E,IAA
M,uBAAuB,OAAU,KAAK;AAE5C,SAAS,cAAc,GAAY,MAAiC;AAClE,SAAQ,OAAO,GAAf;EACE,KAAK,SACH,QAAO;EACT,KAAK,SACH,QAAO,2BAA2B,EAAE;;AAExC,OAAM,IAAI,MAAM,qBAAqB,OAAO,EAAE,QAAQ,KAAK,IAAI,IAAI;;AAGrE,SAAgB,2BAA2B,cAA8B;AACvE,KAAI,eAAe,EACjB,OAAM,IAAI,MAAM,kCAAkC;AAGpD,KAAI,gBAAgB,qBAClB,OAAM,IAAI,MACR,wCAAwC,qBAAqB,KAC9D;AAGH,gBAAe,KAAK,MAAM,aAAa;CAEvC,MAAM,eAAe,KAAK,MAAM,eAAe,IAAK;CACpD,MAAM,QAAQ,KAAK,MAAM,eAAe,KAAK;CAC7C,MAAM,UAAU,KAAK,MAAO,eAAe,OAAQ,GAAG;CACtD,MAAM,UAAU,eAAe;CAC/B,MAAM,KAAK,eAAe;AAE1B,QAAO,GAAG,MAAM,UAAU,CAAC,SAAS,GAAG,IAAI,CAAC,GAAG,QAAQ,UAAU,CAAC,SAAS,GAAG,IAAI,CAAC,GAAG,QAAQ,UAAU,CAAC,SAAS,GAAG,IAAI,CAAC,GAAG,GAAG,UAAU,CAAC,SAAS,GAAG,IAAI,CAAC;;AAG9J,SAAgB,2BAA2B,YAA4B;AAErE,KAAI,CAAC,cAAc,OAAO,eAAe,SACvC,OAAM,IAAI,MAAM,kDAAkD;CAOpE,MAAM,QAAQ,WAAW,MADvB,+EACuC;AAEzC,KAAI,CAAC,MACH,OAAM,IAAI,MACR,yBAAyB,WAAW,wCACrC;CAIH,MAAM,QAAQ,SAAS,MAAM,IAAI,GAAG;CACpC,MAAM,UAAU,SAAS,MAAM,IAAI,GAAG;CACtC,MAAM,UAAU,SAAS,MAAM,IAAI,GAAG;CAEtC,IAAI,eAAe;AACnB,KAAI,MAAM,IAAI;EAEZ,MAAM,WAAW,MAAM,GAAG,OAAO,GAAG,IAAI;AAGxC,iBAAe,SAAS,SAAS,MAAM,GAAG,EAAE,EAAE,GAAG;;AAInD,KAAI,QAAQ,KAAK,QAAQ,GACvB,OAAM,IAAI,MACR,kBAAkB,MAAM,kDACzB;AAGH,KAAI,UAAU,KAAK,WAAW,GAC5B,OAAM,IAAI,MAAM,oBAAoB,QAAQ,4BAA4B;AAG1E,KAAI,UAAU,KAAK,WAAW,GAC5B,OAAM,IAAI,MAAM,oBAAoB,QAAQ,4BAA4B;AAG1E,KAAI,eAAe,KAAK,gBAAgB,IACtC,OAAM,IAAI,MACR,yBAAyB,aAAa,6BACvC;AAIH,KAAI,UAAU,OAAO,YAAY,KAAK,YAAY,KAAK,iBAAiB,GACtE,OAAM,IAAI,MACR,+EACD;CAIH,IAAI,UACF,QAAQ,OAAU,UAAU,MAAQ,UAAU,MAAO;AAGvD,KAAI,MAAM,IAAI;EACZ,MAAM,OAAO,MAAM,OAAO,MAAM,IAAI;EACpC,MAAM,UAAU,SAAS,MAAM,IAAI,GAAG;EACtC,MAAM,YAAY,MAAM,KAAK,SAAS,MAAM,IAAI,GAAG,GAAG;EACtD,MAAM,WAAW,QAAQ,UAAU,OAAU,YAAY;AACzD,aAAW;;AAIb,KAAI,UAAU,wBAAwB,UAAU,EAC9C,SACI,UAAU,uBAAwB,wBACpC;AAIJ,QAAO;;AAGT,SAAS,kBAAkB,MAAsB;CAC/C,MAAM,IAAI,IAAI,KAAK,KAAK;AACxB,QAAO,KAAK,IAAI,EAAE,gBAAgB,EAAE,EAAE,aAAa,EAAE,EAAE,YAAY,CAAC;;;;;;;AA8BtE,IAAa,sBAAsB,EAAC,qBAAiC,EAAE,MAAM,EAG3E,OAAO;CACL,QAAQ,SAAS;CACjB,MAAM;EACJ,IAAA;EACA,MAAM,CAAA,KAAO,MAAM;EACnB,WAAW,oBACN,MAAgB,OAAO,MAAM,WAAW,IAAI,WAAW,UA
AU,EAAE,GACpE,WAAW;EACf,OAAO,WAAW;EACnB;CAED,WAAW;EACT,IAAI;EACJ,MAAM,CAAC,WAAW,YAAY;EAC9B,WAAW;EACX,OAAO;EACR;CAED,MAAM;EACJ,IAAI;EACJ,MAAM,CAAC,MAAM,OAAO;EACpB,YAAY,MAAe,cAAc,GAAG,OAAO;EACnD,OAAO;EACR;CAED,QAAQ;EACN,IAAI;EACJ,MAAM,CAAC,MAAM,OAAO;EACpB,YAAY,MAAe,cAAc,GAAG,SAAS;EACrD,OAAO;EACR;CAGD,MAAM;EACJ,IAAI;EACJ,MAAM,CAAC,KAAK;EACZ,YAAY,OACT,aAAa,OAAO,IAAI,IAAI,KAAK,EAAE,EAAE,aAAa;EACrD,OAAO;EACR;CAID,SAAS;EACP,IAAI;EACJ,MAAM,CAAC,QAAQ;EACf,YAAY,MAAc,OAAO,EAAE;EACnC,QAAQ,MAAuB,OAAO,EAAE;EACzC;CACF,EACF;AAYD,SAAgB,SACd,IACA,eACA,SAIA,MACY;CACZ,MAAM,YAAY,MAAc;AAE9B,UAAQ,EAAE,UAAV;GACE,KAAK,SACH;GACF,KAAK;AACH,OAAG,QAAQ,EAAE;AACb;GACF,KAAK;AACH,OAAG,OAAO,EAAE;AACZ;GACF,KAAK;AACH,OAAG,QAAQ,EAAE;AACb;GAGF,QACE,IAAG,OAAO,EAAE;;;CAGlB,MAAM,MAAM,IAAI,IAAI,cAAc;CAClC,MAAM,UACJ,IAAI,aAAa,IAAI,MAAM,IAAI,IAAI,aAAa,IAAI,UAAU,IAAI;CAEpE,IAAI;AACJ,KAAI,YAAY,aAAa,YAAY,QACvC,OAAM;UACG,YAAY,YACrB,OAAM,EAAC,oBAAoB,OAAM;KAEjC,OAAM;CAIR,MAAM,qBAAqB,QAAQ,KAAQ,IAAQ;AAEnD,QAAO,SAAS,eAAe;EAC7B,GAAG,mBAAmB,KAAK;EAC3B;GACC,iBAAiB;EAClB;EACA,GAAG;EACJ,CAAC;;AAkBiD,OAAO,OAC1D,OAAO,YACL,OAAO,QAAQ,IAAI,CAAC,KAAK,CAAC,MAAM,SAAS,CACvC,KACA,KAAK,WAAW,IAAI,GAAG,GAAG,KAAK,UAAU,EAAE,CAAC,MAAM,KACnD,CAAC,CACH,CACF;AAED,SAAgB,gBAAgB,GAAY,GAAG,OAA8B;AAC3E,QAAO,aAAa,SAAS,iBAAiB,MAAM,SAAS,EAAE,KAAK"}
1
+ {"version":3,"file":"pg.js","names":[],"sources":["../../../../../zero-cache/src/types/pg.ts"],"sourcesContent":["import {PreciseDate} from '@google-cloud/precise-date';\nimport {OID} from '@postgresql-typed/oids';\nimport type {LogContext} from '@rocicorp/logger';\nimport postgres, {type Notice, type PostgresType} from 'postgres';\nimport {BigIntJSON, type JSONValue} from '../../../shared/src/bigint-json.ts';\nimport {randInt} from '../../../shared/src/rand.ts';\nimport {\n DATE,\n JSON,\n JSONB,\n NUMERIC,\n TIME,\n TIMESTAMP,\n TIMESTAMPTZ,\n TIMETZ,\n} from './pg-types.ts';\n\n// Matches: YEAR-MM-DD HH:MM:SS[.fraction][+-TZ[:MM]][ BC]\nconst pgTimestampRe =\n /^(\\d+)-(\\d{2}-\\d{2}) (\\d{2}:\\d{2}:\\d{2}(?:\\.\\d+)?)([+-]\\d{1,2}(?::\\d{2})?)?(?: BC)?$/;\n\n// exported for testing.\nexport function timestampToFpMillis(timestamp: string): number {\n const match = timestamp.match(pgTimestampRe);\n if (!match) {\n throw new Error(`Error parsing ${timestamp}`);\n }\n\n const [, yearStr, monthDay, time, tz] = match;\n const bc = timestamp.endsWith(' BC');\n\n let year = Number(yearStr);\n if (bc) {\n // Postgres: 1 BC = JS year 0, 2 BC = JS year -1, N BC = -(N-1)\n year = -(year - 1);\n }\n\n // Format year as ISO 8601 expanded year if needed.\n // https://tc39.es/ecma262/#sec-expanded-years\n let isoYear: string;\n if (year >= 0 && year <= 9999) {\n isoYear = String(year).padStart(4, '0');\n } else if (year >= 0) {\n isoYear = '+' + String(year).padStart(6, '0');\n } else {\n isoYear = '-' + String(Math.abs(year)).padStart(6, '0');\n }\n\n // Build a UTC ISO string so PreciseDate returns microsecond precision.\n const utcString = `${isoYear}-${monthDay}T${time}Z`;\n\n try {\n const fullTime = new PreciseDate(utcString).getFullTime();\n const millis = Number(fullTime / 1_000_000n);\n const nanos = Number(fullTime % 1_000_000n);\n const ret = millis + nanos * 1e-6; // floating point milliseconds\n\n // Add back in the timezone offset. 
We passed local time as UTC,\n // so a positive offset means we need to subtract, and vice versa.\n if (tz) {\n const positiveOffset = tz.startsWith('+');\n const [hours, minutes] = tz.split(':');\n const offset =\n Math.abs(Number(hours)) * 60 + (minutes ? Number(minutes) : 0);\n const offsetMillis = offset * 60 * 1_000;\n return positiveOffset ? ret - offsetMillis : ret + offsetMillis;\n }\n return ret;\n } catch (e) {\n throw new Error(`Error parsing ${timestamp}`, {cause: e});\n }\n}\n\nfunction serializeTimestamp(val: unknown): string {\n switch (typeof val) {\n case 'string':\n return val; // Let Postgres parse it\n case 'number': {\n if (Number.isInteger(val)) {\n return new PreciseDate(val).toISOString();\n }\n // Convert floating point to bigint nanoseconds.\n const nanoseconds =\n 1_000_000n * BigInt(Math.trunc(val)) +\n BigInt(Math.trunc((val % 1) * 1e6));\n return new PreciseDate(nanoseconds).toISOString();\n }\n // Note: Don't support bigint inputs until we decide what the semantics are (e.g. 
micros vs nanos)\n // case 'bigint':\n // return new PreciseDate(val).toISOString();\n default:\n if (val instanceof Date) {\n return val.toISOString();\n }\n }\n throw new Error(`Unsupported type \"${typeof val}\" for timestamp: ${val}`);\n}\n\nconst MILLISECONDS_PER_DAY = 24 * 60 * 60 * 1000;\n\nfunction serializeTime(x: unknown, type: 'time' | 'timetz'): string {\n switch (typeof x) {\n case 'string':\n return x; // Let Postgres parse it\n case 'number':\n return millisecondsToPostgresTime(x);\n }\n throw new Error(`Unsupported type \"${typeof x}\" for ${type}: ${x}`);\n}\n\nexport function millisecondsToPostgresTime(milliseconds: number): string {\n if (milliseconds < 0) {\n throw new Error('Milliseconds cannot be negative');\n }\n\n if (milliseconds >= MILLISECONDS_PER_DAY) {\n throw new Error(\n `Milliseconds cannot exceed 24 hours (${MILLISECONDS_PER_DAY}ms)`,\n );\n }\n\n milliseconds = Math.floor(milliseconds); // Ensure it's an integer\n\n const totalSeconds = Math.floor(milliseconds / 1000);\n const hours = Math.floor(totalSeconds / 3600);\n const minutes = Math.floor((totalSeconds % 3600) / 60);\n const seconds = totalSeconds % 60;\n const ms = milliseconds % 1000;\n\n return `${hours.toString().padStart(2, '0')}:${minutes.toString().padStart(2, '0')}:${seconds.toString().padStart(2, '0')}.${ms.toString().padStart(3, '0')}+00`;\n}\n\nexport function postgresTimeToMilliseconds(timeString: string): number {\n // Validate basic format\n if (!timeString || typeof timeString !== 'string') {\n throw new Error('Invalid time string: must be a non-empty string');\n }\n\n // Regular expression to match HH:MM:SS, HH:MM:SS.mmm, or HH:MM:SS+00 / HH:MM:SS.mmm+00\n // Supports optional timezone offset\n const timeRegex =\n /^(\\d{1,2}):(\\d{2}):(\\d{2})(?:\\.(\\d{1,6}))?(?:([+-])(\\d{1,2})(?::(\\d{2}))?)?$/;\n const match = timeString.match(timeRegex);\n\n if (!match) {\n throw new Error(\n `Invalid time format: \"${timeString}\". 
Expected HH:MM:SS[.mmm][+|-HH[:MM]]`,\n );\n }\n\n // Extract components\n const hours = parseInt(match[1], 10);\n const minutes = parseInt(match[2], 10);\n const seconds = parseInt(match[3], 10);\n // Handle optional milliseconds, pad right with zeros if needed\n let milliseconds = 0;\n if (match[4]) {\n // Pad microseconds to 6 digits\n const msString = match[4].padEnd(6, '0');\n // slice milliseconds out of the microseconds\n // e.g. 123456 -> 123, 1234 -> 123,\n milliseconds = parseInt(msString.slice(0, 3), 10);\n }\n\n // Validate ranges\n if (hours < 0 || hours > 24) {\n throw new Error(\n `Invalid hours: ${hours}. Must be between 0 and 24 (24 means end of day)`,\n );\n }\n\n if (minutes < 0 || minutes >= 60) {\n throw new Error(`Invalid minutes: ${minutes}. Must be between 0 and 59`);\n }\n\n if (seconds < 0 || seconds >= 60) {\n throw new Error(`Invalid seconds: ${seconds}. Must be between 0 and 59`);\n }\n\n if (milliseconds < 0 || milliseconds >= 1000) {\n throw new Error(\n `Invalid milliseconds: ${milliseconds}. Must be between 0 and 999`,\n );\n }\n\n // Special case: PostgreSQL allows 24:00:00 to represent end of day\n if (hours === 24 && (minutes !== 0 || seconds !== 0 || milliseconds !== 0)) {\n throw new Error(\n 'Invalid time: when hours is 24, minutes, seconds, and milliseconds must be 0',\n );\n }\n\n // Calculate total milliseconds\n let totalMs =\n hours * 3600000 + minutes * 60000 + seconds * 1000 + milliseconds;\n\n // Timezone Offset\n if (match[5]) {\n const sign = match[5] === '+' ? 1 : -1;\n const tzHours = parseInt(match[6], 10);\n const tzMinutes = match[7] ? 
parseInt(match[7], 10) : 0;\n const offsetMs = sign * (tzHours * 3600000 + tzMinutes * 60000);\n totalMs -= offsetMs;\n }\n\n // Normalize to 0-24h only if outside valid range\n if (totalMs > MILLISECONDS_PER_DAY || totalMs < 0) {\n return (\n ((totalMs % MILLISECONDS_PER_DAY) + MILLISECONDS_PER_DAY) %\n MILLISECONDS_PER_DAY\n );\n }\n\n return totalMs;\n}\n\nfunction dateToUTCMidnight(date: string): number {\n const d = new Date(date);\n return Date.UTC(d.getUTCFullYear(), d.getUTCMonth(), d.getUTCDate());\n}\n\n/**\n * The (javascript) types of objects that can be returned by our configured\n * Postgres clients. For initial-sync, these comes from the postgres.js client:\n *\n * https://github.com/porsager/postgres/blob/master/src/types.js\n *\n * and for the replication stream these come from the the node-postgres client:\n *\n * https://github.com/brianc/node-pg-types/blob/master/lib/textParsers.js\n */\nexport type PostgresValueType = JSONValue | Uint8Array;\n\nexport type TypeOptions = {\n /**\n * Sends strings directly as JSON values (i.e. without JSON stringification).\n * The application is responsible for ensuring that string inputs for JSON\n * columns are already stringified. Other data types (e.g. objects) will\n * still be stringified by the pg client.\n */\n sendStringAsJson?: boolean;\n};\n\n/**\n * Configures types for the Postgres.js client library (`postgres`).\n *\n * @param jsonAsString Keep JSON / JSONB values as strings instead of parsing.\n */\nexport const postgresTypeConfig = ({sendStringAsJson}: TypeOptions = {}) => ({\n // Type the type IDs as `number` so that Typescript doesn't complain about\n // referencing external types during type inference.\n types: {\n bigint: postgres.BigInt,\n json: {\n to: JSON,\n from: [JSON, JSONB],\n serialize: sendStringAsJson\n ? (x: unknown) => (typeof x === 'string' ? 
x : BigIntJSON.stringify(x))\n : BigIntJSON.stringify,\n parse: BigIntJSON.parse,\n },\n // Timestamps are converted to PreciseDate objects.\n timestamp: {\n to: TIMESTAMP,\n from: [TIMESTAMP, TIMESTAMPTZ],\n serialize: serializeTimestamp,\n parse: timestampToFpMillis,\n },\n // Times are converted as strings\n time: {\n to: TIME,\n from: [TIME, TIMETZ],\n serialize: (x: unknown) => serializeTime(x, 'time'),\n parse: postgresTimeToMilliseconds,\n },\n\n timetz: {\n to: TIMETZ,\n from: [TIME, TIMETZ],\n serialize: (x: unknown) => serializeTime(x, 'timetz'),\n parse: postgresTimeToMilliseconds,\n },\n\n // The DATE type is stored directly as the PG normalized date string.\n date: {\n to: DATE,\n from: [DATE],\n serialize: (x: string | Date) =>\n (x instanceof Date ? x : new Date(x)).toISOString(),\n parse: dateToUTCMidnight,\n },\n // Returns a `js` number which can lose precision for large numbers.\n // JS number is 53 bits so this should generally not occur.\n // An API will be provided for users to override this type.\n numeric: {\n to: NUMERIC,\n from: [NUMERIC],\n serialize: (x: number) => String(x), // pg expects a string\n parse: (x: string | number) => Number(x),\n },\n },\n});\n\nexport type PostgresDB = postgres.Sql<{\n bigint: bigint;\n json: JSONValue;\n}>;\n\nexport type PostgresTransaction = postgres.TransactionSql<{\n bigint: bigint;\n json: JSONValue;\n}>;\n\nexport function pgClient(\n lc: LogContext,\n connectionURI: string,\n options?: postgres.Options<{\n bigint: PostgresType<bigint>;\n json: PostgresType<JSONValue>;\n }>,\n opts?: TypeOptions,\n): PostgresDB {\n const onnotice = (n: Notice) => {\n // https://www.postgresql.org/docs/current/plpgsql-errors-and-messages.html#PLPGSQL-STATEMENTS-RAISE\n switch (n.severity) {\n case 'NOTICE':\n return; // silenced\n case 'DEBUG':\n lc.debug?.(n);\n return;\n case 'WARNING':\n lc.warn?.(n);\n return;\n case 'EXCEPTION':\n lc.error?.(n);\n return;\n case 'LOG':\n case 'INFO':\n default:\n lc.info?.(n);\n 
}\n };\n const url = new URL(connectionURI);\n const sslFlag =\n url.searchParams.get('ssl') ?? url.searchParams.get('sslmode') ?? 'prefer';\n\n let ssl: boolean | 'prefer' | {rejectUnauthorized: boolean};\n if (sslFlag === 'disable' || sslFlag === 'false') {\n ssl = false;\n } else if (sslFlag === 'no-verify') {\n ssl = {rejectUnauthorized: false};\n } else {\n ssl = sslFlag as 'prefer';\n }\n\n // Set connections to expire between 5 and 10 minutes to free up state on PG.\n const maxLifetimeSeconds = randInt(5 * 60, 10 * 60);\n\n return postgres(connectionURI, {\n ...postgresTypeConfig(opts),\n onnotice,\n ['max_lifetime']: maxLifetimeSeconds,\n ssl,\n ...options,\n });\n}\n\n/**\n * Disables any statement_timeout for the current transaction. By default,\n * Postgres does not impose a statement timeout, but some users and providers\n * set one at the database level (even though it is explicitly discouraged by\n * the Postgres documentation).\n *\n * Zero logic in particular often does not fit into the category of general\n * application logic; for potentially long-running operations like migrations\n * and background cleanup, the statement timeout should be disabled to prevent\n * these operations from timing out.\n */\nexport function disableStatementTimeout(sql: PostgresTransaction) {\n void sql`SET LOCAL statement_timeout = 0;`.execute();\n}\n\nexport const typeNameByOID: Record<number, string> = Object.freeze(\n Object.fromEntries(\n Object.entries(OID).map(([name, oid]) => [\n oid,\n name.startsWith('_') ? 
`${name.substring(1)}[]` : name,\n ]),\n ),\n);\n\nexport function isPostgresError(e: unknown, ...codes: [string, ...string[]]) {\n return e instanceof postgres.PostgresError && codes.includes(e.code);\n}\n"],"mappings":";;;;;;;AAkBA,IAAM,gBACJ;AAGF,SAAgB,oBAAoB,WAA2B;CAC7D,MAAM,QAAQ,UAAU,MAAM,cAAc;AAC5C,KAAI,CAAC,MACH,OAAM,IAAI,MAAM,iBAAiB,YAAY;CAG/C,MAAM,GAAG,SAAS,UAAU,MAAM,MAAM;CACxC,MAAM,KAAK,UAAU,SAAS,MAAM;CAEpC,IAAI,OAAO,OAAO,QAAQ;AAC1B,KAAI,GAEF,QAAO,EAAE,OAAO;CAKlB,IAAI;AACJ,KAAI,QAAQ,KAAK,QAAQ,KACvB,WAAU,OAAO,KAAK,CAAC,SAAS,GAAG,IAAI;UAC9B,QAAQ,EACjB,WAAU,MAAM,OAAO,KAAK,CAAC,SAAS,GAAG,IAAI;KAE7C,WAAU,MAAM,OAAO,KAAK,IAAI,KAAK,CAAC,CAAC,SAAS,GAAG,IAAI;CAIzD,MAAM,YAAY,GAAG,QAAQ,GAAG,SAAS,GAAG,KAAK;AAEjD,KAAI;EACF,MAAM,WAAW,IAAI,YAAY,UAAU,CAAC,aAAa;EAGzD,MAAM,MAFS,OAAO,WAAW,SAAW,GAC9B,OAAO,WAAW,SAAW,GACd;AAI7B,MAAI,IAAI;GACN,MAAM,iBAAiB,GAAG,WAAW,IAAI;GACzC,MAAM,CAAC,OAAO,WAAW,GAAG,MAAM,IAAI;GAGtC,MAAM,gBADJ,KAAK,IAAI,OAAO,MAAM,CAAC,GAAG,MAAM,UAAU,OAAO,QAAQ,GAAG,MAChC,KAAK;AACnC,UAAO,iBAAiB,MAAM,eAAe,MAAM;;AAErD,SAAO;UACA,GAAG;AACV,QAAM,IAAI,MAAM,iBAAiB,aAAa,EAAC,OAAO,GAAE,CAAC;;;AAI7D,SAAS,mBAAmB,KAAsB;AAChD,SAAQ,OAAO,KAAf;EACE,KAAK,SACH,QAAO;EACT,KAAK;AACH,OAAI,OAAO,UAAU,IAAI,CACvB,QAAO,IAAI,YAAY,IAAI,CAAC,aAAa;AAM3C,UAAO,IAAI,YAFT,WAAa,OAAO,KAAK,MAAM,IAAI,CAAC,GACpC,OAAO,KAAK,MAAO,MAAM,IAAK,IAAI,CAAC,CACF,CAAC,aAAa;EAKnD,QACE,KAAI,eAAe,KACjB,QAAO,IAAI,aAAa;;AAG9B,OAAM,IAAI,MAAM,qBAAqB,OAAO,IAAI,mBAAmB,MAAM;;AAG3E,IAAM,uBAAuB,OAAU,KAAK;AAE5C,SAAS,cAAc,GAAY,MAAiC;AAClE,SAAQ,OAAO,GAAf;EACE,KAAK,SACH,QAAO;EACT,KAAK,SACH,QAAO,2BAA2B,EAAE;;AAExC,OAAM,IAAI,MAAM,qBAAqB,OAAO,EAAE,QAAQ,KAAK,IAAI,IAAI;;AAGrE,SAAgB,2BAA2B,cAA8B;AACvE,KAAI,eAAe,EACjB,OAAM,IAAI,MAAM,kCAAkC;AAGpD,KAAI,gBAAgB,qBAClB,OAAM,IAAI,MACR,wCAAwC,qBAAqB,KAC9D;AAGH,gBAAe,KAAK,MAAM,aAAa;CAEvC,MAAM,eAAe,KAAK,MAAM,eAAe,IAAK;CACpD,MAAM,QAAQ,KAAK,MAAM,eAAe,KAAK;CAC7C,MAAM,UAAU,KAAK,MAAO,eAAe,OAAQ,GAAG;CACtD,MAAM,UAAU,eAAe;CAC/B,MAAM,KAAK,eAAe;AAE1B,QAAO,GAAG,MAAM,UAAU,CAAC,SAAS,GAAG,IAAI,CAAC,GAAG,QAAQ,UA
AU,CAAC,SAAS,GAAG,IAAI,CAAC,GAAG,QAAQ,UAAU,CAAC,SAAS,GAAG,IAAI,CAAC,GAAG,GAAG,UAAU,CAAC,SAAS,GAAG,IAAI,CAAC;;AAG9J,SAAgB,2BAA2B,YAA4B;AAErE,KAAI,CAAC,cAAc,OAAO,eAAe,SACvC,OAAM,IAAI,MAAM,kDAAkD;CAOpE,MAAM,QAAQ,WAAW,MADvB,+EACuC;AAEzC,KAAI,CAAC,MACH,OAAM,IAAI,MACR,yBAAyB,WAAW,wCACrC;CAIH,MAAM,QAAQ,SAAS,MAAM,IAAI,GAAG;CACpC,MAAM,UAAU,SAAS,MAAM,IAAI,GAAG;CACtC,MAAM,UAAU,SAAS,MAAM,IAAI,GAAG;CAEtC,IAAI,eAAe;AACnB,KAAI,MAAM,IAAI;EAEZ,MAAM,WAAW,MAAM,GAAG,OAAO,GAAG,IAAI;AAGxC,iBAAe,SAAS,SAAS,MAAM,GAAG,EAAE,EAAE,GAAG;;AAInD,KAAI,QAAQ,KAAK,QAAQ,GACvB,OAAM,IAAI,MACR,kBAAkB,MAAM,kDACzB;AAGH,KAAI,UAAU,KAAK,WAAW,GAC5B,OAAM,IAAI,MAAM,oBAAoB,QAAQ,4BAA4B;AAG1E,KAAI,UAAU,KAAK,WAAW,GAC5B,OAAM,IAAI,MAAM,oBAAoB,QAAQ,4BAA4B;AAG1E,KAAI,eAAe,KAAK,gBAAgB,IACtC,OAAM,IAAI,MACR,yBAAyB,aAAa,6BACvC;AAIH,KAAI,UAAU,OAAO,YAAY,KAAK,YAAY,KAAK,iBAAiB,GACtE,OAAM,IAAI,MACR,+EACD;CAIH,IAAI,UACF,QAAQ,OAAU,UAAU,MAAQ,UAAU,MAAO;AAGvD,KAAI,MAAM,IAAI;EACZ,MAAM,OAAO,MAAM,OAAO,MAAM,IAAI;EACpC,MAAM,UAAU,SAAS,MAAM,IAAI,GAAG;EACtC,MAAM,YAAY,MAAM,KAAK,SAAS,MAAM,IAAI,GAAG,GAAG;EACtD,MAAM,WAAW,QAAQ,UAAU,OAAU,YAAY;AACzD,aAAW;;AAIb,KAAI,UAAU,wBAAwB,UAAU,EAC9C,SACI,UAAU,uBAAwB,wBACpC;AAIJ,QAAO;;AAGT,SAAS,kBAAkB,MAAsB;CAC/C,MAAM,IAAI,IAAI,KAAK,KAAK;AACxB,QAAO,KAAK,IAAI,EAAE,gBAAgB,EAAE,EAAE,aAAa,EAAE,EAAE,YAAY,CAAC;;;;;;;AA8BtE,IAAa,sBAAsB,EAAC,qBAAiC,EAAE,MAAM,EAG3E,OAAO;CACL,QAAQ,SAAS;CACjB,MAAM;EACJ,IAAA;EACA,MAAM,CAAA,KAAO,MAAM;EACnB,WAAW,oBACN,MAAgB,OAAO,MAAM,WAAW,IAAI,WAAW,UAAU,EAAE,GACpE,WAAW;EACf,OAAO,WAAW;EACnB;CAED,WAAW;EACT,IAAI;EACJ,MAAM,CAAC,WAAW,YAAY;EAC9B,WAAW;EACX,OAAO;EACR;CAED,MAAM;EACJ,IAAI;EACJ,MAAM,CAAC,MAAM,OAAO;EACpB,YAAY,MAAe,cAAc,GAAG,OAAO;EACnD,OAAO;EACR;CAED,QAAQ;EACN,IAAI;EACJ,MAAM,CAAC,MAAM,OAAO;EACpB,YAAY,MAAe,cAAc,GAAG,SAAS;EACrD,OAAO;EACR;CAGD,MAAM;EACJ,IAAI;EACJ,MAAM,CAAC,KAAK;EACZ,YAAY,OACT,aAAa,OAAO,IAAI,IAAI,KAAK,EAAE,EAAE,aAAa;EACrD,OAAO;EACR;CAID,SAAS;EACP,IAAI;EACJ,MAAM,CAAC,QAAQ;EACf,YAAY,MAAc,OAAO,EAAE;EACnC,QAAQ,MAAuB,OAAO,EAAE;EACzC;CACF,EACF;AAYD,SAAgB,SACd,IACA,eACA,SAIA,MACY;CA
CZ,MAAM,YAAY,MAAc;AAE9B,UAAQ,EAAE,UAAV;GACE,KAAK,SACH;GACF,KAAK;AACH,OAAG,QAAQ,EAAE;AACb;GACF,KAAK;AACH,OAAG,OAAO,EAAE;AACZ;GACF,KAAK;AACH,OAAG,QAAQ,EAAE;AACb;GAGF,QACE,IAAG,OAAO,EAAE;;;CAGlB,MAAM,MAAM,IAAI,IAAI,cAAc;CAClC,MAAM,UACJ,IAAI,aAAa,IAAI,MAAM,IAAI,IAAI,aAAa,IAAI,UAAU,IAAI;CAEpE,IAAI;AACJ,KAAI,YAAY,aAAa,YAAY,QACvC,OAAM;UACG,YAAY,YACrB,OAAM,EAAC,oBAAoB,OAAM;KAEjC,OAAM;CAIR,MAAM,qBAAqB,QAAQ,KAAQ,IAAQ;AAEnD,QAAO,SAAS,eAAe;EAC7B,GAAG,mBAAmB,KAAK;EAC3B;GACC,iBAAiB;EAClB;EACA,GAAG;EACJ,CAAC;;AAkBiD,OAAO,OAC1D,OAAO,YACL,OAAO,QAAQ,IAAI,CAAC,KAAK,CAAC,MAAM,SAAS,CACvC,KACA,KAAK,WAAW,IAAI,GAAG,GAAG,KAAK,UAAU,EAAE,CAAC,MAAM,KACnD,CAAC,CACH,CACF;AAED,SAAgB,gBAAgB,GAAY,GAAG,OAA8B;AAC3E,QAAO,aAAa,SAAS,iBAAiB,MAAM,SAAS,EAAE,KAAK"}
@@ -1,6 +1,5 @@
1
1
  import type { LogContext } from '@rocicorp/logger';
2
2
  import * as v from '../../../shared/src/valita.ts';
3
- import { Database } from '../../../zqlite/src/db.ts';
4
3
  import type { ReplicaOptions } from '../config/zero-config.ts';
5
4
  import { Notifier } from '../services/replicator/notifier.ts';
6
5
  import type { ReplicaStateNotifier, Replicator } from '../services/replicator/replicator.ts';
@@ -8,6 +7,7 @@ import { type PragmaConfig } from '../services/replicator/write-worker-client.ts
8
7
  import type { Worker } from '../types/processes.ts';
9
8
  export declare const replicaFileModeSchema: v.Type<"backup" | "serving" | "serving-copy">;
10
9
  export type ReplicaFileMode = v.Infer<typeof replicaFileModeSchema>;
10
+ export type WalMode = 'wal' | 'wal2';
11
11
  export declare function replicaFileName(replicaFile: string, mode: ReplicaFileMode): string;
12
12
  /**
13
13
  * Returns the PragmaConfig for a given replica file mode.
@@ -15,7 +15,10 @@ export declare function replicaFileName(replicaFile: string, mode: ReplicaFileMo
15
15
  * the write worker thread to apply the same pragma settings.
16
16
  */
17
17
  export declare function getPragmaConfig(mode: ReplicaFileMode): PragmaConfig;
18
- export declare function setupReplica(lc: LogContext, mode: ReplicaFileMode, replicaOptions: ReplicaOptions): Promise<Database>;
18
+ export declare function setupReplica(lc: LogContext, mode: ReplicaFileMode, replicaOptions: ReplicaOptions): Promise<{
19
+ file: string;
20
+ walMode: WalMode;
21
+ }>;
19
22
  export declare function setUpMessageHandlers(lc: LogContext, replicator: Replicator, parent: Worker): void;
20
23
  export declare function handleSubscriptionsFrom(lc: LogContext, subscriber: Worker, notifier: ReplicaStateNotifier): void;
21
24
  /**
@@ -1 +1 @@
1
- {"version":3,"file":"replicator.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/workers/replicator.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,CAAC,MAAM,+BAA+B,CAAC;AACnD,OAAO,EAAC,QAAQ,EAAC,MAAM,2BAA2B,CAAC;AACnD,OAAO,KAAK,EAAC,cAAc,EAAC,MAAM,0BAA0B,CAAC;AAG7D,OAAO,EAAC,QAAQ,EAAC,MAAM,oCAAoC,CAAC;AAC5D,OAAO,KAAK,EAEV,oBAAoB,EACpB,UAAU,EACX,MAAM,sCAAsC,CAAC;AAK9C,OAAO,EAEL,KAAK,YAAY,EAClB,MAAM,+CAA+C,CAAC;AACvD,OAAO,KAAK,EAAC,MAAM,EAAC,MAAM,uBAAuB,CAAC;AAElD,eAAO,MAAM,qBAAqB,+CAIjC,CAAC;AAEF,MAAM,MAAM,eAAe,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,qBAAqB,CAAC,CAAC;AAEpE,wBAAgB,eAAe,CAAC,WAAW,EAAE,MAAM,EAAE,IAAI,EAAE,eAAe,UAEzE;AAyFD;;;;GAIG;AACH,wBAAgB,eAAe,CAAC,IAAI,EAAE,eAAe,GAAG,YAAY,CAMnE;AAED,wBAAsB,YAAY,CAChC,EAAE,EAAE,UAAU,EACd,IAAI,EAAE,eAAe,EACrB,cAAc,EAAE,cAAc,GAC7B,OAAO,CAAC,QAAQ,CAAC,CA8BnB;AAED,wBAAgB,oBAAoB,CAClC,EAAE,EAAE,UAAU,EACd,UAAU,EAAE,UAAU,EACtB,MAAM,EAAE,MAAM,QAGf;AAID,wBAAgB,uBAAuB,CACrC,EAAE,EAAE,UAAU,EACd,UAAU,EAAE,MAAM,EAClB,QAAQ,EAAE,oBAAoB,QA8B/B;AAED;;;;GAIG;AACH,wBAAgB,kBAAkB,CAAC,GAAG,EAAE,UAAU,EAAE,MAAM,EAAE,MAAM,GAAG,QAAQ,CAM5E;AAED,wBAAgB,WAAW,CAAC,GAAG,EAAE,UAAU,EAAE,MAAM,EAAE,MAAM,QAE1D"}
1
+ {"version":3,"file":"replicator.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/workers/replicator.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,CAAC,MAAM,+BAA+B,CAAC;AAEnD,OAAO,KAAK,EAAC,cAAc,EAAC,MAAM,0BAA0B,CAAC;AAG7D,OAAO,EAAC,QAAQ,EAAC,MAAM,oCAAoC,CAAC;AAC5D,OAAO,KAAK,EAEV,oBAAoB,EACpB,UAAU,EACX,MAAM,sCAAsC,CAAC;AAK9C,OAAO,EAEL,KAAK,YAAY,EAClB,MAAM,+CAA+C,CAAC;AACvD,OAAO,KAAK,EAAC,MAAM,EAAC,MAAM,uBAAuB,CAAC;AAElD,eAAO,MAAM,qBAAqB,+CAIjC,CAAC;AAEF,MAAM,MAAM,eAAe,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,qBAAqB,CAAC,CAAC;AAEpE,MAAM,MAAM,OAAO,GAAG,KAAK,GAAG,MAAM,CAAC;AAErC,wBAAgB,eAAe,CAAC,WAAW,EAAE,MAAM,EAAE,IAAI,EAAE,eAAe,UAEzE;AA0FD;;;;GAIG;AACH,wBAAgB,eAAe,CAAC,IAAI,EAAE,eAAe,GAAG,YAAY,CAMnE;AAED,wBAAgB,YAAY,CAC1B,EAAE,EAAE,UAAU,EACd,IAAI,EAAE,eAAe,EACrB,cAAc,EAAE,cAAc;UAhGd,MAAM;aAAW,OAAO;GA+HzC;AAED,wBAAgB,oBAAoB,CAClC,EAAE,EAAE,UAAU,EACd,UAAU,EAAE,UAAU,EACtB,MAAM,EAAE,MAAM,QAGf;AAID,wBAAgB,uBAAuB,CACrC,EAAE,EAAE,UAAU,EACd,UAAU,EAAE,MAAM,EAClB,QAAQ,EAAE,oBAAoB,QA8B/B;AAED;;;;GAIG;AACH,wBAAgB,kBAAkB,CAAC,GAAG,EAAE,UAAU,EAAE,MAAM,EAAE,MAAM,GAAG,QAAQ,CAM5E;AAED,wBAAgB,WAAW,CAAC,GAAG,EAAE,UAAU,EAAE,MAAM,EAAE,MAAM,QAE1D"}
@@ -13,7 +13,7 @@ function replicaFileName(replicaFile, mode) {
13
13
  }
14
14
  var MILLIS_PER_HOUR = 1e3 * 60 * 60;
15
15
  var MB = 1024 * 1024;
16
- async function connect(lc, { file, vacuumIntervalHours }, walMode, mode) {
16
+ async function prepare(lc, { file, vacuumIntervalHours }, walMode, mode) {
17
17
  const replica = new Database(lc, file);
18
18
  await upgradeReplica(lc, `${mode}-replica`, file);
19
19
  await setJournalMode(lc, replica, "delete");
@@ -42,7 +42,11 @@ async function connect(lc, { file, vacuumIntervalHours }, walMode, mode) {
42
42
  applyPragmas(replica, getPragmaConfig(mode));
43
43
  replica.pragma("optimize = 0x10002");
44
44
  lc.info?.(`optimized ${file}`);
45
- return replica;
45
+ replica.close();
46
+ return {
47
+ file,
48
+ walMode
49
+ };
46
50
  }
47
51
  async function setJournalMode(lc, replica, mode) {
48
52
  lc.info?.(`setting ${replica.name} to ${mode} mode`);
@@ -71,10 +75,10 @@ function getPragmaConfig(mode) {
71
75
  walAutocheckpoint: mode === "backup" ? 0 : void 0
72
76
  };
73
77
  }
74
- async function setupReplica(lc, mode, replicaOptions) {
78
+ function setupReplica(lc, mode, replicaOptions) {
75
79
  lc.info?.(`setting up ${mode} replica`);
76
80
  switch (mode) {
77
- case "backup": return await connect(lc, replicaOptions, "wal", mode);
81
+ case "backup": return prepare(lc, replicaOptions, "wal", mode);
78
82
  case "serving-copy": {
79
83
  const { file } = replicaOptions;
80
84
  const copyLocation = replicaFileName(file, mode);
@@ -85,12 +89,12 @@ async function setupReplica(lc, mode, replicaOptions) {
85
89
  replica.prepare(`VACUUM INTO ?`).run(copyLocation);
86
90
  replica.close();
87
91
  lc.info?.(`finished copy (${Date.now() - start} ms)`);
88
- return connect(lc, {
92
+ return prepare(lc, {
89
93
  ...replicaOptions,
90
94
  file: copyLocation
91
95
  }, "wal2", mode);
92
96
  }
93
- case "serving": return connect(lc, replicaOptions, "wal2", mode);
97
+ case "serving": return prepare(lc, replicaOptions, "wal2", mode);
94
98
  default: throw new Error(`Invalid ReplicaMode ${mode}`);
95
99
  }
96
100
  }
@@ -1 +1 @@
1
- {"version":3,"file":"replicator.js","names":[],"sources":["../../../../../zero-cache/src/workers/replicator.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {sleep} from '../../../shared/src/sleep.ts';\nimport * as v from '../../../shared/src/valita.ts';\nimport {Database} from '../../../zqlite/src/db.ts';\nimport type {ReplicaOptions} from '../config/zero-config.ts';\nimport {deleteLiteDB} from '../db/delete-lite-db.ts';\nimport {upgradeReplica} from '../services/change-source/common/replica-schema.ts';\nimport {Notifier} from '../services/replicator/notifier.ts';\nimport type {\n ReplicaState,\n ReplicaStateNotifier,\n Replicator,\n} from '../services/replicator/replicator.ts';\nimport {\n getAscendingEvents,\n recordEvent,\n} from '../services/replicator/schema/replication-state.ts';\nimport {\n applyPragmas,\n type PragmaConfig,\n} from '../services/replicator/write-worker-client.ts';\nimport type {Worker} from '../types/processes.ts';\n\nexport const replicaFileModeSchema = v.literalUnion(\n 'serving',\n 'serving-copy',\n 'backup',\n);\n\nexport type ReplicaFileMode = v.Infer<typeof replicaFileModeSchema>;\n\nexport function replicaFileName(replicaFile: string, mode: ReplicaFileMode) {\n return mode === 'serving-copy' ? `${replicaFile}-serving-copy` : replicaFile;\n}\n\nconst MILLIS_PER_HOUR = 1000 * 60 * 60;\nconst MB = 1024 * 1024;\n\nasync function connect(\n lc: LogContext,\n {file, vacuumIntervalHours}: ReplicaOptions,\n walMode: 'wal' | 'wal2',\n mode: ReplicaFileMode,\n): Promise<Database> {\n const replica = new Database(lc, file);\n\n // Perform any upgrades to the replica in case the backup is an\n // earlier version.\n await upgradeReplica(lc, `${mode}-replica`, file);\n\n // Start by folding any (e.g. 
restored) WAL(2) files into the main db.\n await setJournalMode(lc, replica, 'delete');\n\n const [{page_size: pageSize}] = replica.pragma<{page_size: number}>(\n 'page_size',\n );\n const [{page_count: pageCount}] = replica.pragma<{page_count: number}>(\n 'page_count',\n );\n const [{freelist_count: freelistCount}] = replica.pragma<{\n freelist_count: number;\n }>('freelist_count');\n\n const dbSize = ((pageCount * pageSize) / MB).toFixed(2);\n const freelistSize = ((freelistCount * pageSize) / MB).toFixed(2);\n\n // TODO: Consider adding a freelist size or ratio based vacuum trigger.\n lc.info?.(`Size of db ${file}: ${dbSize} MB (${freelistSize} MB freeable)`);\n\n // Check for the VACUUM threshold.\n const events = getAscendingEvents(replica);\n lc.debug?.(`Runtime events for db ${file}`, {events});\n if (vacuumIntervalHours !== undefined) {\n const millisSinceLastEvent =\n Date.now() - (events.at(-1)?.timestamp.getTime() ?? 0);\n if (millisSinceLastEvent / MILLIS_PER_HOUR > vacuumIntervalHours) {\n lc.info?.(`Performing maintenance cleanup on ${file}`);\n const t0 = performance.now();\n replica.unsafeMode(true);\n replica.pragma('journal_mode = OFF');\n replica.exec('VACUUM');\n recordEvent(replica, 'vacuum');\n replica.unsafeMode(false);\n const t1 = performance.now();\n lc.info?.(`VACUUM completed (${t1 - t0} ms)`);\n }\n }\n\n await setJournalMode(lc, replica, walMode);\n\n const pragmas = getPragmaConfig(mode);\n applyPragmas(replica, pragmas);\n\n replica.pragma('optimize = 0x10002');\n lc.info?.(`optimized ${file}`);\n return replica;\n}\n\n// Setting the journal_mode requires an exclusive lock on the replica.\n// Add resilience against random replica reads (for stats, etc.) by\n// retrying if the database is locked. 
Note that the busy_timeout doesn't\n// work here.\nasync function setJournalMode(\n lc: LogContext,\n replica: Database,\n mode: 'delete' | 'wal' | 'wal2',\n) {\n lc.info?.(`setting ${replica.name} to ${mode} mode`);\n let err: unknown;\n for (let i = 0; i < 5; i++) {\n try {\n replica.pragma(`journal_mode = ${mode}`);\n return;\n } catch (e) {\n lc.warn?.(`error setting journal_mode to ${mode} (attempt ${i + 1})`, e);\n err = e;\n }\n await sleep(500);\n }\n throw err;\n}\n\n/**\n * Returns the PragmaConfig for a given replica file mode.\n * This is used by both the main thread (setupReplica) and\n * the write worker thread to apply the same pragma settings.\n */\nexport function getPragmaConfig(mode: ReplicaFileMode): PragmaConfig {\n return {\n busyTimeout: 30000,\n analysisLimit: 1000,\n walAutocheckpoint: mode === 'backup' ? 0 : undefined,\n };\n}\n\nexport async function setupReplica(\n lc: LogContext,\n mode: ReplicaFileMode,\n replicaOptions: ReplicaOptions,\n): Promise<Database> {\n lc.info?.(`setting up ${mode} replica`);\n\n switch (mode) {\n case 'backup':\n return await connect(lc, replicaOptions, 'wal', mode);\n\n case 'serving-copy': {\n // In 'serving-copy' mode, the original file is being used for 'backup'\n // mode, so we make a copy for servicing sync requests.\n const {file} = replicaOptions;\n const copyLocation = replicaFileName(file, mode);\n deleteLiteDB(copyLocation);\n\n const start = Date.now();\n lc.info?.(`copying ${file} to ${copyLocation}`);\n const replica = new Database(lc, file);\n replica.prepare(`VACUUM INTO ?`).run(copyLocation);\n replica.close();\n lc.info?.(`finished copy (${Date.now() - start} ms)`);\n\n return connect(lc, {...replicaOptions, file: copyLocation}, 'wal2', mode);\n }\n\n case 'serving':\n return connect(lc, replicaOptions, 'wal2', mode);\n\n default:\n throw new Error(`Invalid ReplicaMode ${mode}`);\n }\n}\n\nexport function setUpMessageHandlers(\n lc: LogContext,\n replicator: Replicator,\n parent: Worker,\n) 
{\n handleSubscriptionsFrom(lc, parent, replicator);\n}\n\ntype Notification = ['notify', ReplicaState];\n\nexport function handleSubscriptionsFrom(\n lc: LogContext,\n subscriber: Worker,\n notifier: ReplicaStateNotifier,\n) {\n subscriber.onMessageType('subscribe', async () => {\n const subscription = notifier.subscribe();\n\n subscriber.on('close', () => {\n lc.debug?.(`closing replication subscription from ${subscriber.pid}`);\n subscription.cancel();\n });\n\n for await (const msg of subscription) {\n try {\n subscriber.send<Notification>(['notify', msg]);\n } catch (e) {\n const log =\n e instanceof Error &&\n 'code' in e &&\n // This can happen in a race condition if the subscribing process\n // is closed before the 'close' message is processed.\n e.code === 'ERR_IPC_CHANNEL_CLOSED'\n ? 'warn'\n : 'error';\n\n lc[log]?.(\n `error sending replicator notification to ${subscriber.pid}: ${String(e)}`,\n e,\n );\n }\n }\n });\n}\n\n/**\n * Creates a Notifier to relay notifications the notifier of another Worker.\n * This does not send the initial subscription message. 
Use {@link subscribeTo}\n * to initiate the subscription.\n */\nexport function createNotifierFrom(_lc: LogContext, source: Worker): Notifier {\n const notifier = new Notifier();\n source.onMessageType<Notification>('notify', msg =>\n notifier.notifySubscribers(msg),\n );\n return notifier;\n}\n\nexport function subscribeTo(_lc: LogContext, source: Worker) {\n source.send(['subscribe', {}]);\n}\n"],"mappings":";;;;;;;;;AAuBA,IAAa,wBAAwB,aACnC,WACA,gBACA,SACD;AAID,SAAgB,gBAAgB,aAAqB,MAAuB;AAC1E,QAAO,SAAS,iBAAiB,GAAG,YAAY,iBAAiB;;AAGnE,IAAM,kBAAkB,MAAO,KAAK;AACpC,IAAM,KAAK,OAAO;AAElB,eAAe,QACb,IACA,EAAC,MAAM,uBACP,SACA,MACmB;CACnB,MAAM,UAAU,IAAI,SAAS,IAAI,KAAK;AAItC,OAAM,eAAe,IAAI,GAAG,KAAK,WAAW,KAAK;AAGjD,OAAM,eAAe,IAAI,SAAS,SAAS;CAE3C,MAAM,CAAC,EAAC,WAAW,cAAa,QAAQ,OACtC,YACD;CACD,MAAM,CAAC,EAAC,YAAY,eAAc,QAAQ,OACxC,aACD;CACD,MAAM,CAAC,EAAC,gBAAgB,mBAAkB,QAAQ,OAE/C,iBAAiB;CAEpB,MAAM,UAAW,YAAY,WAAY,IAAI,QAAQ,EAAE;CACvD,MAAM,gBAAiB,gBAAgB,WAAY,IAAI,QAAQ,EAAE;AAGjE,IAAG,OAAO,cAAc,KAAK,IAAI,OAAO,OAAO,aAAa,eAAe;CAG3E,MAAM,SAAS,mBAAmB,QAAQ;AAC1C,IAAG,QAAQ,yBAAyB,QAAQ,EAAC,QAAO,CAAC;AACrD,KAAI,wBAAwB,KAAA;OAExB,KAAK,KAAK,IAAI,OAAO,GAAG,GAAG,EAAE,UAAU,SAAS,IAAI,MAC3B,kBAAkB,qBAAqB;AAChE,MAAG,OAAO,qCAAqC,OAAO;GACtD,MAAM,KAAK,YAAY,KAAK;AAC5B,WAAQ,WAAW,KAAK;AACxB,WAAQ,OAAO,qBAAqB;AACpC,WAAQ,KAAK,SAAS;AACtB,eAAY,SAAS,SAAS;AAC9B,WAAQ,WAAW,MAAM;GACzB,MAAM,KAAK,YAAY,KAAK;AAC5B,MAAG,OAAO,qBAAqB,KAAK,GAAG,MAAM;;;AAIjD,OAAM,eAAe,IAAI,SAAS,QAAQ;AAG1C,cAAa,SADG,gBAAgB,KAAK,CACP;AAE9B,SAAQ,OAAO,qBAAqB;AACpC,IAAG,OAAO,aAAa,OAAO;AAC9B,QAAO;;AAOT,eAAe,eACb,IACA,SACA,MACA;AACA,IAAG,OAAO,WAAW,QAAQ,KAAK,MAAM,KAAK,OAAO;CACpD,IAAI;AACJ,MAAK,IAAI,IAAI,GAAG,IAAI,GAAG,KAAK;AAC1B,MAAI;AACF,WAAQ,OAAO,kBAAkB,OAAO;AACxC;WACO,GAAG;AACV,MAAG,OAAO,iCAAiC,KAAK,YAAY,IAAI,EAAE,IAAI,EAAE;AACxE,SAAM;;AAER,QAAM,MAAM,IAAI;;AAElB,OAAM;;;;;;;AAQR,SAAgB,gBAAgB,MAAqC;AACnE,QAAO;EACL,aAAa;EACb,eAAe;EACf,mBAAmB,SAAS,WAAW,IAAI,KAAA;EAC5C;;AAGH,eAAsB,aACpB,IACA,MACA,gBACmB;AACnB,IAAG,OAAO,cAAc,KAAK,UAAU;AAEvC,SAAQ,MAAR;
EACE,KAAK,SACH,QAAO,MAAM,QAAQ,IAAI,gBAAgB,OAAO,KAAK;EAEvD,KAAK,gBAAgB;GAGnB,MAAM,EAAC,SAAQ;GACf,MAAM,eAAe,gBAAgB,MAAM,KAAK;AAChD,gBAAa,aAAa;GAE1B,MAAM,QAAQ,KAAK,KAAK;AACxB,MAAG,OAAO,WAAW,KAAK,MAAM,eAAe;GAC/C,MAAM,UAAU,IAAI,SAAS,IAAI,KAAK;AACtC,WAAQ,QAAQ,gBAAgB,CAAC,IAAI,aAAa;AAClD,WAAQ,OAAO;AACf,MAAG,OAAO,kBAAkB,KAAK,KAAK,GAAG,MAAM,MAAM;AAErD,UAAO,QAAQ,IAAI;IAAC,GAAG;IAAgB,MAAM;IAAa,EAAE,QAAQ,KAAK;;EAG3E,KAAK,UACH,QAAO,QAAQ,IAAI,gBAAgB,QAAQ,KAAK;EAElD,QACE,OAAM,IAAI,MAAM,uBAAuB,OAAO;;;AAIpD,SAAgB,qBACd,IACA,YACA,QACA;AACA,yBAAwB,IAAI,QAAQ,WAAW;;AAKjD,SAAgB,wBACd,IACA,YACA,UACA;AACA,YAAW,cAAc,aAAa,YAAY;EAChD,MAAM,eAAe,SAAS,WAAW;AAEzC,aAAW,GAAG,eAAe;AAC3B,MAAG,QAAQ,yCAAyC,WAAW,MAAM;AACrE,gBAAa,QAAQ;IACrB;AAEF,aAAW,MAAM,OAAO,aACtB,KAAI;AACF,cAAW,KAAmB,CAAC,UAAU,IAAI,CAAC;WACvC,GAAG;AAUV,MARE,aAAa,SACb,UAAU,KAGV,EAAE,SAAS,2BACP,SACA,WAGJ,4CAA4C,WAAW,IAAI,IAAI,OAAO,EAAE,IACxE,EACD;;GAGL;;;;;;;AAQJ,SAAgB,mBAAmB,KAAiB,QAA0B;CAC5E,MAAM,WAAW,IAAI,UAAU;AAC/B,QAAO,cAA4B,WAAU,QAC3C,SAAS,kBAAkB,IAAI,CAChC;AACD,QAAO;;AAGT,SAAgB,YAAY,KAAiB,QAAgB;AAC3D,QAAO,KAAK,CAAC,aAAa,EAAE,CAAC,CAAC"}
1
+ {"version":3,"file":"replicator.js","names":[],"sources":["../../../../../zero-cache/src/workers/replicator.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {sleep} from '../../../shared/src/sleep.ts';\nimport * as v from '../../../shared/src/valita.ts';\nimport {Database} from '../../../zqlite/src/db.ts';\nimport type {ReplicaOptions} from '../config/zero-config.ts';\nimport {deleteLiteDB} from '../db/delete-lite-db.ts';\nimport {upgradeReplica} from '../services/change-source/common/replica-schema.ts';\nimport {Notifier} from '../services/replicator/notifier.ts';\nimport type {\n ReplicaState,\n ReplicaStateNotifier,\n Replicator,\n} from '../services/replicator/replicator.ts';\nimport {\n getAscendingEvents,\n recordEvent,\n} from '../services/replicator/schema/replication-state.ts';\nimport {\n applyPragmas,\n type PragmaConfig,\n} from '../services/replicator/write-worker-client.ts';\nimport type {Worker} from '../types/processes.ts';\n\nexport const replicaFileModeSchema = v.literalUnion(\n 'serving',\n 'serving-copy',\n 'backup',\n);\n\nexport type ReplicaFileMode = v.Infer<typeof replicaFileModeSchema>;\n\nexport type WalMode = 'wal' | 'wal2';\n\nexport function replicaFileName(replicaFile: string, mode: ReplicaFileMode) {\n return mode === 'serving-copy' ? `${replicaFile}-serving-copy` : replicaFile;\n}\n\nconst MILLIS_PER_HOUR = 1000 * 60 * 60;\nconst MB = 1024 * 1024;\n\nasync function prepare(\n lc: LogContext,\n {file, vacuumIntervalHours}: ReplicaOptions,\n walMode: WalMode,\n mode: ReplicaFileMode,\n): Promise<{file: string; walMode: WalMode}> {\n const replica = new Database(lc, file);\n\n // Perform any upgrades to the replica in case the backup is an\n // earlier version.\n await upgradeReplica(lc, `${mode}-replica`, file);\n\n // Start by folding any (e.g. 
restored) WAL(2) files into the main db.\n await setJournalMode(lc, replica, 'delete');\n\n const [{page_size: pageSize}] = replica.pragma<{page_size: number}>(\n 'page_size',\n );\n const [{page_count: pageCount}] = replica.pragma<{page_count: number}>(\n 'page_count',\n );\n const [{freelist_count: freelistCount}] = replica.pragma<{\n freelist_count: number;\n }>('freelist_count');\n\n const dbSize = ((pageCount * pageSize) / MB).toFixed(2);\n const freelistSize = ((freelistCount * pageSize) / MB).toFixed(2);\n\n // TODO: Consider adding a freelist size or ratio based vacuum trigger.\n lc.info?.(`Size of db ${file}: ${dbSize} MB (${freelistSize} MB freeable)`);\n\n // Check for the VACUUM threshold.\n const events = getAscendingEvents(replica);\n lc.debug?.(`Runtime events for db ${file}`, {events});\n if (vacuumIntervalHours !== undefined) {\n const millisSinceLastEvent =\n Date.now() - (events.at(-1)?.timestamp.getTime() ?? 0);\n if (millisSinceLastEvent / MILLIS_PER_HOUR > vacuumIntervalHours) {\n lc.info?.(`Performing maintenance cleanup on ${file}`);\n const t0 = performance.now();\n replica.unsafeMode(true);\n replica.pragma('journal_mode = OFF');\n replica.exec('VACUUM');\n recordEvent(replica, 'vacuum');\n replica.unsafeMode(false);\n const t1 = performance.now();\n lc.info?.(`VACUUM completed (${t1 - t0} ms)`);\n }\n }\n\n await setJournalMode(lc, replica, walMode);\n\n const pragmas = getPragmaConfig(mode);\n applyPragmas(replica, pragmas);\n\n replica.pragma('optimize = 0x10002');\n lc.info?.(`optimized ${file}`);\n replica.close();\n return {file, walMode};\n}\n\n// Setting the journal_mode requires an exclusive lock on the replica.\n// Add resilience against random replica reads (for stats, etc.) by\n// retrying if the database is locked. 
Note that the busy_timeout doesn't\n// work here.\nasync function setJournalMode(\n lc: LogContext,\n replica: Database,\n mode: 'delete' | 'wal' | 'wal2',\n) {\n lc.info?.(`setting ${replica.name} to ${mode} mode`);\n let err: unknown;\n for (let i = 0; i < 5; i++) {\n try {\n replica.pragma(`journal_mode = ${mode}`);\n return;\n } catch (e) {\n lc.warn?.(`error setting journal_mode to ${mode} (attempt ${i + 1})`, e);\n err = e;\n }\n await sleep(500);\n }\n throw err;\n}\n\n/**\n * Returns the PragmaConfig for a given replica file mode.\n * This is used by both the main thread (setupReplica) and\n * the write worker thread to apply the same pragma settings.\n */\nexport function getPragmaConfig(mode: ReplicaFileMode): PragmaConfig {\n return {\n busyTimeout: 30000,\n analysisLimit: 1000,\n walAutocheckpoint: mode === 'backup' ? 0 : undefined,\n };\n}\n\nexport function setupReplica(\n lc: LogContext,\n mode: ReplicaFileMode,\n replicaOptions: ReplicaOptions,\n) {\n lc.info?.(`setting up ${mode} replica`);\n\n switch (mode) {\n case 'backup':\n return prepare(lc, replicaOptions, 'wal', mode);\n\n case 'serving-copy': {\n // In 'serving-copy' mode, the original file is being used for 'backup'\n // mode, so we make a copy for servicing sync requests.\n const {file} = replicaOptions;\n const copyLocation = replicaFileName(file, mode);\n deleteLiteDB(copyLocation);\n\n const start = Date.now();\n lc.info?.(`copying ${file} to ${copyLocation}`);\n const replica = new Database(lc, file);\n replica.prepare(`VACUUM INTO ?`).run(copyLocation);\n replica.close();\n lc.info?.(`finished copy (${Date.now() - start} ms)`);\n\n return prepare(lc, {...replicaOptions, file: copyLocation}, 'wal2', mode);\n }\n\n case 'serving':\n return prepare(lc, replicaOptions, 'wal2', mode);\n\n default:\n throw new Error(`Invalid ReplicaMode ${mode}`);\n }\n}\n\nexport function setUpMessageHandlers(\n lc: LogContext,\n replicator: Replicator,\n parent: Worker,\n) {\n 
handleSubscriptionsFrom(lc, parent, replicator);\n}\n\ntype Notification = ['notify', ReplicaState];\n\nexport function handleSubscriptionsFrom(\n lc: LogContext,\n subscriber: Worker,\n notifier: ReplicaStateNotifier,\n) {\n subscriber.onMessageType('subscribe', async () => {\n const subscription = notifier.subscribe();\n\n subscriber.on('close', () => {\n lc.debug?.(`closing replication subscription from ${subscriber.pid}`);\n subscription.cancel();\n });\n\n for await (const msg of subscription) {\n try {\n subscriber.send<Notification>(['notify', msg]);\n } catch (e) {\n const log =\n e instanceof Error &&\n 'code' in e &&\n // This can happen in a race condition if the subscribing process\n // is closed before the 'close' message is processed.\n e.code === 'ERR_IPC_CHANNEL_CLOSED'\n ? 'warn'\n : 'error';\n\n lc[log]?.(\n `error sending replicator notification to ${subscriber.pid}: ${String(e)}`,\n e,\n );\n }\n }\n });\n}\n\n/**\n * Creates a Notifier to relay notifications the notifier of another Worker.\n * This does not send the initial subscription message. 
Use {@link subscribeTo}\n * to initiate the subscription.\n */\nexport function createNotifierFrom(_lc: LogContext, source: Worker): Notifier {\n const notifier = new Notifier();\n source.onMessageType<Notification>('notify', msg =>\n notifier.notifySubscribers(msg),\n );\n return notifier;\n}\n\nexport function subscribeTo(_lc: LogContext, source: Worker) {\n source.send(['subscribe', {}]);\n}\n"],"mappings":";;;;;;;;;AAuBA,IAAa,wBAAwB,aACnC,WACA,gBACA,SACD;AAMD,SAAgB,gBAAgB,aAAqB,MAAuB;AAC1E,QAAO,SAAS,iBAAiB,GAAG,YAAY,iBAAiB;;AAGnE,IAAM,kBAAkB,MAAO,KAAK;AACpC,IAAM,KAAK,OAAO;AAElB,eAAe,QACb,IACA,EAAC,MAAM,uBACP,SACA,MAC2C;CAC3C,MAAM,UAAU,IAAI,SAAS,IAAI,KAAK;AAItC,OAAM,eAAe,IAAI,GAAG,KAAK,WAAW,KAAK;AAGjD,OAAM,eAAe,IAAI,SAAS,SAAS;CAE3C,MAAM,CAAC,EAAC,WAAW,cAAa,QAAQ,OACtC,YACD;CACD,MAAM,CAAC,EAAC,YAAY,eAAc,QAAQ,OACxC,aACD;CACD,MAAM,CAAC,EAAC,gBAAgB,mBAAkB,QAAQ,OAE/C,iBAAiB;CAEpB,MAAM,UAAW,YAAY,WAAY,IAAI,QAAQ,EAAE;CACvD,MAAM,gBAAiB,gBAAgB,WAAY,IAAI,QAAQ,EAAE;AAGjE,IAAG,OAAO,cAAc,KAAK,IAAI,OAAO,OAAO,aAAa,eAAe;CAG3E,MAAM,SAAS,mBAAmB,QAAQ;AAC1C,IAAG,QAAQ,yBAAyB,QAAQ,EAAC,QAAO,CAAC;AACrD,KAAI,wBAAwB,KAAA;OAExB,KAAK,KAAK,IAAI,OAAO,GAAG,GAAG,EAAE,UAAU,SAAS,IAAI,MAC3B,kBAAkB,qBAAqB;AAChE,MAAG,OAAO,qCAAqC,OAAO;GACtD,MAAM,KAAK,YAAY,KAAK;AAC5B,WAAQ,WAAW,KAAK;AACxB,WAAQ,OAAO,qBAAqB;AACpC,WAAQ,KAAK,SAAS;AACtB,eAAY,SAAS,SAAS;AAC9B,WAAQ,WAAW,MAAM;GACzB,MAAM,KAAK,YAAY,KAAK;AAC5B,MAAG,OAAO,qBAAqB,KAAK,GAAG,MAAM;;;AAIjD,OAAM,eAAe,IAAI,SAAS,QAAQ;AAG1C,cAAa,SADG,gBAAgB,KAAK,CACP;AAE9B,SAAQ,OAAO,qBAAqB;AACpC,IAAG,OAAO,aAAa,OAAO;AAC9B,SAAQ,OAAO;AACf,QAAO;EAAC;EAAM;EAAQ;;AAOxB,eAAe,eACb,IACA,SACA,MACA;AACA,IAAG,OAAO,WAAW,QAAQ,KAAK,MAAM,KAAK,OAAO;CACpD,IAAI;AACJ,MAAK,IAAI,IAAI,GAAG,IAAI,GAAG,KAAK;AAC1B,MAAI;AACF,WAAQ,OAAO,kBAAkB,OAAO;AACxC;WACO,GAAG;AACV,MAAG,OAAO,iCAAiC,KAAK,YAAY,IAAI,EAAE,IAAI,EAAE;AACxE,SAAM;;AAER,QAAM,MAAM,IAAI;;AAElB,OAAM;;;;;;;AAQR,SAAgB,gBAAgB,MAAqC;AACnE,QAAO;EACL,aAAa;EACb,eAAe;EACf,mBAAmB,SAAS,WAAW,IAAI,KAAA;EAC5C;;AAGH,SAAgB,aACd,IACA,MACA,gBACA;AACA,IAAG,OAAO,cAA
c,KAAK,UAAU;AAEvC,SAAQ,MAAR;EACE,KAAK,SACH,QAAO,QAAQ,IAAI,gBAAgB,OAAO,KAAK;EAEjD,KAAK,gBAAgB;GAGnB,MAAM,EAAC,SAAQ;GACf,MAAM,eAAe,gBAAgB,MAAM,KAAK;AAChD,gBAAa,aAAa;GAE1B,MAAM,QAAQ,KAAK,KAAK;AACxB,MAAG,OAAO,WAAW,KAAK,MAAM,eAAe;GAC/C,MAAM,UAAU,IAAI,SAAS,IAAI,KAAK;AACtC,WAAQ,QAAQ,gBAAgB,CAAC,IAAI,aAAa;AAClD,WAAQ,OAAO;AACf,MAAG,OAAO,kBAAkB,KAAK,KAAK,GAAG,MAAM,MAAM;AAErD,UAAO,QAAQ,IAAI;IAAC,GAAG;IAAgB,MAAM;IAAa,EAAE,QAAQ,KAAK;;EAG3E,KAAK,UACH,QAAO,QAAQ,IAAI,gBAAgB,QAAQ,KAAK;EAElD,QACE,OAAM,IAAI,MAAM,uBAAuB,OAAO;;;AAIpD,SAAgB,qBACd,IACA,YACA,QACA;AACA,yBAAwB,IAAI,QAAQ,WAAW;;AAKjD,SAAgB,wBACd,IACA,YACA,UACA;AACA,YAAW,cAAc,aAAa,YAAY;EAChD,MAAM,eAAe,SAAS,WAAW;AAEzC,aAAW,GAAG,eAAe;AAC3B,MAAG,QAAQ,yCAAyC,WAAW,MAAM;AACrE,gBAAa,QAAQ;IACrB;AAEF,aAAW,MAAM,OAAO,aACtB,KAAI;AACF,cAAW,KAAmB,CAAC,UAAU,IAAI,CAAC;WACvC,GAAG;AAUV,MARE,aAAa,SACb,UAAU,KAGV,EAAE,SAAS,2BACP,SACA,WAGJ,4CAA4C,WAAW,IAAI,IAAI,OAAO,EAAE,IACxE,EACD;;GAGL;;;;;;;AAQJ,SAAgB,mBAAmB,KAAiB,QAA0B;CAC5E,MAAM,WAAW,IAAI,UAAU;AAC/B,QAAO,cAA4B,WAAU,QAC3C,SAAS,kBAAkB,IAAI,CAChC;AACD,QAAO;;AAGT,SAAgB,YAAY,KAAiB,QAAgB;AAC3D,QAAO,KAAK,CAAC,aAAa,EAAE,CAAC,CAAC"}
@@ -2,7 +2,7 @@
2
2
  /**
3
3
  * The current version of Zero.
4
4
  */
5
- var version = "1.2.0-canary.4";
5
+ var version = "1.2.0-canary.6";
6
6
  //#endregion
7
7
  export { version };
8
8
 
@@ -1 +1 @@
1
- {"version":3,"file":"view-apply-change.d.ts","sourceRoot":"","sources":["../../../../../zql/src/ivm/view-apply-change.ts"],"names":[],"mappings":"AAQA,OAAO,KAAK,EAAC,GAAG,EAAC,MAAM,oCAAoC,CAAC;AAC5D,OAAO,EAAkB,KAAK,IAAI,EAAC,MAAM,WAAW,CAAC;AAErD,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,aAAa,CAAC;AAC9C,OAAO,KAAK,EAAC,KAAK,EAAE,MAAM,EAAC,MAAM,WAAW,CAAC;AAE7C,eAAO,MAAM,cAAc,eAAe,CAAC;AAC3C,eAAO,MAAM,QAAQ,eAAe,CAAC;AAQrC;;;;GAIG;AACH,MAAM,MAAM,UAAU,GAClB,aAAa,GACb,gBAAgB,GAChB,eAAe,GACf,cAAc,CAAC;AAEnB,MAAM,MAAM,WAAW,GAAG;IAAC,GAAG,EAAE,GAAG,CAAA;CAAC,CAAC;AAErC,MAAM,MAAM,aAAa,GAAG;IAC1B,IAAI,EAAE,KAAK,CAAC;IACZ,IAAI,EAAE,IAAI,CAAC;CACZ,CAAC;AAEF,MAAM,MAAM,gBAAgB,GAAG;IAC7B,IAAI,EAAE,QAAQ,CAAC;IACf,IAAI,EAAE,IAAI,CAAC;CACZ,CAAC;AAEF,KAAK,eAAe,GAAG;IACrB,IAAI,EAAE,OAAO,CAAC;IACd,IAAI,EAAE,WAAW,CAAC;IAClB,KAAK,EAAE;QACL,gBAAgB,EAAE,MAAM,CAAC;QACzB,MAAM,EAAE,UAAU,CAAC;KACpB,CAAC;CACH,CAAC;AAEF,KAAK,cAAc,GAAG;IACpB,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,WAAW,CAAC;IAClB,OAAO,EAAE,WAAW,CAAC;CACtB,CAAC;AAEF;;;GAGG;AACH,MAAM,WAAW,WAAW;IAC1B,GAAG,CAAC,KAAK,EAAE,KAAK,GAAG,MAAM,GAAG,SAAS,CAAC;IACtC,GAAG,CAAC,KAAK,EAAE,KAAK,EAAE,QAAQ,EAAE,MAAM,GAAG,IAAI,CAAC;IAC1C,MAAM,CAAC,KAAK,EAAE,KAAK,GAAG,OAAO,CAAC;CAC/B;AAED,wBAAgB,WAAW,CACzB,WAAW,EAAE,KAAK,EAClB,MAAM,EAAE,UAAU,EAClB,MAAM,EAAE,YAAY,EACpB,YAAY,EAAE,MAAM,EACpB,MAAM,EAAE,MAAM,EACd,OAAO,UAAQ,GACd,IAAI,CAyON"}
1
+ {"version":3,"file":"view-apply-change.d.ts","sourceRoot":"","sources":["../../../../../zql/src/ivm/view-apply-change.ts"],"names":[],"mappings":"AAQA,OAAO,KAAK,EAAC,GAAG,EAAC,MAAM,oCAAoC,CAAC;AAC5D,OAAO,EAAkB,KAAK,IAAI,EAAC,MAAM,WAAW,CAAC;AAErD,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,aAAa,CAAC;AAC9C,OAAO,KAAK,EAAC,KAAK,EAAE,MAAM,EAAC,MAAM,WAAW,CAAC;AAE7C,eAAO,MAAM,cAAc,eAAe,CAAC;AAC3C,eAAO,MAAM,QAAQ,eAAe,CAAC;AAQrC;;;;GAIG;AACH,MAAM,MAAM,UAAU,GAClB,aAAa,GACb,gBAAgB,GAChB,eAAe,GACf,cAAc,CAAC;AAEnB,MAAM,MAAM,WAAW,GAAG;IAAC,GAAG,EAAE,GAAG,CAAA;CAAC,CAAC;AAErC,MAAM,MAAM,aAAa,GAAG;IAC1B,IAAI,EAAE,KAAK,CAAC;IACZ,IAAI,EAAE,IAAI,CAAC;CACZ,CAAC;AAEF,MAAM,MAAM,gBAAgB,GAAG;IAC7B,IAAI,EAAE,QAAQ,CAAC;IACf,IAAI,EAAE,IAAI,CAAC;CACZ,CAAC;AAEF,KAAK,eAAe,GAAG;IACrB,IAAI,EAAE,OAAO,CAAC;IACd,IAAI,EAAE,WAAW,CAAC;IAClB,KAAK,EAAE;QACL,gBAAgB,EAAE,MAAM,CAAC;QACzB,MAAM,EAAE,UAAU,CAAC;KACpB,CAAC;CACH,CAAC;AAEF,KAAK,cAAc,GAAG;IACpB,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,WAAW,CAAC;IAClB,OAAO,EAAE,WAAW,CAAC;CACtB,CAAC;AAEF;;;GAGG;AACH,MAAM,WAAW,WAAW;IAC1B,GAAG,CAAC,KAAK,EAAE,KAAK,GAAG,MAAM,GAAG,SAAS,CAAC;IACtC,GAAG,CAAC,KAAK,EAAE,KAAK,EAAE,QAAQ,EAAE,MAAM,GAAG,IAAI,CAAC;IAC1C,MAAM,CAAC,KAAK,EAAE,KAAK,GAAG,OAAO,CAAC;CAC/B;AAED,wBAAgB,WAAW,CACzB,WAAW,EAAE,KAAK,EAClB,MAAM,EAAE,UAAU,EAClB,MAAM,EAAE,YAAY,EACpB,YAAY,EAAE,MAAM,EACpB,MAAM,EAAE,MAAM,EACd,OAAO,UAAQ,GACd,IAAI,CA4ON"}