@rocicorp/zero 1.0.0 → 1.0.1-canary.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75) hide show
  1. package/out/analyze-query/src/bin-analyze.js +19 -7
  2. package/out/analyze-query/src/bin-analyze.js.map +1 -1
  3. package/out/zero/package.js +1 -1
  4. package/out/zero/package.js.map +1 -1
  5. package/out/zero-cache/src/config/zero-config.d.ts +6 -0
  6. package/out/zero-cache/src/config/zero-config.d.ts.map +1 -1
  7. package/out/zero-cache/src/config/zero-config.js +12 -0
  8. package/out/zero-cache/src/config/zero-config.js.map +1 -1
  9. package/out/zero-cache/src/server/anonymous-otel-start.d.ts.map +1 -1
  10. package/out/zero-cache/src/server/anonymous-otel-start.js +1 -14
  11. package/out/zero-cache/src/server/anonymous-otel-start.js.map +1 -1
  12. package/out/zero-cache/src/server/change-streamer.d.ts.map +1 -1
  13. package/out/zero-cache/src/server/change-streamer.js +2 -2
  14. package/out/zero-cache/src/server/change-streamer.js.map +1 -1
  15. package/out/zero-cache/src/services/analyze.js +1 -1
  16. package/out/zero-cache/src/services/change-source/change-source.d.ts +7 -0
  17. package/out/zero-cache/src/services/change-source/change-source.d.ts.map +1 -1
  18. package/out/zero-cache/src/services/change-source/common/change-stream-multiplexer.d.ts.map +1 -1
  19. package/out/zero-cache/src/services/change-source/common/change-stream-multiplexer.js +1 -1
  20. package/out/zero-cache/src/services/change-source/common/change-stream-multiplexer.js.map +1 -1
  21. package/out/zero-cache/src/services/change-source/custom/change-source.d.ts.map +1 -1
  22. package/out/zero-cache/src/services/change-source/custom/change-source.js +3 -0
  23. package/out/zero-cache/src/services/change-source/custom/change-source.js.map +1 -1
  24. package/out/zero-cache/src/services/change-source/pg/change-source.d.ts +9 -1
  25. package/out/zero-cache/src/services/change-source/pg/change-source.d.ts.map +1 -1
  26. package/out/zero-cache/src/services/change-source/pg/change-source.js +150 -45
  27. package/out/zero-cache/src/services/change-source/pg/change-source.js.map +1 -1
  28. package/out/zero-cache/src/services/change-source/protocol/current/downstream.d.ts +8 -0
  29. package/out/zero-cache/src/services/change-source/protocol/current/downstream.d.ts.map +1 -1
  30. package/out/zero-cache/src/services/change-source/protocol/current/status.d.ts +26 -1
  31. package/out/zero-cache/src/services/change-source/protocol/current/status.d.ts.map +1 -1
  32. package/out/zero-cache/src/services/change-source/protocol/current/status.js +7 -2
  33. package/out/zero-cache/src/services/change-source/protocol/current/status.js.map +1 -1
  34. package/out/zero-cache/src/services/change-source/protocol/current/upstream.d.ts +8 -0
  35. package/out/zero-cache/src/services/change-source/protocol/current/upstream.d.ts.map +1 -1
  36. package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts.map +1 -1
  37. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js +10 -2
  38. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js.map +1 -1
  39. package/out/zero-cache/src/services/change-streamer/change-streamer.d.ts +25 -0
  40. package/out/zero-cache/src/services/change-streamer/change-streamer.d.ts.map +1 -1
  41. package/out/zero-cache/src/services/change-streamer/change-streamer.js +8 -1
  42. package/out/zero-cache/src/services/change-streamer/change-streamer.js.map +1 -1
  43. package/out/zero-cache/src/services/change-streamer/forwarder.d.ts +2 -0
  44. package/out/zero-cache/src/services/change-streamer/forwarder.d.ts.map +1 -1
  45. package/out/zero-cache/src/services/change-streamer/forwarder.js +3 -0
  46. package/out/zero-cache/src/services/change-streamer/forwarder.js.map +1 -1
  47. package/out/zero-cache/src/services/change-streamer/subscriber.d.ts +3 -2
  48. package/out/zero-cache/src/services/change-streamer/subscriber.d.ts.map +1 -1
  49. package/out/zero-cache/src/services/change-streamer/subscriber.js +17 -8
  50. package/out/zero-cache/src/services/change-streamer/subscriber.js.map +1 -1
  51. package/out/zero-cache/src/services/replicator/incremental-sync.d.ts +2 -2
  52. package/out/zero-cache/src/services/replicator/incremental-sync.d.ts.map +1 -1
  53. package/out/zero-cache/src/services/replicator/incremental-sync.js +19 -4
  54. package/out/zero-cache/src/services/replicator/incremental-sync.js.map +1 -1
  55. package/out/zero-cache/src/services/replicator/replicator.d.ts.map +1 -1
  56. package/out/zero-cache/src/services/replicator/replicator.js +2 -2
  57. package/out/zero-cache/src/services/replicator/replicator.js.map +1 -1
  58. package/out/zero-cache/src/services/replicator/reporter/recorder.d.ts +12 -0
  59. package/out/zero-cache/src/services/replicator/reporter/recorder.d.ts.map +1 -0
  60. package/out/zero-cache/src/services/replicator/reporter/recorder.js +58 -0
  61. package/out/zero-cache/src/services/replicator/reporter/recorder.js.map +1 -0
  62. package/out/zero-cache/src/services/replicator/reporter/report-schema.d.ts +35 -0
  63. package/out/zero-cache/src/services/replicator/reporter/report-schema.d.ts.map +1 -0
  64. package/out/zero-cache/src/services/replicator/reporter/report-schema.js +20 -0
  65. package/out/zero-cache/src/services/replicator/reporter/report-schema.js.map +1 -0
  66. package/out/zero-cache/src/services/run-ast.js +1 -1
  67. package/out/zero-cache/src/types/pg.d.ts.map +1 -1
  68. package/out/zero-cache/src/types/pg.js +2 -0
  69. package/out/zero-cache/src/types/pg.js.map +1 -1
  70. package/out/zero-client/src/client/version.js +1 -1
  71. package/package.json +1 -1
  72. package/out/analyze-query/src/run-ast.d.ts +0 -22
  73. package/out/analyze-query/src/run-ast.d.ts.map +0 -1
  74. package/out/analyze-query/src/run-ast.js +0 -75
  75. package/out/analyze-query/src/run-ast.js.map +0 -1
@@ -1 +1 @@
1
- {"version":3,"file":"change-source.js","names":["#lc","#upstreamUri","#shard","#replica","#context","#stopExistingReplicationSlotSubscribers","#startStream","#logCurrentReplicaInfo","#dropReplicationSlots","#acks","#expectDownstreamAck","#ackIfDownstreamIsCaughtUp","#waitingForDownstreamAck","#sendAck","#shardPrefix","#shardConfig","#initialSchema","#upstreamDB","#error","#logError","#makeChanges","#handleDdlMessage","#lastSnapshotInTx","#handleRelation","#parseReplicationEvent","#replicaIdentityTimer","#preSchema","#makeSchemaChanges","#getTableChanges"],"sources":["../../../../../../../zero-cache/src/services/change-source/pg/change-source.ts"],"sourcesContent":["import {\n PG_ADMIN_SHUTDOWN,\n PG_OBJECT_IN_USE,\n} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport postgres from 'postgres';\nimport {AbortError} from '../../../../../shared/src/abort-error.ts';\nimport {stringify} from '../../../../../shared/src/bigint-json.ts';\nimport {deepEqual} from '../../../../../shared/src/json.ts';\nimport {must} from '../../../../../shared/src/must.ts';\nimport {mapValues} from '../../../../../shared/src/objects.ts';\nimport {promiseVoid} from '../../../../../shared/src/resolved-promises.ts';\nimport {\n equals,\n intersection,\n symmetricDifferences,\n} from '../../../../../shared/src/set-utils.ts';\nimport {sleep} from '../../../../../shared/src/sleep.ts';\nimport * as v from '../../../../../shared/src/valita.ts';\nimport {Database} from '../../../../../zqlite/src/db.ts';\nimport {\n mapPostgresToLiteColumn,\n UnsupportedColumnDefaultError,\n} from '../../../db/pg-to-lite.ts';\nimport {runTx} from '../../../db/run-transaction.ts';\nimport type {\n ColumnSpec,\n PublishedIndexSpec,\n PublishedTableSpec,\n} from '../../../db/specs.ts';\nimport {StatementRunner} from '../../../db/statements.ts';\nimport {type LexiVersion} from '../../../types/lexi-version.ts';\nimport {pgClient, type PostgresDB} from 
'../../../types/pg.ts';\nimport {\n upstreamSchema,\n type ShardConfig,\n type ShardID,\n} from '../../../types/shards.ts';\nimport {\n majorVersionFromString,\n majorVersionToString,\n} from '../../../types/state-version.ts';\nimport type {Sink} from '../../../types/streams.ts';\nimport {AutoResetSignal} from '../../change-streamer/schema/tables.ts';\nimport {\n getSubscriptionStateAndContext,\n type SubscriptionState,\n type SubscriptionStateAndContext,\n} from '../../replicator/schema/replication-state.ts';\nimport type {ChangeSource, ChangeStream} from '../change-source.ts';\nimport {BackfillManager} from '../common/backfill-manager.ts';\nimport {\n ChangeStreamMultiplexer,\n type Listener,\n} from '../common/change-stream-multiplexer.ts';\nimport {initReplica} from '../common/replica-schema.ts';\nimport type {BackfillRequest, JSONObject} from '../protocol/current.ts';\nimport type {\n ColumnAdd,\n Identifier,\n MessageRelation,\n SchemaChange,\n TableCreate,\n} from '../protocol/current/data.ts';\nimport type {\n ChangeStreamData,\n ChangeStreamMessage,\n Data,\n} from '../protocol/current/downstream.ts';\nimport type {ColumnMetadata, TableMetadata} from './backfill-metadata.ts';\nimport {streamBackfill} from './backfill-stream.ts';\nimport {\n initialSync,\n type InitialSyncOptions,\n type ServerContext,\n} from './initial-sync.ts';\nimport type {\n Message,\n MessageMessage,\n MessageRelation as PostgresRelation,\n} from './logical-replication/pgoutput.types.ts';\nimport {subscribe} from './logical-replication/stream.ts';\nimport {fromBigInt, toStateVersionString, type LSN} from './lsn.ts';\nimport {\n replicationEventSchema,\n type DdlUpdateEvent,\n type SchemaSnapshotEvent,\n} from './schema/ddl.ts';\nimport {updateShardSchema} from './schema/init.ts';\nimport {\n getPublicationInfo,\n type PublishedSchema,\n type PublishedTableWithReplicaIdentity,\n} from './schema/published.ts';\nimport {\n dropShard,\n getInternalShardConfig,\n getReplicaAtVersion,\n 
internalPublicationPrefix,\n legacyReplicationSlot,\n replicaIdentitiesForTablesWithoutPrimaryKeys,\n replicationSlotExpression,\n type InternalShardConfig,\n type Replica,\n} from './schema/shard.ts';\nimport {validate} from './schema/validation.ts';\n\n/**\n * Initializes a Postgres change source, including the initial sync of the\n * replica, before streaming changes from the corresponding logical replication\n * stream.\n */\nexport async function initializePostgresChangeSource(\n lc: LogContext,\n upstreamURI: string,\n shard: ShardConfig,\n replicaDbFile: string,\n syncOptions: InitialSyncOptions,\n context: ServerContext,\n): Promise<{subscriptionState: SubscriptionState; changeSource: ChangeSource}> {\n await initReplica(\n lc,\n `replica-${shard.appID}-${shard.shardNum}`,\n replicaDbFile,\n (log, tx) => initialSync(log, shard, tx, upstreamURI, syncOptions, context),\n );\n\n const replica = new Database(lc, replicaDbFile);\n const subscriptionState = getSubscriptionStateAndContext(\n new StatementRunner(replica),\n );\n replica.close();\n\n // Check that upstream is properly setup, and throw an AutoReset to re-run\n // initial sync if not.\n const db = pgClient(lc, upstreamURI);\n try {\n const upstreamReplica = await checkAndUpdateUpstream(\n lc,\n db,\n shard,\n subscriptionState,\n );\n\n const changeSource = new PostgresChangeSource(\n lc,\n upstreamURI,\n shard,\n upstreamReplica,\n context,\n );\n\n return {subscriptionState, changeSource};\n } finally {\n await db.end();\n }\n}\n\nasync function checkAndUpdateUpstream(\n lc: LogContext,\n sql: PostgresDB,\n shard: ShardConfig,\n {\n replicaVersion,\n publications: subscribed,\n initialSyncContext,\n }: SubscriptionStateAndContext,\n) {\n // Perform any shard schema updates\n await updateShardSchema(lc, sql, shard, replicaVersion);\n\n const upstreamReplica = await getReplicaAtVersion(\n lc,\n sql,\n shard,\n replicaVersion,\n initialSyncContext,\n );\n if (!upstreamReplica) {\n throw new 
AutoResetSignal(\n `No replication slot for replica at version ${replicaVersion}`,\n );\n }\n\n // Verify that the publications match what is being replicated.\n const requested = [...shard.publications].sort();\n const replicated = upstreamReplica.publications\n .filter(p => !p.startsWith(internalPublicationPrefix(shard)))\n .sort();\n if (!deepEqual(requested, replicated)) {\n lc.warn?.(`Dropping shard to change publications to: [${requested}]`);\n await sql.unsafe(dropShard(shard.appID, shard.shardNum));\n throw new AutoResetSignal(\n `Requested publications [${requested}] do not match configured ` +\n `publications: [${replicated}]`,\n );\n }\n\n // Sanity check: The subscription state on the replica should have the\n // same publications. This should be guaranteed by the equivalence of the\n // replicaVersion, but it doesn't hurt to verify.\n if (!deepEqual(upstreamReplica.publications, subscribed)) {\n throw new AutoResetSignal(\n `Upstream publications [${upstreamReplica.publications}] do not ` +\n `match subscribed publications [${subscribed}]`,\n );\n }\n\n // Verify that the publications exist.\n const exists = await sql`\n SELECT pubname FROM pg_publication WHERE pubname IN ${sql(subscribed)};\n `.values();\n if (exists.length !== subscribed.length) {\n throw new AutoResetSignal(\n `Upstream publications [${exists.flat()}] do not contain ` +\n `all subscribed publications [${subscribed}]`,\n );\n }\n\n const {slot} = upstreamReplica;\n const result = await sql<\n {restartLSN: LSN | null; walStatus: string | null}[]\n > /*sql*/ `\n SELECT restart_lsn as \"restartLSN\", wal_status as \"walStatus\" FROM pg_replication_slots\n WHERE slot_name = ${slot}`;\n if (result.length === 0) {\n throw new AutoResetSignal(`replication slot ${slot} is missing`);\n }\n const [{restartLSN, walStatus}] = result;\n if (restartLSN === null || walStatus === 'lost') {\n throw new AutoResetSignal(\n `replication slot ${slot} has been invalidated for exceeding the 
max_slot_wal_keep_size`,\n );\n }\n return upstreamReplica;\n}\n\n// Parameterize this if necessary. In practice starvation may never happen.\nconst MAX_LOW_PRIORITY_DELAY_MS = 1000;\n\ntype ReservationState = {\n lastWatermark?: string;\n};\n\n/**\n * Postgres implementation of a {@link ChangeSource} backed by a logical\n * replication stream.\n */\nclass PostgresChangeSource implements ChangeSource {\n readonly #lc: LogContext;\n readonly #upstreamUri: string;\n readonly #shard: ShardID;\n readonly #replica: Replica;\n readonly #context: ServerContext;\n\n constructor(\n lc: LogContext,\n upstreamUri: string,\n shard: ShardID,\n replica: Replica,\n context: ServerContext,\n ) {\n this.#lc = lc.withContext('component', 'change-source');\n this.#upstreamUri = upstreamUri;\n this.#shard = shard;\n this.#replica = replica;\n this.#context = context;\n }\n\n async startStream(\n clientWatermark: string,\n backfillRequests: BackfillRequest[] = [],\n ): Promise<ChangeStream> {\n const db = pgClient(this.#lc, this.#upstreamUri);\n const {slot} = this.#replica;\n\n let cleanup = promiseVoid;\n try {\n ({cleanup} = await this.#stopExistingReplicationSlotSubscribers(\n db,\n slot,\n ));\n const config = await getInternalShardConfig(db, this.#shard);\n this.#lc.info?.(`starting replication stream@${slot}`);\n return await this.#startStream(\n db,\n slot,\n clientWatermark,\n config,\n backfillRequests,\n );\n } finally {\n void cleanup.then(() => db.end());\n }\n }\n\n async #startStream(\n db: PostgresDB,\n slot: string,\n clientWatermark: string,\n shardConfig: InternalShardConfig,\n backfillRequests: BackfillRequest[],\n ): Promise<ChangeStream> {\n const clientStart = majorVersionFromString(clientWatermark) + 1n;\n const {messages, acks} = await subscribe(\n this.#lc,\n db,\n slot,\n [...shardConfig.publications],\n clientStart,\n );\n const acker = new Acker(acks);\n\n // The ChangeStreamMultiplexer facilitates cooperative streaming from\n // the main replication stream 
and backfill streams initiated by the\n // BackfillManager.\n const changes = new ChangeStreamMultiplexer(this.#lc, clientWatermark);\n const backfillManager = new BackfillManager(this.#lc, changes, req =>\n streamBackfill(this.#lc, this.#upstreamUri, this.#replica, req),\n );\n changes\n .addProducers(messages, backfillManager)\n .addListeners(backfillManager, acker);\n backfillManager.run(clientWatermark, backfillRequests);\n\n const changeMaker = new ChangeMaker(\n this.#lc,\n this.#shard,\n shardConfig,\n this.#replica.initialSchema,\n this.#upstreamUri,\n );\n\n void (async () => {\n try {\n let reservation: ReservationState | null = null;\n let inTransaction = false;\n\n for await (const [lsn, msg] of messages) {\n // Note: no reservation is needed for pushStatus().\n if (msg.tag === 'keepalive') {\n changes.pushStatus([\n 'status',\n {ack: msg.shouldRespond},\n {watermark: majorVersionToString(lsn)},\n ]);\n\n // If we're not in a transaction but the last reservation was kept\n // because of pending keepalives in the queue, release the\n // reservation.\n if (!inTransaction && reservation?.lastWatermark) {\n changes.release(reservation.lastWatermark);\n reservation = null;\n }\n continue;\n }\n\n if (!reservation) {\n const res = changes.reserve('replication');\n typeof res === 'string' || (await res); // awaits should be uncommon\n reservation = {};\n }\n\n let lastChange: ChangeStreamMessage | undefined;\n for (const change of await changeMaker.makeChanges(lsn, msg)) {\n await changes.push(change); // Allow the change-streamer to push back.\n lastChange = change;\n }\n\n switch (lastChange?.[0]) {\n case 'begin':\n inTransaction = true;\n break;\n case 'commit':\n inTransaction = false;\n reservation.lastWatermark = lastChange[2].watermark;\n if (\n messages.queued === 0 ||\n changes.waiterDelay() > MAX_LOW_PRIORITY_DELAY_MS\n ) {\n // After each transaction, release the reservation:\n // - if there are no pending upstream messages\n // - or if a low 
priority request has been waiting for longer\n // than MAX_LOW_PRIORITY_DELAY_MS. This is to prevent\n // (backfill) starvation on very active upstreams.\n changes.release(reservation.lastWatermark);\n reservation = null;\n }\n break;\n }\n }\n } catch (e) {\n // Note: no need to worry about reservations here since downstream\n // is being completely canceled.\n const err = translateError(e);\n if (err instanceof ShutdownSignal) {\n // Log the new state of the replica to surface information about the\n // server that sent the shutdown signal, if any.\n await this.#logCurrentReplicaInfo();\n }\n changes.fail(err);\n }\n })();\n\n this.#lc.info?.(\n `started replication stream@${slot} from ${clientWatermark} (replicaVersion: ${\n this.#replica.version\n })`,\n );\n\n return {\n changes: changes.asSource(),\n acks: {push: status => acker.ack(status[2].watermark)},\n };\n }\n\n async #logCurrentReplicaInfo() {\n const db = pgClient(this.#lc, this.#upstreamUri);\n try {\n const replica = await getReplicaAtVersion(\n this.#lc,\n db,\n this.#shard,\n this.#replica.version,\n );\n if (replica) {\n this.#lc.info?.(\n `Shutdown signal from replica@${this.#replica.version}: ${stringify(replica.subscriberContext)}`,\n );\n }\n } catch (e) {\n this.#lc.warn?.(`error logging replica info`, e);\n } finally {\n await db.end();\n }\n }\n\n /**\n * Stops replication slots associated with this shard, and returns\n * a `cleanup` task that drops any slot other than the specified\n * `slotToKeep`.\n *\n * Note that replication slots created after `slotToKeep` (as indicated by\n * the timestamp suffix) are preserved, as those are newly syncing replicas\n * that will soon take over the slot.\n */\n async #stopExistingReplicationSlotSubscribers(\n db: PostgresDB,\n slotToKeep: string,\n ): Promise<{cleanup: Promise<void>}> {\n const slotExpression = replicationSlotExpression(this.#shard);\n const legacySlotName = legacyReplicationSlot(this.#shard);\n\n const result = await runTx(db, async 
sql => {\n // Note: `slot_name <= slotToKeep` uses a string compare of the millisecond\n // timestamp, which works until it exceeds 13 digits (sometime in 2286).\n const result = await sql<\n {slot: string; pid: string | null; terminated: boolean | null}[]\n > /*sql*/ `\n SELECT slot_name as slot, pg_terminate_backend(active_pid) as terminated, active_pid as pid\n FROM pg_replication_slots \n WHERE (slot_name LIKE ${slotExpression} OR slot_name = ${legacySlotName})\n AND slot_name <= ${slotToKeep}`;\n this.#lc.info?.(\n `terminated replication slots: ${JSON.stringify(result)}`,\n );\n const replicasTable = `${upstreamSchema(this.#shard)}.replicas`;\n const replicasBefore = await sql`\n SELECT slot, version, \"initialSyncContext\", \"subscriberContext\" \n FROM ${sql(replicasTable)} ORDER BY slot`;\n\n if (result.length === 0) {\n const shardSlots = await sql`\n SELECT slot_name as slot, active, active_pid as pid\n FROM pg_replication_slots\n WHERE slot_name LIKE ${slotExpression} OR slot_name = ${legacySlotName}\n ORDER BY slot_name`;\n this.#lc.warn?.(\n `slot ${slotToKeep} not found while cleaning subscribers`,\n {slots: shardSlots, replicas: replicasBefore},\n );\n throw new AbortError(\n `replication slot ${slotToKeep} is missing. 
A different ` +\n `replication-manager should now be running on a new ` +\n `replication slot.`,\n );\n }\n // Clear the state of the older replicas.\n this.#lc.info?.(\n `replicas before cleanup (slotToKeep=${slotToKeep}): ${JSON.stringify(\n replicasBefore,\n )}`,\n );\n await sql`\n DELETE FROM ${sql(replicasTable)} WHERE slot < ${slotToKeep}`;\n await sql`\n UPDATE ${sql(replicasTable)} \n SET \"subscriberContext\" = ${this.#context}\n WHERE slot = ${slotToKeep}`;\n const replicasAfter = await sql<{slot: string; version: string}[]>`\n SELECT slot, version FROM ${sql(replicasTable)} ORDER BY slot`;\n this.#lc.info?.(\n `replicas after cleanup (slotToKeep=${slotToKeep}): ${JSON.stringify(\n replicasAfter,\n )}`,\n );\n return result;\n });\n\n const pids = result.filter(({pid}) => pid !== null).map(({pid}) => pid);\n if (pids.length) {\n this.#lc.info?.(`signaled subscriber ${pids} to shut down`);\n }\n const otherSlots = result\n .filter(({slot}) => slot !== slotToKeep)\n .map(({slot}) => slot);\n return {\n cleanup: otherSlots.length\n ? 
this.#dropReplicationSlots(db, otherSlots)\n : promiseVoid,\n };\n }\n\n async #dropReplicationSlots(sql: PostgresDB, slots: string[]) {\n this.#lc.info?.(`dropping other replication slot(s) ${slots}`);\n for (let i = 0; i < 5; i++) {\n try {\n await sql`\n SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots\n WHERE slot_name IN ${sql(slots)}\n `;\n this.#lc.info?.(`successfully dropped ${slots}`);\n return;\n } catch (e) {\n // error: replication slot \"zero_slot_change_source_test_id\" is active for PID 268\n if (\n e instanceof postgres.PostgresError &&\n e.code === PG_OBJECT_IN_USE\n ) {\n // The freeing up of the replication slot is not transactional;\n // sometimes it takes time for Postgres to consider the slot\n // inactive.\n this.#lc.debug?.(`attempt ${i + 1}: ${String(e)}`, e);\n } else {\n this.#lc.warn?.(`error dropping ${slots}`, e);\n }\n await sleep(1000);\n }\n }\n this.#lc.warn?.(`maximum attempts exceeded dropping ${slots}`);\n }\n}\n\n// Exported for testing.\nexport class Acker implements Listener {\n #acks: Sink<bigint>;\n #waitingForDownstreamAck: string | null = null;\n\n constructor(acks: Sink<bigint>) {\n this.#acks = acks;\n }\n\n onChange(change: ChangeStreamMessage): void {\n switch (change[0]) {\n case 'status':\n const {watermark} = change[2];\n if (change[1].ack) {\n this.#expectDownstreamAck(watermark);\n } else {\n // Keepalives with shouldRespond = false are sent to Listeners,\n // but for efficiency they are not sent downstream to the\n // change-streamer. Ack them here if the change-streamer is caught\n // up. 
This updates the replication slot's `confirmed_flush_lsn`\n // more quickly (rather than waiting for the periodic shouldRespond),\n // which is useful for monitoring replication slot lag.\n this.#ackIfDownstreamIsCaughtUp(watermark);\n }\n break;\n case 'begin':\n // Mark the commit watermark as being expected so that any intermediate\n // shouldRespond=false watermarks, which will be at the\n // commitWatermark, are *not* acked, as the ack must come from\n // change-streamer after it commits the transaction.\n if (!change[1].skipAck) {\n this.#expectDownstreamAck(change[2].commitWatermark);\n }\n break;\n }\n }\n\n #expectDownstreamAck(watermark: string) {\n this.#waitingForDownstreamAck = watermark;\n }\n\n ack(watermark: LexiVersion) {\n if (\n this.#waitingForDownstreamAck &&\n this.#waitingForDownstreamAck <= watermark\n ) {\n this.#waitingForDownstreamAck = null;\n }\n this.#sendAck(watermark);\n }\n\n #ackIfDownstreamIsCaughtUp(watermark: string) {\n if (this.#waitingForDownstreamAck === null) {\n this.#sendAck(watermark);\n }\n }\n\n #sendAck(watermark: LexiVersion) {\n const lsn = majorVersionFromString(watermark);\n this.#acks.push(lsn);\n }\n}\n\ntype ReplicationError = {\n lsn: bigint;\n msg: Message;\n err: unknown;\n lastLogTime: number;\n};\n\nconst SET_REPLICA_IDENTITY_DELAY_MS = 50;\n\nclass ChangeMaker {\n readonly #lc: LogContext;\n readonly #shardPrefix: string;\n readonly #shardConfig: InternalShardConfig;\n readonly #initialSchema: PublishedSchema;\n readonly #upstreamDB: PostgresDB;\n\n #replicaIdentityTimer: NodeJS.Timeout | undefined;\n #error: ReplicationError | undefined;\n\n constructor(\n lc: LogContext,\n {appID, shardNum}: ShardID,\n shardConfig: InternalShardConfig,\n initialSchema: PublishedSchema,\n upstreamURI: string,\n ) {\n this.#lc = lc;\n // Note: This matches the prefix used in pg_logical_emit_message() in pg/schema/ddl.ts.\n this.#shardPrefix = `${appID}/${shardNum}`;\n this.#shardConfig = shardConfig;\n this.#initialSchema 
= initialSchema;\n this.#upstreamDB = pgClient(lc, upstreamURI, {\n ['idle_timeout']: 10, // only used occasionally\n connection: {['application_name']: 'zero-schema-change-detector'},\n });\n }\n\n async makeChanges(lsn: bigint, msg: Message): Promise<ChangeStreamMessage[]> {\n if (this.#error) {\n this.#logError(this.#error);\n return [];\n }\n try {\n return await this.#makeChanges(msg);\n } catch (err) {\n this.#error = {lsn, msg, err, lastLogTime: 0};\n this.#logError(this.#error);\n\n const message = `Unable to continue replication from LSN ${fromBigInt(lsn)}`;\n const errorDetails: JSONObject = {error: message};\n if (err instanceof UnsupportedSchemaChangeError) {\n errorDetails.reason = err.description;\n errorDetails.context = err.event.context;\n } else {\n errorDetails.reason = String(err);\n }\n\n // Rollback the current transaction to avoid dangling transactions in\n // downstream processors (i.e. changeLog, replicator).\n return [\n ['rollback', {tag: 'rollback'}],\n ['control', {tag: 'reset-required', message, errorDetails}],\n ];\n }\n }\n\n #logError(error: ReplicationError) {\n const {lsn, msg, err, lastLogTime} = error;\n const now = Date.now();\n\n // Output an error to logs as replication messages continue to be dropped,\n // at most once a minute.\n if (now - lastLogTime > 60_000) {\n this.#lc.error?.(\n `Unable to continue replication from LSN ${fromBigInt(lsn)}: ${String(\n err,\n )}`,\n err instanceof UnsupportedSchemaChangeError\n ? err.event.context\n : // 'content' can be a large byte Buffer. Exclude it from logging output.\n {...msg, content: undefined},\n );\n error.lastLogTime = now;\n }\n }\n\n // oxlint-disable-next-line require-await\n async #makeChanges(msg: Message): Promise<ChangeStreamData[]> {\n switch (msg.tag) {\n case 'begin':\n return [\n [\n 'begin',\n {...msg, json: 's'},\n {commitWatermark: toStateVersionString(must(msg.commitLsn))},\n ],\n ];\n\n case 'delete': {\n if (!(msg.key ?? 
msg.old)) {\n throw new Error(\n `Invalid DELETE msg (missing key): ${stringify(msg)}`,\n );\n }\n return [\n [\n 'data',\n {\n ...msg,\n relation: makeRelation(msg.relation),\n // https://www.postgresql.org/docs/current/protocol-logicalrep-message-formats.html#PROTOCOL-LOGICALREP-MESSAGE-FORMATS-DELETE\n key: must(msg.old ?? msg.key),\n },\n ],\n ];\n }\n\n case 'update': {\n return [\n [\n 'data',\n {\n ...msg,\n relation: makeRelation(msg.relation),\n // https://www.postgresql.org/docs/current/protocol-logicalrep-message-formats.html#PROTOCOL-LOGICALREP-MESSAGE-FORMATS-UPDATE\n key: msg.old ?? msg.key,\n },\n ],\n ];\n }\n\n case 'insert':\n return [['data', {...msg, relation: makeRelation(msg.relation)}]];\n case 'truncate':\n return [['data', {...msg, relations: msg.relations.map(makeRelation)}]];\n\n case 'message':\n if (!msg.prefix.startsWith(this.#shardPrefix)) {\n this.#lc.debug?.('ignoring message for different shard', msg.prefix);\n return [];\n }\n switch (msg.prefix.substring(this.#shardPrefix.length)) {\n case '': // Legacy prefix\n case '/ddl':\n return this.#handleDdlMessage(msg);\n default:\n this.#lc.debug?.('ignoring unknown message type', msg.prefix);\n return [];\n }\n\n case 'commit':\n this.#lastSnapshotInTx = undefined;\n return [\n [\n 'commit',\n msg,\n {watermark: toStateVersionString(must(msg.commitLsn))},\n ],\n ];\n\n case 'relation':\n return this.#handleRelation(msg);\n case 'type':\n return []; // Nothing need be done for custom types.\n case 'origin':\n // No need to detect replication loops since we are not a\n // PG replication source.\n return [];\n default:\n msg satisfies never;\n throw new Error(`Unexpected message type ${stringify(msg)}`);\n }\n }\n\n #preSchema: PublishedSchema | undefined;\n #lastSnapshotInTx: PublishedSchema | undefined;\n\n #handleDdlMessage(msg: MessageMessage) {\n const event = this.#parseReplicationEvent(msg.content);\n // Cancel manual schema adjustment timeouts when an upstream schema change\n // 
is about to happen, so as to avoid interfering / redundant work.\n clearTimeout(this.#replicaIdentityTimer);\n\n let previousSchema: PublishedSchema | null;\n const {type} = event;\n switch (type) {\n case 'ddlStart':\n // Store the schema in order to diff it with a subsequent ddlUpdate.\n this.#preSchema = event.schema;\n return [];\n case 'ddlUpdate':\n // guaranteed by event triggers\n previousSchema = must(\n this.#preSchema,\n `ddlUpdate received without a ddlStart`,\n );\n break;\n case 'schemaSnapshot':\n previousSchema = this.#lastSnapshotInTx ?? null;\n break;\n default: // Ignore unknown types for forwards compatibility\n this.#lc.info?.(`ignoring unknown ddl message type: ${type}`);\n return [];\n }\n\n // Store the schema (from either a ddlUpdate or schemaSnapshot) to\n // diff against the next schemaSnapshot.\n this.#lastSnapshotInTx = event.schema;\n if (!previousSchema) {\n this.#lc.info?.(`received ${msg.prefix}/${type} event`);\n return []; // First schemaSnapshot in the tx.\n }\n this.#lc.info?.(`processing ${msg.prefix}/${type} event`, event);\n\n const changes = this.#makeSchemaChanges(previousSchema, event).map(\n change => ['data', change] satisfies Data,\n );\n\n this.#lc\n .withContext('tag', event.event.tag)\n .withContext('query', event.context.query)\n .info?.(`${changes.length} schema change(s)`, {changes});\n\n const replicaIdentities = replicaIdentitiesForTablesWithoutPrimaryKeys(\n event.schema,\n );\n if (replicaIdentities) {\n this.#replicaIdentityTimer = setTimeout(async () => {\n try {\n await replicaIdentities.apply(this.#lc, this.#upstreamDB);\n } catch (err) {\n this.#lc.warn?.(`error setting replica identities`, err);\n }\n }, SET_REPLICA_IDENTITY_DELAY_MS);\n }\n\n return changes;\n }\n\n /**\n * A note on operation order:\n *\n * Postgres will drop related indexes when columns are dropped,\n * but SQLite will error instead (https://sqlite.org/forum/forumpost/2e62dba69f?t=c&hist).\n * The current workaround is to drop indexes 
first.\n *\n * Also note that although it should not be possible to both rename and\n * add/drop tables/columns in a single statement, the operations are\n * ordered to handle that possibility, by always dropping old entities,\n * then modifying kept entities, and then adding new entities.\n *\n * Thus, the order of replicating DDL updates is:\n * - drop indexes\n * - drop tables\n * - alter tables\n * - drop columns\n * - alter columns\n * - add columns\n * - create tables\n * - create indexes\n *\n * In the future the replication logic should be improved to handle this\n * behavior in SQLite by dropping dependent indexes manually before dropping\n * columns. This, for example, would be needed to properly support changing\n * the type of a column that's indexed.\n */\n #makeSchemaChanges(\n preSchema: PublishedSchema,\n update: DdlUpdateEvent | SchemaSnapshotEvent,\n ): SchemaChange[] {\n try {\n const [prevTbl, prevIdx] = specsByID(preSchema);\n const [nextTbl, nextIdx] = specsByID(update.schema);\n const changes: SchemaChange[] = [];\n\n // Validate the new table schemas\n for (const table of nextTbl.values()) {\n validate(this.#lc, table);\n }\n\n const [droppedIdx, createdIdx] = symmetricDifferences(prevIdx, nextIdx);\n\n // Detect modified indexes (same name, different definition).\n // This happens when a constraint is dropped and recreated with the\n // same name in a single ALTER TABLE statement.\n // Note: We compare using stable column attnums rather than names,\n // because table/column renames change the index spec cosmetically\n // (tableName, column keys) without the index actually being recreated.\n const keptIdx = intersection(prevIdx, nextIdx);\n for (const id of keptIdx) {\n if (\n isIndexStructurallyChanged(\n must(prevIdx.get(id)),\n must(nextIdx.get(id)),\n prevTbl,\n nextTbl,\n )\n ) {\n droppedIdx.add(id);\n createdIdx.add(id);\n }\n }\n\n for (const id of droppedIdx) {\n const {schema, name} = must(prevIdx.get(id));\n changes.push({tag: 
'drop-index', id: {schema, name}});\n }\n\n // DROP\n const [droppedTbl, createdTbl] = symmetricDifferences(prevTbl, nextTbl);\n for (const id of droppedTbl) {\n const {schema, name} = must(prevTbl.get(id));\n changes.push({tag: 'drop-table', id: {schema, name}});\n }\n // ALTER TABLE | ALTER PUBLICATION\n const tables = intersection(prevTbl, nextTbl);\n for (const id of tables) {\n changes.push(\n ...this.#getTableChanges(\n must(prevTbl.get(id)),\n must(nextTbl.get(id)),\n update.event.tag,\n ),\n );\n }\n // CREATE\n for (const id of createdTbl) {\n const spec = must(nextTbl.get(id));\n const createTable: TableCreate = {\n tag: 'create-table',\n spec,\n metadata: getMetadata(spec),\n };\n if (!update.event.tag.startsWith('CREATE')) {\n // Tables introduced to the publication via ALTER statements\n // or the COMMENT statement (from schemaSnapshots) must be\n // backfilled.\n createTable.backfill = mapValues(spec.columns, ({pos: attNum}) => ({\n attNum,\n })) satisfies Record<string, ColumnMetadata>;\n }\n changes.push(createTable);\n }\n\n // Add indexes last since they may reference tables / columns that need\n // to be created first.\n for (const id of createdIdx) {\n const spec = must(nextIdx.get(id));\n changes.push({tag: 'create-index', spec});\n }\n return changes;\n } catch (e) {\n throw new UnsupportedSchemaChangeError(String(e), update, {cause: e});\n }\n }\n\n #getTableChanges(\n oldTable: PublishedTableWithReplicaIdentity,\n newTable: PublishedTableWithReplicaIdentity,\n ddlTag: string,\n ): SchemaChange[] {\n const changes: SchemaChange[] = [];\n if (\n oldTable.schema !== newTable.schema ||\n oldTable.name !== newTable.name\n ) {\n changes.push({\n tag: 'rename-table',\n old: {schema: oldTable.schema, name: oldTable.name},\n new: {schema: newTable.schema, name: newTable.name},\n });\n }\n const oldMetadata = getMetadata(oldTable);\n const newMetadata = getMetadata(newTable);\n if (!deepEqual(oldMetadata, newMetadata)) {\n changes.push({\n tag: 
'update-table-metadata',\n table: {schema: newTable.schema, name: newTable.name},\n old: oldMetadata,\n new: newMetadata,\n });\n }\n const table = {schema: newTable.schema, name: newTable.name};\n const oldColumns = columnsByID(oldTable.columns);\n const newColumns = columnsByID(newTable.columns);\n\n // DROP\n const [dropped, added] = symmetricDifferences(oldColumns, newColumns);\n for (const id of dropped) {\n const {name: column} = must(oldColumns.get(id));\n changes.push({tag: 'drop-column', table, column});\n }\n\n // ALTER\n const both = intersection(oldColumns, newColumns);\n for (const id of both) {\n const {name: oldName, ...oldSpec} = must(oldColumns.get(id));\n const {name: newName, ...newSpec} = must(newColumns.get(id));\n // The three things that we care about are:\n // 1. name\n // 2. type\n // 3. not-null\n if (\n oldName !== newName ||\n oldSpec.dataType !== newSpec.dataType ||\n oldSpec.notNull !== newSpec.notNull\n ) {\n changes.push({\n tag: 'update-column',\n table,\n old: {name: oldName, spec: oldSpec},\n new: {name: newName, spec: newSpec},\n });\n }\n }\n\n // All columns introduced by a publication change require backfill\n // (which appear as ALTER PUBLICATION or COMMENT tags).\n // Columns created by ALTER TABLE, on the other hand, only require\n // backfill if they have non-constant defaults.\n const alwaysBackfill = ddlTag !== 'ALTER TABLE';\n\n // ADD\n for (const id of added) {\n const {name, ...spec} = must(newColumns.get(id));\n const column = {name, spec};\n const addColumn: ColumnAdd = {\n tag: 'add-column',\n table,\n column,\n tableMetadata: getMetadata(newTable),\n };\n if (alwaysBackfill) {\n addColumn.column.spec.dflt = null;\n addColumn.backfill = {attNum: spec.pos} satisfies ColumnMetadata;\n } else {\n // Determine if the ChangeProcessor will accept the column add as is.\n try {\n mapPostgresToLiteColumn(table.name, column);\n } catch (e) {\n if (!(e instanceof UnsupportedColumnDefaultError)) {\n // Note: 
mapPostgresToLiteColumn is not expected to throw any other\n // types of errors.\n throw e;\n }\n // If the column has an unsupported default (e.g. an expression or a\n // generated value), create the column as initially hidden with a\n // `null` default, and publish it after backfilling the values from\n // upstream. Note that this does require that the table have a valid\n // REPLICA IDENTITY, since backfill relies on merging new data with\n // an existing row.\n this.#lc.info?.(\n `Backfilling column ${table.name}.${name}: ${String(e)}`,\n );\n addColumn.column.spec.dflt = null;\n addColumn.backfill = {attNum: spec.pos} satisfies ColumnMetadata;\n }\n }\n changes.push(addColumn);\n }\n return changes;\n }\n\n #parseReplicationEvent(content: Uint8Array) {\n const str =\n content instanceof Buffer\n ? content.toString('utf-8')\n : new TextDecoder().decode(content);\n const json = JSON.parse(str);\n return v.parse(json, replicationEventSchema, 'passthrough');\n }\n\n /**\n * If `ddlDetection === true`, relation messages are irrelevant,\n * as schema changes are detected by event triggers that\n * emit custom messages.\n *\n * For degraded-mode replication (`ddlDetection === false`):\n * 1. query the current published schemas on upstream\n * 2. compare that with the InternalShardConfig.initialSchema\n * 3. compare that with the incoming MessageRelation\n * 4. On any discrepancy, throw an UnsupportedSchemaChangeError\n * to halt replication.\n *\n * Note that schemas queried in step [1] will be *post-transaction*\n * schemas, which are not necessarily suitable for actually processing\n * the statements in the transaction being replicated. 
In other words,\n * this mechanism cannot be used to reliably *replicate* schema changes.\n * However, they serve the purpose determining if schemas have changed.\n */\n async #handleRelation(rel: PostgresRelation): Promise<ChangeStreamData[]> {\n const {publications, ddlDetection} = this.#shardConfig;\n if (ddlDetection) {\n return [];\n }\n const currentSchema = await getPublicationInfo(\n this.#upstreamDB,\n publications,\n );\n const difference = getSchemaDifference(this.#initialSchema, currentSchema);\n if (difference !== null) {\n throw new MissingEventTriggerSupport(difference);\n }\n // Even if the currentSchema is equal to the initialSchema, the\n // MessageRelation itself must be checked to detect transient\n // schema changes within the transaction (e.g. adding and dropping\n // a table, or renaming a column and then renaming it back).\n const orel = this.#initialSchema.tables.find(\n t => t.oid === rel.relationOid,\n );\n if (!orel) {\n // Can happen if a table is created and then dropped in the same transaction.\n throw new MissingEventTriggerSupport(\n `relation not in initialSchema: ${stringify(rel)}`,\n );\n }\n if (relationDifferent(orel, rel)) {\n throw new MissingEventTriggerSupport(\n `relation has changed within the transaction: ${stringify(orel)} vs ${stringify(rel)}`,\n );\n }\n return [];\n }\n}\n\nfunction getSchemaDifference(\n a: PublishedSchema,\n b: PublishedSchema,\n): string | null {\n // Note: ignore indexes since changes need not to halt replication\n if (a.tables.length !== b.tables.length) {\n return `tables created or dropped`;\n }\n for (let i = 0; i < a.tables.length; i++) {\n const at = a.tables[i];\n const bt = b.tables[i];\n const difference = getTableDifference(at, bt);\n if (difference) {\n return difference;\n }\n }\n return null;\n}\n\n// ColumnSpec comparator\nconst byColumnPos = (a: [string, ColumnSpec], b: [string, ColumnSpec]) =>\n a[1].pos < b[1].pos ? -1 : a[1].pos > b[1].pos ? 
1 : 0;\n\nfunction getTableDifference(\n a: PublishedTableSpec,\n b: PublishedTableSpec,\n): string | null {\n if (a.oid !== b.oid || a.schema !== b.schema || a.name !== b.name) {\n return `Table \"${a.name}\" differs from table \"${b.name}\"`;\n }\n if (!deepEqual(a.primaryKey, b.primaryKey)) {\n return `Primary key of table \"${a.name}\" has changed`;\n }\n const acols = Object.entries(a.columns).sort(byColumnPos);\n const bcols = Object.entries(b.columns).sort(byColumnPos);\n if (\n acols.length !== bcols.length ||\n acols.some(([aname, acol], i) => {\n const [bname, bcol] = bcols[i];\n return (\n aname !== bname ||\n acol.pos !== bcol.pos ||\n acol.typeOID !== bcol.typeOID ||\n acol.notNull !== bcol.notNull\n );\n })\n ) {\n return `Columns of table \"${a.name}\" have changed`;\n }\n return null;\n}\n\nexport function relationDifferent(a: PublishedTableSpec, b: PostgresRelation) {\n if (a.oid !== b.relationOid || a.schema !== b.schema || a.name !== b.name) {\n return true;\n }\n if (\n // The MessageRelation's `keyColumns` field contains the columns in column\n // declaration order, whereas the PublishedTableSpec's `primaryKey`\n // contains the columns in primary key (i.e. index) order. 
Do an\n // order-agnostic compare here since it is not possible to detect\n // key-order changes from the MessageRelation message alone.\n b.replicaIdentity === 'default' &&\n !equals(new Set(a.primaryKey), new Set(b.keyColumns))\n ) {\n return true;\n }\n const acols = Object.entries(a.columns).sort(byColumnPos);\n const bcols = b.columns;\n return (\n acols.length !== bcols.length ||\n acols.some(([aname, acol], i) => {\n const bcol = bcols[i];\n return aname !== bcol.name || acol.typeOID !== bcol.typeOid;\n })\n );\n}\n\nfunction translateError(e: unknown): Error {\n if (!(e instanceof Error)) {\n return new Error(String(e));\n }\n if (e instanceof postgres.PostgresError && e.code === PG_ADMIN_SHUTDOWN) {\n return new ShutdownSignal(e);\n }\n return e;\n}\nconst idString = (id: Identifier) => `${id.schema}.${id.name}`;\n\nfunction specsByID(published: PublishedSchema) {\n return [\n // It would have been nice to use a CustomKeyMap here, but we rely on set-utils\n // operations which use plain Sets.\n new Map(published.tables.map(t => [t.oid, t])),\n new Map(published.indexes.map(i => [idString(i), i])),\n ] as const;\n}\n\n/**\n * Determines if an index was structurally changed (e.g. constraint dropped\n * and recreated with different columns) vs cosmetically changed (e.g. 
the\n * index spec changed because the table or a column was renamed).\n *\n * Compares boolean properties directly and resolves column names to their\n * stable attnums (pg_attribute `attnum`) for the column comparison.\n */\nfunction isIndexStructurallyChanged(\n prev: PublishedIndexSpec,\n next: PublishedIndexSpec,\n prevTables: Map<number, PublishedTableWithReplicaIdentity>,\n nextTables: Map<number, PublishedTableWithReplicaIdentity>,\n): boolean {\n if (\n prev.unique !== next.unique ||\n prev.isPrimaryKey !== next.isPrimaryKey ||\n prev.isReplicaIdentity !== next.isReplicaIdentity ||\n prev.isImmediate !== next.isImmediate\n ) {\n return true;\n }\n\n const prevTable = findTableBySchemaAndName(\n prevTables,\n prev.schema,\n prev.tableName,\n );\n const nextTable = findTableBySchemaAndName(\n nextTables,\n next.schema,\n next.tableName,\n );\n if (!prevTable || !nextTable) {\n // Can't resolve tables; conservatively treat as changed.\n return true;\n }\n\n const prevEntries = Object.entries(prev.columns);\n const nextEntries = Object.entries(next.columns);\n if (prevEntries.length !== nextEntries.length) {\n return true;\n }\n\n // Resolve column names → attnums and compare.\n const prevByAttnum = new Map<number | undefined, string>(\n prevEntries.map(([name, dir]) => [prevTable.columns[name]?.pos, dir]),\n );\n const nextByAttnum = new Map<number | undefined, string>(\n nextEntries.map(([name, dir]) => [nextTable.columns[name]?.pos, dir]),\n );\n\n if (prevByAttnum.has(undefined) || nextByAttnum.has(undefined)) {\n // Column not found in table spec; conservatively treat as changed.\n return true;\n }\n if (prevByAttnum.size !== nextByAttnum.size) {\n return true;\n }\n for (const [attnum, dir] of prevByAttnum) {\n if (nextByAttnum.get(attnum) !== dir) {\n return true;\n }\n }\n return false;\n}\n\nfunction findTableBySchemaAndName(\n tables: Map<number, PublishedTableWithReplicaIdentity>,\n schema: string,\n name: string,\n): 
PublishedTableWithReplicaIdentity | undefined {\n for (const table of tables.values()) {\n if (table.schema === schema && table.name === name) {\n return table;\n }\n }\n return undefined;\n}\n\nfunction columnsByID(\n columns: Record<string, ColumnSpec>,\n): Map<number, ColumnSpec & {name: string}> {\n const colsByID = new Map<number, ColumnSpec & {name: string}>();\n for (const [name, spec] of Object.entries(columns)) {\n // The `pos` field is the `attnum` in `pg_attribute`, which is a stable\n // identifier for the column in this table (i.e. never reused).\n colsByID.set(spec.pos, {...spec, name});\n }\n return colsByID;\n}\n\nfunction getMetadata(table: PublishedTableWithReplicaIdentity): TableMetadata {\n return {\n schemaOID: must(table.schemaOID),\n relationOID: table.oid,\n rowKey: Object.fromEntries(\n table.replicaIdentityColumns.map(k => [\n k,\n {attNum: table.columns[k].pos},\n ]),\n ),\n };\n}\n\n// Avoid sending the `columns` from the Postgres MessageRelation message.\n// They are not used downstream and the message can be large.\nfunction makeRelation(relation: PostgresRelation): MessageRelation {\n // Avoid sending the `columns` from the Postgres MessageRelation message.\n // They are not used downstream and the message can be large.\n const {columns: _, keyColumns, replicaIdentity, ...rest} = relation;\n return {\n ...rest,\n rowKey: {\n columns: keyColumns,\n type: replicaIdentity,\n },\n // For now, deprecated columns are sent for backwards compatibility.\n // These can be removed when bumping the MIN_PROTOCOL_VERSION to 5.\n keyColumns,\n replicaIdentity,\n };\n}\n\nclass UnsupportedSchemaChangeError extends Error {\n readonly name = 'UnsupportedSchemaChangeError';\n readonly description: string;\n readonly event: DdlUpdateEvent | SchemaSnapshotEvent;\n\n constructor(\n description: string,\n event: DdlUpdateEvent | SchemaSnapshotEvent,\n options?: ErrorOptions,\n ) {\n super(\n `Replication halted. 
Resync the replica to recover: ${description}`,\n options,\n );\n this.description = description;\n this.event = event;\n }\n}\n\nclass MissingEventTriggerSupport extends Error {\n readonly name = 'MissingEventTriggerSupport';\n\n constructor(msg: string) {\n super(\n `${msg}. Schema changes cannot be reliably replicated without event trigger support.`,\n );\n }\n}\n\n// TODO(0xcadams): should this be a ProtocolError?\nclass ShutdownSignal extends AbortError {\n readonly name = 'ShutdownSignal';\n\n constructor(cause: unknown) {\n super(\n 'shutdown signal received (e.g. another zero-cache taking over the replication stream)',\n {\n cause,\n },\n );\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAgHA,eAAsB,+BACpB,IACA,aACA,OACA,eACA,aACA,SAC6E;AAC7E,OAAM,YACJ,IACA,WAAW,MAAM,MAAM,GAAG,MAAM,YAChC,gBACC,KAAK,OAAO,YAAY,KAAK,OAAO,IAAI,aAAa,aAAa,QAAQ,CAC5E;CAED,MAAM,UAAU,IAAI,SAAS,IAAI,cAAc;CAC/C,MAAM,oBAAoB,+BACxB,IAAI,gBAAgB,QAAQ,CAC7B;AACD,SAAQ,OAAO;CAIf,MAAM,KAAK,SAAS,IAAI,YAAY;AACpC,KAAI;AAgBF,SAAO;GAAC;GAAmB,cARN,IAAI,qBACvB,IACA,aACA,OAVsB,MAAM,uBAC5B,IACA,IACA,OACA,kBACD,EAOC,QACD;GAEuC;WAChC;AACR,QAAM,GAAG,KAAK;;;AAIlB,eAAe,uBACb,IACA,KACA,OACA,EACE,gBACA,cAAc,YACd,sBAEF;AAEA,OAAM,kBAAkB,IAAI,KAAK,OAAO,eAAe;CAEvD,MAAM,kBAAkB,MAAM,oBAC5B,IACA,KACA,OACA,gBACA,mBACD;AACD,KAAI,CAAC,gBACH,OAAM,IAAI,gBACR,8CAA8C,iBAC/C;CAIH,MAAM,YAAY,CAAC,GAAG,MAAM,aAAa,CAAC,MAAM;CAChD,MAAM,aAAa,gBAAgB,aAChC,QAAO,MAAK,CAAC,EAAE,WAAW,0BAA0B,MAAM,CAAC,CAAC,CAC5D,MAAM;AACT,KAAI,CAAC,UAAU,WAAW,WAAW,EAAE;AACrC,KAAG,OAAO,8CAA8C,UAAU,GAAG;AACrE,QAAM,IAAI,OAAO,UAAU,MAAM,OAAO,MAAM,SAAS,CAAC;AACxD,QAAM,IAAI,gBACR,2BAA2B,UAAU,2CACjB,WAAW,GAChC;;AAMH,KAAI,CAAC,UAAU,gBAAgB,cAAc,WAAW,CACtD,OAAM,IAAI,gBACR,0BAA0B,gBAAgB,aAAa,0CACnB,WAAW,GAChD;CAIH,MAAM,SAAS,MAAM,GAAG;0DACgC,IAAI,WAAW,CAAC;IACtE,QAAQ;AACV,KAAI,OAAO,WAAW,WAAW,OAC/B,OAAM,IAAI,gBACR,0BAA0B,OAAO,MAAM,CAAC,gDACN,WAAW,GAC9C;CAGH,MAAM,EAAC,SAAQ;CACf,MAAM,SAAS,MAAM,GAEX;;0BAEc;AACxB,KAAI,OAAO,WAAW,EACpB,OAAM,IAAI,gBAAgB,oBAAoB,
KAAK,aAAa;CAElE,MAAM,CAAC,EAAC,YAAY,eAAc;AAClC,KAAI,eAAe,QAAQ,cAAc,OACvC,OAAM,IAAI,gBACR,oBAAoB,KAAK,gEAC1B;AAEH,QAAO;;AAIT,IAAM,4BAA4B;;;;;AAUlC,IAAM,uBAAN,MAAmD;CACjD;CACA;CACA;CACA;CACA;CAEA,YACE,IACA,aACA,OACA,SACA,SACA;AACA,QAAA,KAAW,GAAG,YAAY,aAAa,gBAAgB;AACvD,QAAA,cAAoB;AACpB,QAAA,QAAc;AACd,QAAA,UAAgB;AAChB,QAAA,UAAgB;;CAGlB,MAAM,YACJ,iBACA,mBAAsC,EAAE,EACjB;EACvB,MAAM,KAAK,SAAS,MAAA,IAAU,MAAA,YAAkB;EAChD,MAAM,EAAC,SAAQ,MAAA;EAEf,IAAI,UAAU;AACd,MAAI;AACF,IAAC,CAAC,WAAW,MAAM,MAAA,uCACjB,IACA,KACD;GACD,MAAM,SAAS,MAAM,uBAAuB,IAAI,MAAA,MAAY;AAC5D,SAAA,GAAS,OAAO,+BAA+B,OAAO;AACtD,UAAO,MAAM,MAAA,YACX,IACA,MACA,iBACA,QACA,iBACD;YACO;AACH,WAAQ,WAAW,GAAG,KAAK,CAAC;;;CAIrC,OAAA,YACE,IACA,MACA,iBACA,aACA,kBACuB;EACvB,MAAM,cAAc,uBAAuB,gBAAgB,GAAG;EAC9D,MAAM,EAAC,UAAU,SAAQ,MAAM,UAC7B,MAAA,IACA,IACA,MACA,CAAC,GAAG,YAAY,aAAa,EAC7B,YACD;EACD,MAAM,QAAQ,IAAI,MAAM,KAAK;EAK7B,MAAM,UAAU,IAAI,wBAAwB,MAAA,IAAU,gBAAgB;EACtE,MAAM,kBAAkB,IAAI,gBAAgB,MAAA,IAAU,UAAS,QAC7D,eAAe,MAAA,IAAU,MAAA,aAAmB,MAAA,SAAe,IAAI,CAChE;AACD,UACG,aAAa,UAAU,gBAAgB,CACvC,aAAa,iBAAiB,MAAM;AACvC,kBAAgB,IAAI,iBAAiB,iBAAiB;EAEtD,MAAM,cAAc,IAAI,YACtB,MAAA,IACA,MAAA,OACA,aACA,MAAA,QAAc,eACd,MAAA,YACD;AAED,GAAM,YAAY;AAChB,OAAI;IACF,IAAI,cAAuC;IAC3C,IAAI,gBAAgB;AAEpB,eAAW,MAAM,CAAC,KAAK,QAAQ,UAAU;AAEvC,SAAI,IAAI,QAAQ,aAAa;AAC3B,cAAQ,WAAW;OACjB;OACA,EAAC,KAAK,IAAI,eAAc;OACxB,EAAC,WAAW,qBAAqB,IAAI,EAAC;OACvC,CAAC;AAKF,UAAI,CAAC,iBAAiB,aAAa,eAAe;AAChD,eAAQ,QAAQ,YAAY,cAAc;AAC1C,qBAAc;;AAEhB;;AAGF,SAAI,CAAC,aAAa;MAChB,MAAM,MAAM,QAAQ,QAAQ,cAAc;AAC1C,aAAO,QAAQ,YAAa,MAAM;AAClC,oBAAc,EAAE;;KAGlB,IAAI;AACJ,UAAK,MAAM,UAAU,MAAM,YAAY,YAAY,KAAK,IAAI,EAAE;AAC5D,YAAM,QAAQ,KAAK,OAAO;AAC1B,mBAAa;;AAGf,aAAQ,aAAa,IAArB;MACE,KAAK;AACH,uBAAgB;AAChB;MACF,KAAK;AACH,uBAAgB;AAChB,mBAAY,gBAAgB,WAAW,GAAG;AAC1C,WACE,SAAS,WAAW,KACpB,QAAQ,aAAa,GAAG,2BACxB;AAMA,gBAAQ,QAAQ,YAAY,cAAc;AAC1C,sBAAc;;AAEhB;;;YAGC,GAAG;IAGV,MAAM,MAAM,eAAe,EAAE;AAC7B,QAAI,eAAe,eAGjB,OAAM,MAAA,uBAA6B;AAErC,YAAQ,KAAK,IAAI;;MAEjB;AAEJ,QAAA,GAAS,OACP,8BAA8B,KAAK,QAAQ,gBAAgB,
oBACzD,MAAA,QAAc,QACf,GACF;AAED,SAAO;GACL,SAAS,QAAQ,UAAU;GAC3B,MAAM,EAAC,OAAM,WAAU,MAAM,IAAI,OAAO,GAAG,UAAU,EAAC;GACvD;;CAGH,OAAA,wBAA+B;EAC7B,MAAM,KAAK,SAAS,MAAA,IAAU,MAAA,YAAkB;AAChD,MAAI;GACF,MAAM,UAAU,MAAM,oBACpB,MAAA,IACA,IACA,MAAA,OACA,MAAA,QAAc,QACf;AACD,OAAI,QACF,OAAA,GAAS,OACP,gCAAgC,MAAA,QAAc,QAAQ,IAAI,UAAU,QAAQ,kBAAkB,GAC/F;WAEI,GAAG;AACV,SAAA,GAAS,OAAO,8BAA8B,EAAE;YACxC;AACR,SAAM,GAAG,KAAK;;;;;;;;;;;;CAalB,OAAA,uCACE,IACA,YACmC;EACnC,MAAM,iBAAiB,0BAA0B,MAAA,MAAY;EAC7D,MAAM,iBAAiB,sBAAsB,MAAA,MAAY;EAEzD,MAAM,SAAS,MAAM,MAAM,IAAI,OAAM,QAAO;GAG1C,MAAM,SAAS,MAAM,GAEX;;;gCAGgB,eAAe,kBAAkB,eAAe;iCAC/C;AAC3B,SAAA,GAAS,OACP,iCAAiC,KAAK,UAAU,OAAO,GACxD;GACD,MAAM,gBAAgB,GAAG,eAAe,MAAA,MAAY,CAAC;GACrD,MAAM,iBAAiB,MAAM,GAAG;;iBAErB,IAAI,cAAc,CAAC;AAE9B,OAAI,OAAO,WAAW,GAAG;IACvB,MAAM,aAAa,MAAM,GAAG;;;iCAGH,eAAe,kBAAkB,eAAe;;AAEzE,UAAA,GAAS,OACP,QAAQ,WAAW,wCACnB;KAAC,OAAO;KAAY,UAAU;KAAe,CAC9C;AACD,UAAM,IAAI,WACR,oBAAoB,WAAW,+FAGhC;;AAGH,SAAA,GAAS,OACP,uCAAuC,WAAW,KAAK,KAAK,UAC1D,eACD,GACF;AACD,SAAM,GAAG;sBACO,IAAI,cAAc,CAAC,gBAAgB;AACnD,SAAM,GAAG;iBACE,IAAI,cAAc,CAAC;sCACE,MAAA,QAAc;yBAC3B;GACnB,MAAM,gBAAgB,MAAM,GAAsC;kCACtC,IAAI,cAAc,CAAC;AAC/C,SAAA,GAAS,OACP,sCAAsC,WAAW,KAAK,KAAK,UACzD,cACD,GACF;AACD,UAAO;IACP;EAEF,MAAM,OAAO,OAAO,QAAQ,EAAC,UAAS,QAAQ,KAAK,CAAC,KAAK,EAAC,UAAS,IAAI;AACvE,MAAI,KAAK,OACP,OAAA,GAAS,OAAO,uBAAuB,KAAK,eAAe;EAE7D,MAAM,aAAa,OAChB,QAAQ,EAAC,WAAU,SAAS,WAAW,CACvC,KAAK,EAAC,WAAU,KAAK;AACxB,SAAO,EACL,SAAS,WAAW,SAChB,MAAA,qBAA2B,IAAI,WAAW,GAC1C,aACL;;CAGH,OAAA,qBAA4B,KAAiB,OAAiB;AAC5D,QAAA,GAAS,OAAO,sCAAsC,QAAQ;AAC9D,OAAK,IAAI,IAAI,GAAG,IAAI,GAAG,IACrB,KAAI;AACF,SAAM,GAAG;;iCAEgB,IAAI,MAAM,CAAC;;AAEpC,SAAA,GAAS,OAAO,wBAAwB,QAAQ;AAChD;WACO,GAAG;AAEV,OACE,aAAa,SAAS,iBACtB,EAAE,SAAS,iBAKX,OAAA,GAAS,QAAQ,WAAW,IAAI,EAAE,IAAI,OAAO,EAAE,IAAI,EAAE;OAErD,OAAA,GAAS,OAAO,kBAAkB,SAAS,EAAE;AAE/C,SAAM,MAAM,IAAK;;AAGrB,QAAA,GAAS,OAAO,sCAAsC,QAAQ;;;AAKlE,IAAa,QAAb,MAAuC;CACrC;CACA,2BAA0C;CAE1C,YAAY,MAAoB;AAC9B,QAAA,OAAa;;CAGf,SAAS,QAAmC;AAC1C,UAAQ,OAAO,IAAf;GACE,KAA
K;IACH,MAAM,EAAC,cAAa,OAAO;AAC3B,QAAI,OAAO,GAAG,IACZ,OAAA,oBAA0B,UAAU;QAQpC,OAAA,0BAAgC,UAAU;AAE5C;GACF,KAAK;AAKH,QAAI,CAAC,OAAO,GAAG,QACb,OAAA,oBAA0B,OAAO,GAAG,gBAAgB;AAEtD;;;CAIN,qBAAqB,WAAmB;AACtC,QAAA,0BAAgC;;CAGlC,IAAI,WAAwB;AAC1B,MACE,MAAA,2BACA,MAAA,2BAAiC,UAEjC,OAAA,0BAAgC;AAElC,QAAA,QAAc,UAAU;;CAG1B,2BAA2B,WAAmB;AAC5C,MAAI,MAAA,4BAAkC,KACpC,OAAA,QAAc,UAAU;;CAI5B,SAAS,WAAwB;EAC/B,MAAM,MAAM,uBAAuB,UAAU;AAC7C,QAAA,KAAW,KAAK,IAAI;;;AAWxB,IAAM,gCAAgC;AAEtC,IAAM,cAAN,MAAkB;CAChB;CACA;CACA;CACA;CACA;CAEA;CACA;CAEA,YACE,IACA,EAAC,OAAO,YACR,aACA,eACA,aACA;AACA,QAAA,KAAW;AAEX,QAAA,cAAoB,GAAG,MAAM,GAAG;AAChC,QAAA,cAAoB;AACpB,QAAA,gBAAsB;AACtB,QAAA,aAAmB,SAAS,IAAI,aAAa;IAC1C,iBAAiB;GAClB,YAAY,GAAE,qBAAqB,+BAA8B;GAClE,CAAC;;CAGJ,MAAM,YAAY,KAAa,KAA8C;AAC3E,MAAI,MAAA,OAAa;AACf,SAAA,SAAe,MAAA,MAAY;AAC3B,UAAO,EAAE;;AAEX,MAAI;AACF,UAAO,MAAM,MAAA,YAAkB,IAAI;WAC5B,KAAK;AACZ,SAAA,QAAc;IAAC;IAAK;IAAK;IAAK,aAAa;IAAE;AAC7C,SAAA,SAAe,MAAA,MAAY;GAE3B,MAAM,UAAU,2CAA2C,WAAW,IAAI;GAC1E,MAAM,eAA2B,EAAC,OAAO,SAAQ;AACjD,OAAI,eAAe,8BAA8B;AAC/C,iBAAa,SAAS,IAAI;AAC1B,iBAAa,UAAU,IAAI,MAAM;SAEjC,cAAa,SAAS,OAAO,IAAI;AAKnC,UAAO,CACL,CAAC,YAAY,EAAC,KAAK,YAAW,CAAC,EAC/B,CAAC,WAAW;IAAC,KAAK;IAAkB;IAAS;IAAa,CAAC,CAC5D;;;CAIL,UAAU,OAAyB;EACjC,MAAM,EAAC,KAAK,KAAK,KAAK,gBAAe;EACrC,MAAM,MAAM,KAAK,KAAK;AAItB,MAAI,MAAM,cAAc,KAAQ;AAC9B,SAAA,GAAS,QACP,2CAA2C,WAAW,IAAI,CAAC,IAAI,OAC7D,IACD,IACD,eAAe,+BACX,IAAI,MAAM,UAEV;IAAC,GAAG;IAAK,SAAS,KAAA;IAAU,CACjC;AACD,SAAM,cAAc;;;CAKxB,OAAA,YAAmB,KAA2C;AAC5D,UAAQ,IAAI,KAAZ;GACE,KAAK,QACH,QAAO,CACL;IACE;IACA;KAAC,GAAG;KAAK,MAAM;KAAI;IACnB,EAAC,iBAAiB,qBAAqB,KAAK,IAAI,UAAU,CAAC,EAAC;IAC7D,CACF;GAEH,KAAK;AACH,QAAI,EAAE,IAAI,OAAO,IAAI,KACnB,OAAM,IAAI,MACR,qCAAqC,UAAU,IAAI,GACpD;AAEH,WAAO,CACL,CACE,QACA;KACE,GAAG;KACH,UAAU,aAAa,IAAI,SAAS;KAEpC,KAAK,KAAK,IAAI,OAAO,IAAI,IAAI;KAC9B,CACF,CACF;GAGH,KAAK,SACH,QAAO,CACL,CACE,QACA;IACE,GAAG;IACH,UAAU,aAAa,IAAI,SAAS;IAEpC,KAAK,IAAI,OAAO,IAAI;IACrB,CACF,CACF;GAGH,KAAK,SACH,QAAO,CAAC,CAAC,QAAQ;IAAC,GAAG;IAAK,UAAU,aAAa,IAAI,SAAS;IAA
C,CAAC,CAAC;GACnE,KAAK,WACH,QAAO,CAAC,CAAC,QAAQ;IAAC,GAAG;IAAK,WAAW,IAAI,UAAU,IAAI,aAAa;IAAC,CAAC,CAAC;GAEzE,KAAK;AACH,QAAI,CAAC,IAAI,OAAO,WAAW,MAAA,YAAkB,EAAE;AAC7C,WAAA,GAAS,QAAQ,wCAAwC,IAAI,OAAO;AACpE,YAAO,EAAE;;AAEX,YAAQ,IAAI,OAAO,UAAU,MAAA,YAAkB,OAAO,EAAtD;KACE,KAAK;KACL,KAAK,OACH,QAAO,MAAA,iBAAuB,IAAI;KACpC;AACE,YAAA,GAAS,QAAQ,iCAAiC,IAAI,OAAO;AAC7D,aAAO,EAAE;;GAGf,KAAK;AACH,UAAA,mBAAyB,KAAA;AACzB,WAAO,CACL;KACE;KACA;KACA,EAAC,WAAW,qBAAqB,KAAK,IAAI,UAAU,CAAC,EAAC;KACvD,CACF;GAEH,KAAK,WACH,QAAO,MAAA,eAAqB,IAAI;GAClC,KAAK,OACH,QAAO,EAAE;GACX,KAAK,SAGH,QAAO,EAAE;GACX,QAEE,OAAM,IAAI,MAAM,2BAA2B,UAAU,IAAI,GAAG;;;CAIlE;CACA;CAEA,kBAAkB,KAAqB;EACrC,MAAM,QAAQ,MAAA,sBAA4B,IAAI,QAAQ;AAGtD,eAAa,MAAA,qBAA2B;EAExC,IAAI;EACJ,MAAM,EAAC,SAAQ;AACf,UAAQ,MAAR;GACE,KAAK;AAEH,UAAA,YAAkB,MAAM;AACxB,WAAO,EAAE;GACX,KAAK;AAEH,qBAAiB,KACf,MAAA,WACA,wCACD;AACD;GACF,KAAK;AACH,qBAAiB,MAAA,oBAA0B;AAC3C;GACF;AACE,UAAA,GAAS,OAAO,sCAAsC,OAAO;AAC7D,WAAO,EAAE;;AAKb,QAAA,mBAAyB,MAAM;AAC/B,MAAI,CAAC,gBAAgB;AACnB,SAAA,GAAS,OAAO,YAAY,IAAI,OAAO,GAAG,KAAK,QAAQ;AACvD,UAAO,EAAE;;AAEX,QAAA,GAAS,OAAO,cAAc,IAAI,OAAO,GAAG,KAAK,SAAS,MAAM;EAEhE,MAAM,UAAU,MAAA,kBAAwB,gBAAgB,MAAM,CAAC,KAC7D,WAAU,CAAC,QAAQ,OAAO,CAC3B;AAED,QAAA,GACG,YAAY,OAAO,MAAM,MAAM,IAAI,CACnC,YAAY,SAAS,MAAM,QAAQ,MAAM,CACzC,OAAO,GAAG,QAAQ,OAAO,oBAAoB,EAAC,SAAQ,CAAC;EAE1D,MAAM,oBAAoB,6CACxB,MAAM,OACP;AACD,MAAI,kBACF,OAAA,uBAA6B,WAAW,YAAY;AAClD,OAAI;AACF,UAAM,kBAAkB,MAAM,MAAA,IAAU,MAAA,WAAiB;YAClD,KAAK;AACZ,UAAA,GAAS,OAAO,oCAAoC,IAAI;;KAEzD,8BAA8B;AAGnC,SAAO;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA8BT,mBACE,WACA,QACgB;AAChB,MAAI;GACF,MAAM,CAAC,SAAS,WAAW,UAAU,UAAU;GAC/C,MAAM,CAAC,SAAS,WAAW,UAAU,OAAO,OAAO;GACnD,MAAM,UAA0B,EAAE;AAGlC,QAAK,MAAM,SAAS,QAAQ,QAAQ,CAClC,UAAS,MAAA,IAAU,MAAM;GAG3B,MAAM,CAAC,YAAY,cAAc,qBAAqB,SAAS,QAAQ;GAQvE,MAAM,UAAU,aAAa,SAAS,QAAQ;AAC9C,QAAK,MAAM,MAAM,QACf,KACE,2BACE,KAAK,QAAQ,IAAI,GAAG,CAAC,EACrB,KAAK,QAAQ,IAAI,GAAG,CAAC,EACrB,SACA,QACD,EACD;AACA,eAAW,IAAI,GAAG;AAClB,eAAW,IAAI,GAAG;;AAItB,QAAK,MAAM,MAAM,YAAY;IAC3B,MAAM,EAAC,QAAQ,S
AAQ,KAAK,QAAQ,IAAI,GAAG,CAAC;AAC5C,YAAQ,KAAK;KAAC,KAAK;KAAc,IAAI;MAAC;MAAQ;MAAK;KAAC,CAAC;;GAIvD,MAAM,CAAC,YAAY,cAAc,qBAAqB,SAAS,QAAQ;AACvE,QAAK,MAAM,MAAM,YAAY;IAC3B,MAAM,EAAC,QAAQ,SAAQ,KAAK,QAAQ,IAAI,GAAG,CAAC;AAC5C,YAAQ,KAAK;KAAC,KAAK;KAAc,IAAI;MAAC;MAAQ;MAAK;KAAC,CAAC;;GAGvD,MAAM,SAAS,aAAa,SAAS,QAAQ;AAC7C,QAAK,MAAM,MAAM,OACf,SAAQ,KACN,GAAG,MAAA,gBACD,KAAK,QAAQ,IAAI,GAAG,CAAC,EACrB,KAAK,QAAQ,IAAI,GAAG,CAAC,EACrB,OAAO,MAAM,IACd,CACF;AAGH,QAAK,MAAM,MAAM,YAAY;IAC3B,MAAM,OAAO,KAAK,QAAQ,IAAI,GAAG,CAAC;IAClC,MAAM,cAA2B;KAC/B,KAAK;KACL;KACA,UAAU,YAAY,KAAK;KAC5B;AACD,QAAI,CAAC,OAAO,MAAM,IAAI,WAAW,SAAS,CAIxC,aAAY,WAAW,UAAU,KAAK,UAAU,EAAC,KAAK,cAAa,EACjE,QACD,EAAE;AAEL,YAAQ,KAAK,YAAY;;AAK3B,QAAK,MAAM,MAAM,YAAY;IAC3B,MAAM,OAAO,KAAK,QAAQ,IAAI,GAAG,CAAC;AAClC,YAAQ,KAAK;KAAC,KAAK;KAAgB;KAAK,CAAC;;AAE3C,UAAO;WACA,GAAG;AACV,SAAM,IAAI,6BAA6B,OAAO,EAAE,EAAE,QAAQ,EAAC,OAAO,GAAE,CAAC;;;CAIzE,iBACE,UACA,UACA,QACgB;EAChB,MAAM,UAA0B,EAAE;AAClC,MACE,SAAS,WAAW,SAAS,UAC7B,SAAS,SAAS,SAAS,KAE3B,SAAQ,KAAK;GACX,KAAK;GACL,KAAK;IAAC,QAAQ,SAAS;IAAQ,MAAM,SAAS;IAAK;GACnD,KAAK;IAAC,QAAQ,SAAS;IAAQ,MAAM,SAAS;IAAK;GACpD,CAAC;EAEJ,MAAM,cAAc,YAAY,SAAS;EACzC,MAAM,cAAc,YAAY,SAAS;AACzC,MAAI,CAAC,UAAU,aAAa,YAAY,CACtC,SAAQ,KAAK;GACX,KAAK;GACL,OAAO;IAAC,QAAQ,SAAS;IAAQ,MAAM,SAAS;IAAK;GACrD,KAAK;GACL,KAAK;GACN,CAAC;EAEJ,MAAM,QAAQ;GAAC,QAAQ,SAAS;GAAQ,MAAM,SAAS;GAAK;EAC5D,MAAM,aAAa,YAAY,SAAS,QAAQ;EAChD,MAAM,aAAa,YAAY,SAAS,QAAQ;EAGhD,MAAM,CAAC,SAAS,SAAS,qBAAqB,YAAY,WAAW;AACrE,OAAK,MAAM,MAAM,SAAS;GACxB,MAAM,EAAC,MAAM,WAAU,KAAK,WAAW,IAAI,GAAG,CAAC;AAC/C,WAAQ,KAAK;IAAC,KAAK;IAAe;IAAO;IAAO,CAAC;;EAInD,MAAM,OAAO,aAAa,YAAY,WAAW;AACjD,OAAK,MAAM,MAAM,MAAM;GACrB,MAAM,EAAC,MAAM,SAAS,GAAG,YAAW,KAAK,WAAW,IAAI,GAAG,CAAC;GAC5D,MAAM,EAAC,MAAM,SAAS,GAAG,YAAW,KAAK,WAAW,IAAI,GAAG,CAAC;AAK5D,OACE,YAAY,WACZ,QAAQ,aAAa,QAAQ,YAC7B,QAAQ,YAAY,QAAQ,QAE5B,SAAQ,KAAK;IACX,KAAK;IACL;IACA,KAAK;KAAC,MAAM;KAAS,MAAM;KAAQ;IACnC,KAAK;KAAC,MAAM;KAAS,MAAM;KAAQ;IACpC,CAAC;;EAQN,MAAM,iBAAiB,WAAW;AAGlC,OAAK,MAAM,MAAM,OAAO;GACtB,MAAM,EAAC,MAAM,GAAG,SAAQ,KA
AK,WAAW,IAAI,GAAG,CAAC;GAChD,MAAM,SAAS;IAAC;IAAM;IAAK;GAC3B,MAAM,YAAuB;IAC3B,KAAK;IACL;IACA;IACA,eAAe,YAAY,SAAS;IACrC;AACD,OAAI,gBAAgB;AAClB,cAAU,OAAO,KAAK,OAAO;AAC7B,cAAU,WAAW,EAAC,QAAQ,KAAK,KAAI;SAGvC,KAAI;AACF,4BAAwB,MAAM,MAAM,OAAO;YACpC,GAAG;AACV,QAAI,EAAE,aAAa,+BAGjB,OAAM;AAQR,UAAA,GAAS,OACP,sBAAsB,MAAM,KAAK,GAAG,KAAK,IAAI,OAAO,EAAE,GACvD;AACD,cAAU,OAAO,KAAK,OAAO;AAC7B,cAAU,WAAW,EAAC,QAAQ,KAAK,KAAI;;AAG3C,WAAQ,KAAK,UAAU;;AAEzB,SAAO;;CAGT,uBAAuB,SAAqB;EAC1C,MAAM,MACJ,mBAAmB,SACf,QAAQ,SAAS,QAAQ,GACzB,IAAI,aAAa,CAAC,OAAO,QAAQ;AAEvC,SAAO,MADM,KAAK,MAAM,IAAI,EACP,wBAAwB,cAAc;;;;;;;;;;;;;;;;;;;;CAqB7D,OAAA,eAAsB,KAAoD;EACxE,MAAM,EAAC,cAAc,iBAAgB,MAAA;AACrC,MAAI,aACF,QAAO,EAAE;EAEX,MAAM,gBAAgB,MAAM,mBAC1B,MAAA,YACA,aACD;EACD,MAAM,aAAa,oBAAoB,MAAA,eAAqB,cAAc;AAC1E,MAAI,eAAe,KACjB,OAAM,IAAI,2BAA2B,WAAW;EAMlD,MAAM,OAAO,MAAA,cAAoB,OAAO,MACtC,MAAK,EAAE,QAAQ,IAAI,YACpB;AACD,MAAI,CAAC,KAEH,OAAM,IAAI,2BACR,kCAAkC,UAAU,IAAI,GACjD;AAEH,MAAI,kBAAkB,MAAM,IAAI,CAC9B,OAAM,IAAI,2BACR,gDAAgD,UAAU,KAAK,CAAC,MAAM,UAAU,IAAI,GACrF;AAEH,SAAO,EAAE;;;AAIb,SAAS,oBACP,GACA,GACe;AAEf,KAAI,EAAE,OAAO,WAAW,EAAE,OAAO,OAC/B,QAAO;AAET,MAAK,IAAI,IAAI,GAAG,IAAI,EAAE,OAAO,QAAQ,KAAK;EACxC,MAAM,KAAK,EAAE,OAAO;EACpB,MAAM,KAAK,EAAE,OAAO;EACpB,MAAM,aAAa,mBAAmB,IAAI,GAAG;AAC7C,MAAI,WACF,QAAO;;AAGX,QAAO;;AAIT,IAAM,eAAe,GAAyB,MAC5C,EAAE,GAAG,MAAM,EAAE,GAAG,MAAM,KAAK,EAAE,GAAG,MAAM,EAAE,GAAG,MAAM,IAAI;AAEvD,SAAS,mBACP,GACA,GACe;AACf,KAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,WAAW,EAAE,UAAU,EAAE,SAAS,EAAE,KAC3D,QAAO,UAAU,EAAE,KAAK,wBAAwB,EAAE,KAAK;AAEzD,KAAI,CAAC,UAAU,EAAE,YAAY,EAAE,WAAW,CACxC,QAAO,yBAAyB,EAAE,KAAK;CAEzC,MAAM,QAAQ,OAAO,QAAQ,EAAE,QAAQ,CAAC,KAAK,YAAY;CACzD,MAAM,QAAQ,OAAO,QAAQ,EAAE,QAAQ,CAAC,KAAK,YAAY;AACzD,KACE,MAAM,WAAW,MAAM,UACvB,MAAM,MAAM,CAAC,OAAO,OAAO,MAAM;EAC/B,MAAM,CAAC,OAAO,QAAQ,MAAM;AAC5B,SACE,UAAU,SACV,KAAK,QAAQ,KAAK,OAClB,KAAK,YAAY,KAAK,WACtB,KAAK,YAAY,KAAK;GAExB,CAEF,QAAO,qBAAqB,EAAE,KAAK;AAErC,QAAO;;AAGT,SAAgB,kBAAkB,GAAuB,GAAqB;AAC5E,KAAI,EAAE,QAAQ,EAAE,eAAe,EAAE,WAAW,EAAE,UAAU,EAAE,SAAS,EAAE,KAC
nE,QAAO;AAET,KAME,EAAE,oBAAoB,aACtB,CAAC,OAAO,IAAI,IAAI,EAAE,WAAW,EAAE,IAAI,IAAI,EAAE,WAAW,CAAC,CAErD,QAAO;CAET,MAAM,QAAQ,OAAO,QAAQ,EAAE,QAAQ,CAAC,KAAK,YAAY;CACzD,MAAM,QAAQ,EAAE;AAChB,QACE,MAAM,WAAW,MAAM,UACvB,MAAM,MAAM,CAAC,OAAO,OAAO,MAAM;EAC/B,MAAM,OAAO,MAAM;AACnB,SAAO,UAAU,KAAK,QAAQ,KAAK,YAAY,KAAK;GACpD;;AAIN,SAAS,eAAe,GAAmB;AACzC,KAAI,EAAE,aAAa,OACjB,QAAO,IAAI,MAAM,OAAO,EAAE,CAAC;AAE7B,KAAI,aAAa,SAAS,iBAAiB,EAAE,SAAS,kBACpD,QAAO,IAAI,eAAe,EAAE;AAE9B,QAAO;;AAET,IAAM,YAAY,OAAmB,GAAG,GAAG,OAAO,GAAG,GAAG;AAExD,SAAS,UAAU,WAA4B;AAC7C,QAAO,CAGL,IAAI,IAAI,UAAU,OAAO,KAAI,MAAK,CAAC,EAAE,KAAK,EAAE,CAAC,CAAC,EAC9C,IAAI,IAAI,UAAU,QAAQ,KAAI,MAAK,CAAC,SAAS,EAAE,EAAE,EAAE,CAAC,CAAC,CACtD;;;;;;;;;;AAWH,SAAS,2BACP,MACA,MACA,YACA,YACS;AACT,KACE,KAAK,WAAW,KAAK,UACrB,KAAK,iBAAiB,KAAK,gBAC3B,KAAK,sBAAsB,KAAK,qBAChC,KAAK,gBAAgB,KAAK,YAE1B,QAAO;CAGT,MAAM,YAAY,yBAChB,YACA,KAAK,QACL,KAAK,UACN;CACD,MAAM,YAAY,yBAChB,YACA,KAAK,QACL,KAAK,UACN;AACD,KAAI,CAAC,aAAa,CAAC,UAEjB,QAAO;CAGT,MAAM,cAAc,OAAO,QAAQ,KAAK,QAAQ;CAChD,MAAM,cAAc,OAAO,QAAQ,KAAK,QAAQ;AAChD,KAAI,YAAY,WAAW,YAAY,OACrC,QAAO;CAIT,MAAM,eAAe,IAAI,IACvB,YAAY,KAAK,CAAC,MAAM,SAAS,CAAC,UAAU,QAAQ,OAAO,KAAK,IAAI,CAAC,CACtE;CACD,MAAM,eAAe,IAAI,IACvB,YAAY,KAAK,CAAC,MAAM,SAAS,CAAC,UAAU,QAAQ,OAAO,KAAK,IAAI,CAAC,CACtE;AAED,KAAI,aAAa,IAAI,KAAA,EAAU,IAAI,aAAa,IAAI,KAAA,EAAU,CAE5D,QAAO;AAET,KAAI,aAAa,SAAS,aAAa,KACrC,QAAO;AAET,MAAK,MAAM,CAAC,QAAQ,QAAQ,aAC1B,KAAI,aAAa,IAAI,OAAO,KAAK,IAC/B,QAAO;AAGX,QAAO;;AAGT,SAAS,yBACP,QACA,QACA,MAC+C;AAC/C,MAAK,MAAM,SAAS,OAAO,QAAQ,CACjC,KAAI,MAAM,WAAW,UAAU,MAAM,SAAS,KAC5C,QAAO;;AAMb,SAAS,YACP,SAC0C;CAC1C,MAAM,2BAAW,IAAI,KAA0C;AAC/D,MAAK,MAAM,CAAC,MAAM,SAAS,OAAO,QAAQ,QAAQ,CAGhD,UAAS,IAAI,KAAK,KAAK;EAAC,GAAG;EAAM;EAAK,CAAC;AAEzC,QAAO;;AAGT,SAAS,YAAY,OAAyD;AAC5E,QAAO;EACL,WAAW,KAAK,MAAM,UAAU;EAChC,aAAa,MAAM;EACnB,QAAQ,OAAO,YACb,MAAM,uBAAuB,KAAI,MAAK,CACpC,GACA,EAAC,QAAQ,MAAM,QAAQ,GAAG,KAAI,CAC/B,CAAC,CACH;EACF;;AAKH,SAAS,aAAa,UAA6C;CAGjE,MAAM,EAAC,SAAS,GAAG,YAAY,iBAAiB,GAAG,SAAQ;AAC3D,QAAO;EACL,GAAG;EACH,QAAQ;GACN,SAA
S;GACT,MAAM;GACP;EAGD;EACA;EACD;;AAGH,IAAM,+BAAN,cAA2C,MAAM;CAC/C,OAAgB;CAChB;CACA;CAEA,YACE,aACA,OACA,SACA;AACA,QACE,sDAAsD,eACtD,QACD;AACD,OAAK,cAAc;AACnB,OAAK,QAAQ;;;AAIjB,IAAM,6BAAN,cAAyC,MAAM;CAC7C,OAAgB;CAEhB,YAAY,KAAa;AACvB,QACE,GAAG,IAAI,+EACR;;;AAKL,IAAM,iBAAN,cAA6B,WAAW;CACtC,OAAgB;CAEhB,YAAY,OAAgB;AAC1B,QACE,yFACA,EACE,OACD,CACF"}
1
+ {"version":3,"file":"change-source.js","names":["#lc","#db","#upstreamUri","#shard","#replica","#context","#lagReporter","#stopExistingReplicationSlotSubscribers","#startStream","#logCurrentReplicaInfo","#dropReplicationSlots","#acks","#expectDownstreamAck","#ackIfDownstreamIsCaughtUp","#waitingForDownstreamAck","#sendAck","#lagIntervalMs","#getPgVersion","#pgVersion","#lastReportID","#timer","#scheduleNextReport","#shardPrefix","#shardConfig","#initialSchema","#error","#logError","#makeChanges","#handleDdlMessage","#lastSnapshotInTx","#handleRelation","#replicaIdentityTimer","#preSchema","#makeSchemaChanges","#getTableChanges"],"sources":["../../../../../../../zero-cache/src/services/change-source/pg/change-source.ts"],"sourcesContent":["import {\n PG_ADMIN_SHUTDOWN,\n PG_OBJECT_IN_USE,\n} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport {nanoid} from 'nanoid';\nimport postgres from 'postgres';\nimport {AbortError} from '../../../../../shared/src/abort-error.ts';\nimport {assert} from '../../../../../shared/src/asserts.ts';\nimport {stringify} from '../../../../../shared/src/bigint-json.ts';\nimport {deepEqual} from '../../../../../shared/src/json.ts';\nimport {must} from '../../../../../shared/src/must.ts';\nimport {mapValues} from '../../../../../shared/src/objects.ts';\nimport {\n equals,\n intersection,\n symmetricDifferences,\n} from '../../../../../shared/src/set-utils.ts';\nimport {sleep} from '../../../../../shared/src/sleep.ts';\nimport * as v from '../../../../../shared/src/valita.ts';\nimport {Database} from '../../../../../zqlite/src/db.ts';\nimport {\n mapPostgresToLiteColumn,\n UnsupportedColumnDefaultError,\n} from '../../../db/pg-to-lite.ts';\nimport {runTx} from '../../../db/run-transaction.ts';\nimport type {\n ColumnSpec,\n PublishedIndexSpec,\n PublishedTableSpec,\n} from '../../../db/specs.ts';\nimport {StatementRunner} from '../../../db/statements.ts';\nimport {type LexiVersion} from 
'../../../types/lexi-version.ts';\nimport {pgClient, type PostgresDB} from '../../../types/pg.ts';\nimport {\n upstreamSchema,\n type ShardConfig,\n type ShardID,\n} from '../../../types/shards.ts';\nimport {\n majorVersionFromString,\n majorVersionToString,\n} from '../../../types/state-version.ts';\nimport type {Sink} from '../../../types/streams.ts';\nimport {AutoResetSignal} from '../../change-streamer/schema/tables.ts';\nimport {\n getSubscriptionStateAndContext,\n type SubscriptionState,\n type SubscriptionStateAndContext,\n} from '../../replicator/schema/replication-state.ts';\nimport type {ChangeSource, ChangeStream} from '../change-source.ts';\nimport {BackfillManager} from '../common/backfill-manager.ts';\nimport {\n ChangeStreamMultiplexer,\n type Listener,\n} from '../common/change-stream-multiplexer.ts';\nimport {initReplica} from '../common/replica-schema.ts';\nimport type {\n BackfillRequest,\n DownstreamStatusMessage,\n JSONObject,\n} from '../protocol/current.ts';\nimport type {\n ColumnAdd,\n Identifier,\n MessageRelation,\n SchemaChange,\n TableCreate,\n} from '../protocol/current/data.ts';\nimport type {\n ChangeStreamData,\n ChangeStreamMessage,\n Data,\n} from '../protocol/current/downstream.ts';\nimport type {ColumnMetadata, TableMetadata} from './backfill-metadata.ts';\nimport {streamBackfill} from './backfill-stream.ts';\nimport {\n initialSync,\n type InitialSyncOptions,\n type ServerContext,\n} from './initial-sync.ts';\nimport type {\n Message,\n MessageMessage,\n MessageRelation as PostgresRelation,\n} from './logical-replication/pgoutput.types.ts';\nimport {subscribe, type StreamMessage} from './logical-replication/stream.ts';\nimport {fromBigInt, toStateVersionString, type LSN} from './lsn.ts';\nimport {\n replicationEventSchema,\n type DdlUpdateEvent,\n type SchemaSnapshotEvent,\n} from './schema/ddl.ts';\nimport {updateShardSchema} from './schema/init.ts';\nimport {\n getPublicationInfo,\n type PublishedSchema,\n type 
PublishedTableWithReplicaIdentity,\n} from './schema/published.ts';\nimport {\n dropShard,\n getInternalShardConfig,\n getReplicaAtVersion,\n internalPublicationPrefix,\n legacyReplicationSlot,\n replicaIdentitiesForTablesWithoutPrimaryKeys,\n replicationSlotExpression,\n type InternalShardConfig,\n type Replica,\n} from './schema/shard.ts';\nimport {validate} from './schema/validation.ts';\n\n/**\n * Initializes a Postgres change source, including the initial sync of the\n * replica, before streaming changes from the corresponding logical replication\n * stream.\n */\nexport async function initializePostgresChangeSource(\n lc: LogContext,\n upstreamURI: string,\n shard: ShardConfig,\n replicaDbFile: string,\n syncOptions: InitialSyncOptions,\n context: ServerContext,\n lagReportIntervalMs?: number,\n): Promise<{subscriptionState: SubscriptionState; changeSource: ChangeSource}> {\n await initReplica(\n lc,\n `replica-${shard.appID}-${shard.shardNum}`,\n replicaDbFile,\n (log, tx) => initialSync(log, shard, tx, upstreamURI, syncOptions, context),\n );\n\n const replica = new Database(lc, replicaDbFile);\n const subscriptionState = getSubscriptionStateAndContext(\n new StatementRunner(replica),\n );\n replica.close();\n\n // Check that upstream is properly setup, and throw an AutoReset to re-run\n // initial sync if not.\n const db = pgClient(lc, upstreamURI);\n try {\n const upstreamReplica = await checkAndUpdateUpstream(\n lc,\n db,\n shard,\n subscriptionState,\n );\n\n const changeSource = new PostgresChangeSource(\n lc,\n upstreamURI,\n shard,\n upstreamReplica,\n context,\n lagReportIntervalMs ?? 
null,\n );\n\n return {subscriptionState, changeSource};\n } finally {\n await db.end();\n }\n}\n\nasync function checkAndUpdateUpstream(\n lc: LogContext,\n sql: PostgresDB,\n shard: ShardConfig,\n {\n replicaVersion,\n publications: subscribed,\n initialSyncContext,\n }: SubscriptionStateAndContext,\n) {\n // Perform any shard schema updates\n await updateShardSchema(lc, sql, shard, replicaVersion);\n\n const upstreamReplica = await getReplicaAtVersion(\n lc,\n sql,\n shard,\n replicaVersion,\n initialSyncContext,\n );\n if (!upstreamReplica) {\n throw new AutoResetSignal(\n `No replication slot for replica at version ${replicaVersion}`,\n );\n }\n\n // Verify that the publications match what is being replicated.\n const requested = [...shard.publications].sort();\n const replicated = upstreamReplica.publications\n .filter(p => !p.startsWith(internalPublicationPrefix(shard)))\n .sort();\n if (!deepEqual(requested, replicated)) {\n lc.warn?.(`Dropping shard to change publications to: [${requested}]`);\n await sql.unsafe(dropShard(shard.appID, shard.shardNum));\n throw new AutoResetSignal(\n `Requested publications [${requested}] do not match configured ` +\n `publications: [${replicated}]`,\n );\n }\n\n // Sanity check: The subscription state on the replica should have the\n // same publications. 
This should be guaranteed by the equivalence of the\n // replicaVersion, but it doesn't hurt to verify.\n if (!deepEqual(upstreamReplica.publications, subscribed)) {\n throw new AutoResetSignal(\n `Upstream publications [${upstreamReplica.publications}] do not ` +\n `match subscribed publications [${subscribed}]`,\n );\n }\n\n // Verify that the publications exist.\n const exists = await sql`\n SELECT pubname FROM pg_publication WHERE pubname IN ${sql(subscribed)};\n `.values();\n if (exists.length !== subscribed.length) {\n throw new AutoResetSignal(\n `Upstream publications [${exists.flat()}] do not contain ` +\n `all subscribed publications [${subscribed}]`,\n );\n }\n\n const {slot} = upstreamReplica;\n const result = await sql<\n {restartLSN: LSN | null; walStatus: string | null}[]\n > /*sql*/ `\n SELECT restart_lsn as \"restartLSN\", wal_status as \"walStatus\" FROM pg_replication_slots\n WHERE slot_name = ${slot}`;\n if (result.length === 0) {\n throw new AutoResetSignal(`replication slot ${slot} is missing`);\n }\n const [{restartLSN, walStatus}] = result;\n if (restartLSN === null || walStatus === 'lost') {\n throw new AutoResetSignal(\n `replication slot ${slot} has been invalidated for exceeding the max_slot_wal_keep_size`,\n );\n }\n return upstreamReplica;\n}\n\n// Parameterize this if necessary. 
// In practice starvation may never happen; parameterize
// MAX_LOW_PRIORITY_DELAY_MS if that ever becomes necessary.
const MAX_LOW_PRIORITY_DELAY_MS = 1000;

/**
 * Tracks an outstanding 'replication' reservation on the
 * {@link ChangeStreamMultiplexer}. `lastWatermark` is set when a transaction
 * commits and is the watermark passed to `changes.release()`.
 */
type ReservationState = {
  lastWatermark?: string;
};

/**
 * Postgres implementation of a {@link ChangeSource} backed by a logical
 * replication stream.
 */
class PostgresChangeSource implements ChangeSource {
  readonly #lc: LogContext;
  readonly #db: PostgresDB;
  readonly #upstreamUri: string;
  readonly #shard: ShardID;
  readonly #replica: Replica;
  readonly #context: ServerContext;
  // null when lag reporting is disabled (lagReportIntervalMs is null/0).
  readonly #lagReporter: LagReporter | null;

  /**
   * @param lagReportIntervalMs interval between lag reports; a falsy value
   *   disables lag reporting entirely (no LagReporter is constructed).
   */
  constructor(
    lc: LogContext,
    upstreamUri: string,
    shard: ShardID,
    replica: Replica,
    context: ServerContext,
    lagReportIntervalMs: number | null,
  ) {
    this.#lc = lc.withContext('component', 'change-source');
    this.#db = pgClient(lc, upstreamUri, {
      // used occasionally for schema changes, periodically for lag reporting
      ['idle_timeout']: 60,
      connection: {['application_name']: 'zero-replication-monitor'},
    });
    this.#upstreamUri = upstreamUri;
    this.#shard = shard;
    this.#replica = replica;
    this.#context = context;
    this.#lagReporter = lagReportIntervalMs
      ? new LagReporter(
          lc.withContext('component', 'lag-reporter'),
          shard,
          this.#db,
          lagReportIntervalMs,
        )
      : null;
  }

  /**
   * Kicks off the first lag report if lag reporting is enabled.
   * Returns null when lag reporting is disabled.
   */
  startLagReporter(): Promise<{nextSendTimeMs: number}> | null {
    return this.#lagReporter ? this.#lagReporter.initiateLagReport() : null;
  }

  /**
   * Starts the logical replication stream from this shard's slot, resuming
   * after `clientWatermark`. Existing subscribers on the slot are terminated
   * first (see #stopExistingReplicationSlotSubscribers).
   */
  async startStream(
    clientWatermark: string,
    backfillRequests: BackfillRequest[] = [],
  ): Promise<ChangeStream> {
    const {slot} = this.#replica;

    await this.#stopExistingReplicationSlotSubscribers(slot);
    const config = await getInternalShardConfig(this.#db, this.#shard);
    this.#lc.info?.(`starting replication stream@${slot}`);
    return this.#startStream(slot, clientWatermark, config, backfillRequests);
  }

  /**
   * Subscribes to the replication slot and wires the message pump:
   * replication messages (and backfill streams) are multiplexed into a single
   * ChangeStream, and downstream acks are forwarded to the slot via an Acker.
   */
  async #startStream(
    slot: string,
    clientWatermark: string,
    shardConfig: InternalShardConfig,
    backfillRequests: BackfillRequest[],
  ): Promise<ChangeStream> {
    // Start from the major version just after the client's watermark.
    const clientStart = majorVersionFromString(clientWatermark) + 1n;
    const {messages, acks} = await subscribe(
      this.#lc,
      this.#db,
      slot,
      [...shardConfig.publications],
      clientStart,
    );
    const acker = new Acker(acks);

    // The ChangeStreamMultiplexer facilitates cooperative streaming from
    // the main replication stream and backfill streams initiated by the
    // BackfillManager.
    const changes = new ChangeStreamMultiplexer(this.#lc, clientWatermark);
    const backfillManager = new BackfillManager(this.#lc, changes, req =>
      streamBackfill(this.#lc, this.#upstreamUri, this.#replica, req),
    );
    changes
      .addProducers(messages, backfillManager)
      .addListeners(backfillManager, acker);
    backfillManager.run(clientWatermark, backfillRequests);

    const changeMaker = new ChangeMaker(
      this.#lc,
      this.#shard,
      shardConfig,
      this.#db,
      this.#replica.initialSchema,
    );

    /**
     * Determines if the incoming message is transactional, otherwise handling
     * non-transactional messages with a downstream status message:
     * - keepalives become 'status' messages at the keepalive's LSN;
     * - lag-report logical messages (matching the LagReporter's prefix) are
     *   converted to status messages carrying the lag report.
     */
    const isTransactionalMessage = (
      lsn: bigint,
      msg: StreamMessage[1],
    ): msg is Message => {
      if (msg.tag === 'keepalive') {
        changes.pushStatus([
          'status',
          {ack: msg.shouldRespond},
          {watermark: majorVersionToString(lsn)},
        ]);
        return false;
      }
      if (
        msg.tag === 'message' &&
        msg.prefix === this.#lagReporter?.messagePrefix
      ) {
        changes.pushStatus(this.#lagReporter.processLagReport(msg));
        return false;
      }
      return true;
    };

    // Fire-and-forget pump: drains the replication stream, converts messages
    // to changes, and manages the 'replication' reservation so that backfill
    // (low priority) work can interleave between transactions.
    void (async () => {
      try {
        let reservation: ReservationState | null = null;
        let inTransaction = false;

        for await (const [lsn, msg] of messages) {
          if (!isTransactionalMessage(lsn, msg)) {
            // If we're not in a transaction but the last reservation was kept
            // because of pending keepalives or lag reports in the queue,
            // release the reservation.
            if (!inTransaction && reservation?.lastWatermark) {
              changes.release(reservation.lastWatermark);
              reservation = null;
            }
            continue;
          }

          if (!reservation) {
            const res = changes.reserve('replication');
            typeof res === 'string' || (await res); // awaits should be uncommon
            reservation = {};
          }

          let lastChange: ChangeStreamMessage | undefined;
          for (const change of await changeMaker.makeChanges(lsn, msg)) {
            await changes.push(change); // Allow the change-streamer to push back.
            lastChange = change;
          }

          switch (lastChange?.[0]) {
            case 'begin':
              inTransaction = true;
              break;
            case 'commit':
              inTransaction = false;
              reservation.lastWatermark = lastChange[2].watermark;
              if (
                messages.queued === 0 ||
                changes.waiterDelay() > MAX_LOW_PRIORITY_DELAY_MS
              ) {
                // After each transaction, release the reservation:
                // - if there are no pending upstream messages
                // - or if a low priority request has been waiting for longer
                //   than MAX_LOW_PRIORITY_DELAY_MS. This is to prevent
                //   (backfill) starvation on very active upstreams.
                changes.release(reservation.lastWatermark);
                reservation = null;
              }
              break;
          }
        }
      } catch (e) {
        // Note: no need to worry about reservations here since downstream
        // is being completely canceled.
        const err = translateError(e);
        if (err instanceof ShutdownSignal) {
          // Log the new state of the replica to surface information about the
          // server that sent the shutdown signal, if any.
          await this.#logCurrentReplicaInfo();
        }
        changes.fail(err);
      }
    })();

    this.#lc.info?.(
      `started replication stream@${slot} from ${clientWatermark} (replicaVersion: ${
        this.#replica.version
      })`,
    );

    return {
      changes: changes.asSource(),
      acks: {push: status => acker.ack(status[2].watermark)},
    };
  }

  /**
   * Best-effort logging of the upstream replica row (including its
   * subscriberContext) after a ShutdownSignal, to help identify the server
   * that sent the signal. Errors are logged, never thrown.
   */
  async #logCurrentReplicaInfo() {
    try {
      const replica = await getReplicaAtVersion(
        this.#lc,
        this.#db,
        this.#shard,
        this.#replica.version,
      );
      if (replica) {
        this.#lc.info?.(
          `Shutdown signal from replica@${this.#replica.version}: ${stringify(replica.subscriberContext)}`,
        );
      }
    } catch (e) {
      this.#lc.warn?.(`error logging replica info`, e);
    }
  }

  /**
   * Stops (terminates the backends of) replication slot subscribers
   * associated with this shard, and kicks off a fire-and-forget background
   * task that drops any slot other than the specified `slotToKeep`.
   *
   * Note that replication slots created after `slotToKeep` (as indicated by
   * the timestamp suffix) are preserved, as those are newly syncing replicas
   * that will soon take over the slot.
   *
   * @throws AbortError if `slotToKeep` itself no longer exists, indicating
   *   that a different replication-manager has taken over.
   */
  async #stopExistingReplicationSlotSubscribers(slotToKeep: string) {
    const slotExpression = replicationSlotExpression(this.#shard);
    const legacySlotName = legacyReplicationSlot(this.#shard);

    const result = await runTx(this.#db, async sql => {
      // Note: `slot_name <= slotToKeep` uses a string compare of the millisecond
      // timestamp, which works until it exceeds 13 digits (sometime in 2286).
      const result = await sql<
        {slot: string; pid: string | null; terminated: boolean | null}[]
      > /*sql*/ `
      SELECT slot_name as slot, pg_terminate_backend(active_pid) as terminated, active_pid as pid
        FROM pg_replication_slots 
        WHERE (slot_name LIKE ${slotExpression} OR slot_name = ${legacySlotName})
          AND slot_name <= ${slotToKeep}`;
      this.#lc.info?.(
        `terminated replication slots: ${JSON.stringify(result)}`,
      );
      const replicasTable = `${upstreamSchema(this.#shard)}.replicas`;
      const replicasBefore = await sql`
        SELECT slot, version, "initialSyncContext", "subscriberContext" 
          FROM ${sql(replicasTable)} ORDER BY slot`;

      if (result.length === 0) {
        // slotToKeep was not found: log diagnostic state before aborting.
        const shardSlots = await sql`
          SELECT slot_name as slot, active, active_pid as pid
            FROM pg_replication_slots
            WHERE slot_name LIKE ${slotExpression} OR slot_name = ${legacySlotName}
            ORDER BY slot_name`;
        this.#lc.warn?.(
          `slot ${slotToKeep} not found while cleaning subscribers`,
          {slots: shardSlots, replicas: replicasBefore},
        );
        throw new AbortError(
          `replication slot ${slotToKeep} is missing. A different ` +
            `replication-manager should now be running on a new ` +
            `replication slot.`,
        );
      }
      // Clear the state of the older replicas.
      this.#lc.info?.(
        `replicas before cleanup (slotToKeep=${slotToKeep}): ${JSON.stringify(
          replicasBefore,
        )}`,
      );
      await sql`
        DELETE FROM ${sql(replicasTable)} WHERE slot < ${slotToKeep}`;
      await sql`
        UPDATE ${sql(replicasTable)} 
          SET "subscriberContext" = ${this.#context}
          WHERE slot = ${slotToKeep}`;
      const replicasAfter = await sql<{slot: string; version: string}[]>`
        SELECT slot, version FROM ${sql(replicasTable)} ORDER BY slot`;
      this.#lc.info?.(
        `replicas after cleanup (slotToKeep=${slotToKeep}): ${JSON.stringify(
          replicasAfter,
        )}`,
      );
      return result;
    });

    const pids = result.filter(({pid}) => pid !== null).map(({pid}) => pid);
    if (pids.length) {
      this.#lc.info?.(`signaled subscriber ${pids} to shut down`);
    }
    const otherSlots = result
      .filter(({slot}) => slot !== slotToKeep)
      .map(({slot}) => slot);

    if (otherSlots.length) {
      // Fire-and-forget: dropping slots may need to wait for Postgres to
      // consider them inactive, and must not block stream startup.
      void this.#dropReplicationSlots(otherSlots).catch(e =>
        this.#lc.warn?.(`error dropping replication slots`, e),
      );
    }
  }

  /**
   * Drops the given replication slots, retrying up to 5 times (1s apart)
   * while Postgres still considers a slot in-use (PG_OBJECT_IN_USE).
   * Gives up with a warning after the retries are exhausted.
   */
  async #dropReplicationSlots(slots: string[]) {
    this.#lc.info?.(`dropping other replication slot(s) ${slots}`);
    const sql = this.#db;
    for (let i = 0; i < 5; i++) {
      try {
        await sql`
        SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots
          WHERE slot_name IN ${sql(slots)}
      `;
        this.#lc.info?.(`successfully dropped ${slots}`);
        return;
      } catch (e) {
        // error: replication slot "zero_slot_change_source_test_id" is active for PID 268
        if (
          e instanceof postgres.PostgresError &&
          e.code === PG_OBJECT_IN_USE
        ) {
          // The freeing up of the replication slot is not transactional;
          // sometimes it takes time for Postgres to consider the slot
          // inactive.
          this.#lc.debug?.(`attempt ${i + 1}: ${String(e)}`, e);
        } else {
          this.#lc.warn?.(`error dropping ${slots}`, e);
        }
        await sleep(1000);
      }
    }
    this.#lc.warn?.(`maximum attempts exceeded dropping ${slots}`);
  }
}

// Exported for testing.
/**
 * Listens to the ChangeStreamMessages and forwards acknowledgements (as LSNs)
 * to the replication stream's ack Sink. Acks for watermarks that must first
 * be committed downstream by the change-streamer are deferred until `ack()`
 * is called with a watermark at or beyond the awaited one (watermarks are
 * LexiVersions and compare lexicographically).
 */
export class Acker implements Listener {
  #acks: Sink<bigint>;
  // The watermark that must be acked by downstream before caught-up acks
  // may be sent; null when downstream is caught up.
  #waitingForDownstreamAck: string | null = null;

  constructor(acks: Sink<bigint>) {
    this.#acks = acks;
  }

  onChange(change: ChangeStreamMessage): void {
    switch (change[0]) {
      case 'status':
        const {watermark} = change[2];
        if (change[1].ack) {
          this.#expectDownstreamAck(watermark);
        } else {
          // Keepalives with shouldRespond = false are sent to Listeners,
          // but for efficiency they are not sent downstream to the
          // change-streamer. Ack them here if the change-streamer is caught
          // up. This updates the replication slot's `confirmed_flush_lsn`
          // more quickly (rather than waiting for the periodic shouldRespond),
          // which is useful for monitoring replication slot lag.
          this.#ackIfDownstreamIsCaughtUp(watermark);
        }
        break;
      case 'begin':
        // Mark the commit watermark as being expected so that any intermediate
        // shouldRespond=false watermarks, which will be at the
        // commitWatermark, are *not* acked, as the ack must come from
        // change-streamer after it commits the transaction.
        if (!change[1].skipAck) {
          this.#expectDownstreamAck(change[2].commitWatermark);
        }
        break;
    }
  }

  #expectDownstreamAck(watermark: string) {
    this.#waitingForDownstreamAck = watermark;
  }

  /**
   * Called (via the ChangeStream's acks sink) when downstream acknowledges
   * `watermark`. Clears the pending expectation if it is now satisfied, and
   * always forwards the ack to the replication stream.
   */
  ack(watermark: LexiVersion) {
    if (
      this.#waitingForDownstreamAck &&
      this.#waitingForDownstreamAck <= watermark
    ) {
      this.#waitingForDownstreamAck = null;
    }
    this.#sendAck(watermark);
  }

  #ackIfDownstreamIsCaughtUp(watermark: string) {
    if (this.#waitingForDownstreamAck === null) {
      this.#sendAck(watermark);
    }
  }

  // Converts the LexiVersion watermark to its major-version LSN and pushes it.
  #sendAck(watermark: LexiVersion) {
    const lsn = majorVersionFromString(watermark);
    this.#acks.push(lsn);
  }
}

// Shape of the JSON payload carried in lag-report logical messages.
const lagReportSchema = v.object({
  id: v.string(),
  sendTimeMs: v.number(),
  commitTimeMs: v.number(),
});

export
type LagReport = v.Infer<typeof lagReportSchema>;\n\nclass LagReporter {\n static readonly MESSAGE_SUFFIX = '/lag-report/v1';\n\n readonly #lc: LogContext;\n readonly messagePrefix: string;\n\n // Weird issue with oxlint, which thinks:\n // × eslint(no-unused-private-class-members): 'db' is defined but never used.\n // oxlint-disable-next-line eslint(no-unused-private-class-members)\n readonly #db: PostgresDB;\n readonly #lagIntervalMs: number;\n\n #pgVersion: number | undefined;\n #lastReportID: string = '';\n #timer: NodeJS.Timeout | undefined;\n\n constructor(\n lc: LogContext,\n shard: ShardID,\n db: PostgresDB,\n lagIntervalMs: number,\n ) {\n this.#lc = lc;\n this.messagePrefix = `${shard.appID}/${shard.shardNum}${LagReporter.MESSAGE_SUFFIX}`;\n this.#db = db;\n this.#lagIntervalMs = lagIntervalMs;\n }\n\n async #getPgVersion() {\n if (this.#pgVersion === undefined) {\n const [{pgVersion}] = await this.#db<{pgVersion: number}[]> /*sql*/ `\n SELECT current_setting('server_version_num')::int as \"pgVersion\"`;\n this.#pgVersion = pgVersion;\n }\n return this.#pgVersion;\n }\n\n async initiateLagReport(now = Date.now()) {\n const pgVersion = this.#pgVersion ?? (await this.#getPgVersion());\n this.#lastReportID = nanoid();\n\n if (pgVersion >= 170000) {\n await this.#db /*sql*/ `\n SELECT pg_logical_emit_message(\n false,\n ${this.messagePrefix},\n json_build_object(\n 'id', ${this.#lastReportID}::text,\n 'sendTimeMs', ${now}::int8,\n 'commitTimeMs', extract(epoch from now()) * 1000\n )::text,\n true\n );\n `;\n } else {\n // Versions before PG 17 do not support the final `flush` option of\n // pg_logical_emit_message(). This results in an extra 50~100ms latency\n // for replication reports when the db is idle, which is still\n // acceptable for the purpose for alerting on pathological lag, for\n // which the threshold is much higher (e.g. 
many seconds).\n await this.#db /*sql*/ `\n SELECT pg_logical_emit_message(\n false,\n ${this.messagePrefix},\n json_build_object(\n 'id', ${this.#lastReportID}::text,\n 'sendTimeMs', ${now}::int8,\n 'commitTimeMs', extract(epoch from now()) * 1000\n )::text\n );\n `;\n }\n return {nextSendTimeMs: now};\n }\n\n #scheduleNextReport(delayMs: number) {\n clearTimeout(this.#timer);\n this.#timer = setTimeout(async () => {\n try {\n await this.initiateLagReport();\n } catch (e) {\n this.#lc.warn?.(`error initiating lag report`, e);\n this.#scheduleNextReport(this.#lagIntervalMs);\n }\n }, delayMs);\n }\n\n processLagReport(msg: MessageMessage): DownstreamStatusMessage {\n assert(\n msg.prefix === this.messagePrefix,\n `unexpected message prefix: ${msg.prefix}`,\n );\n const report = parseLogicalMessageContent(msg, lagReportSchema);\n const now = Date.now();\n const nextSendTimeMs = Math.max(\n now,\n report.sendTimeMs + this.#lagIntervalMs,\n );\n if (report.id === this.#lastReportID) {\n // Only schedule the next report when receiving the previous report.\n // For historic reports in the WAL, or reports generated by other\n // replication-managers, status messages are still sent downstream,\n // but the next report is not actually scheduled.\n this.#scheduleNextReport(nextSendTimeMs - now);\n }\n const {sendTimeMs, commitTimeMs} = report;\n return [\n 'status',\n {\n ack: false,\n lagReport: {\n lastTimings: {\n sendTimeMs,\n commitTimeMs,\n receiveTimeMs: now,\n },\n nextSendTimeMs,\n },\n },\n {watermark: toStateVersionString(msg.messageLsn ?? 
'0/0')},\n ];\n }\n}\n\ntype ReplicationError = {\n lsn: bigint;\n msg: Message;\n err: unknown;\n lastLogTime: number;\n};\n\nconst SET_REPLICA_IDENTITY_DELAY_MS = 50;\n\nclass ChangeMaker {\n readonly #lc: LogContext;\n readonly #shardPrefix: string;\n readonly #shardConfig: InternalShardConfig;\n readonly #initialSchema: PublishedSchema;\n readonly #db: PostgresDB;\n\n #replicaIdentityTimer: NodeJS.Timeout | undefined;\n #error: ReplicationError | undefined;\n\n constructor(\n lc: LogContext,\n {appID, shardNum}: ShardID,\n shardConfig: InternalShardConfig,\n db: PostgresDB,\n initialSchema: PublishedSchema,\n ) {\n this.#lc = lc;\n // Note: This matches the prefix used in pg_logical_emit_message() in pg/schema/ddl.ts.\n this.#shardPrefix = `${appID}/${shardNum}`;\n this.#shardConfig = shardConfig;\n this.#initialSchema = initialSchema;\n this.#db = db;\n }\n\n async makeChanges(lsn: bigint, msg: Message): Promise<ChangeStreamMessage[]> {\n if (this.#error) {\n this.#logError(this.#error);\n return [];\n }\n try {\n return await this.#makeChanges(msg);\n } catch (err) {\n this.#error = {lsn, msg, err, lastLogTime: 0};\n this.#logError(this.#error);\n\n const message = `Unable to continue replication from LSN ${fromBigInt(lsn)}`;\n const errorDetails: JSONObject = {error: message};\n if (err instanceof UnsupportedSchemaChangeError) {\n errorDetails.reason = err.description;\n errorDetails.context = err.event.context;\n } else {\n errorDetails.reason = String(err);\n }\n\n // Rollback the current transaction to avoid dangling transactions in\n // downstream processors (i.e. 
changeLog, replicator).\n return [\n ['rollback', {tag: 'rollback'}],\n ['control', {tag: 'reset-required', message, errorDetails}],\n ];\n }\n }\n\n #logError(error: ReplicationError) {\n const {lsn, msg, err, lastLogTime} = error;\n const now = Date.now();\n\n // Output an error to logs as replication messages continue to be dropped,\n // at most once a minute.\n if (now - lastLogTime > 60_000) {\n this.#lc.error?.(\n `Unable to continue replication from LSN ${fromBigInt(lsn)}: ${String(\n err,\n )}`,\n err instanceof UnsupportedSchemaChangeError\n ? err.event.context\n : // 'content' can be a large byte Buffer. Exclude it from logging output.\n {...msg, content: undefined},\n );\n error.lastLogTime = now;\n }\n }\n\n // oxlint-disable-next-line require-await\n async #makeChanges(msg: Message): Promise<ChangeStreamData[]> {\n switch (msg.tag) {\n case 'begin':\n return [\n [\n 'begin',\n {...msg, json: 's'},\n {commitWatermark: toStateVersionString(must(msg.commitLsn))},\n ],\n ];\n\n case 'delete': {\n if (!(msg.key ?? msg.old)) {\n throw new Error(\n `Invalid DELETE msg (missing key): ${stringify(msg)}`,\n );\n }\n return [\n [\n 'data',\n {\n ...msg,\n relation: makeRelation(msg.relation),\n // https://www.postgresql.org/docs/current/protocol-logicalrep-message-formats.html#PROTOCOL-LOGICALREP-MESSAGE-FORMATS-DELETE\n key: must(msg.old ?? msg.key),\n },\n ],\n ];\n }\n\n case 'update': {\n return [\n [\n 'data',\n {\n ...msg,\n relation: makeRelation(msg.relation),\n // https://www.postgresql.org/docs/current/protocol-logicalrep-message-formats.html#PROTOCOL-LOGICALREP-MESSAGE-FORMATS-UPDATE\n key: msg.old ?? 
msg.key,\n },\n ],\n ];\n }\n\n case 'insert':\n return [['data', {...msg, relation: makeRelation(msg.relation)}]];\n case 'truncate':\n return [['data', {...msg, relations: msg.relations.map(makeRelation)}]];\n\n case 'message':\n if (!msg.prefix.startsWith(this.#shardPrefix)) {\n this.#lc.debug?.('ignoring message for different shard', msg.prefix);\n return [];\n }\n switch (msg.prefix.substring(this.#shardPrefix.length)) {\n case '': // Legacy prefix\n case '/ddl':\n return this.#handleDdlMessage(msg);\n default:\n this.#lc.debug?.('ignoring unknown message type', msg.prefix);\n return [];\n }\n\n case 'commit':\n this.#lastSnapshotInTx = undefined;\n return [\n [\n 'commit',\n msg,\n {watermark: toStateVersionString(must(msg.commitLsn))},\n ],\n ];\n\n case 'relation':\n return this.#handleRelation(msg);\n case 'type':\n return []; // Nothing need be done for custom types.\n case 'origin':\n // No need to detect replication loops since we are not a\n // PG replication source.\n return [];\n default:\n msg satisfies never;\n throw new Error(`Unexpected message type ${stringify(msg)}`);\n }\n }\n\n #preSchema: PublishedSchema | undefined;\n #lastSnapshotInTx: PublishedSchema | undefined;\n\n #handleDdlMessage(msg: MessageMessage) {\n const event = parseLogicalMessageContent(msg, replicationEventSchema);\n // Cancel manual schema adjustment timeouts when an upstream schema change\n // is about to happen, so as to avoid interfering / redundant work.\n clearTimeout(this.#replicaIdentityTimer);\n\n let previousSchema: PublishedSchema | null;\n const {type} = event;\n switch (type) {\n case 'ddlStart':\n // Store the schema in order to diff it with a subsequent ddlUpdate.\n this.#preSchema = event.schema;\n return [];\n case 'ddlUpdate':\n // guaranteed by event triggers\n previousSchema = must(\n this.#preSchema,\n `ddlUpdate received without a ddlStart`,\n );\n break;\n case 'schemaSnapshot':\n previousSchema = this.#lastSnapshotInTx ?? 
null;\n break;\n default: // Ignore unknown types for forwards compatibility\n this.#lc.info?.(`ignoring unknown ddl message type: ${type}`);\n return [];\n }\n\n // Store the schema (from either a ddlUpdate or schemaSnapshot) to\n // diff against the next schemaSnapshot.\n this.#lastSnapshotInTx = event.schema;\n if (!previousSchema) {\n this.#lc.info?.(`received ${msg.prefix}/${type} event`);\n return []; // First schemaSnapshot in the tx.\n }\n this.#lc.info?.(`processing ${msg.prefix}/${type} event`, event);\n\n const changes = this.#makeSchemaChanges(previousSchema, event).map(\n change => ['data', change] satisfies Data,\n );\n\n this.#lc\n .withContext('tag', event.event.tag)\n .withContext('query', event.context.query)\n .info?.(`${changes.length} schema change(s)`, {changes});\n\n const replicaIdentities = replicaIdentitiesForTablesWithoutPrimaryKeys(\n event.schema,\n );\n if (replicaIdentities) {\n this.#replicaIdentityTimer = setTimeout(async () => {\n try {\n await replicaIdentities.apply(this.#lc, this.#db);\n } catch (err) {\n this.#lc.warn?.(`error setting replica identities`, err);\n }\n }, SET_REPLICA_IDENTITY_DELAY_MS);\n }\n\n return changes;\n }\n\n /**\n * A note on operation order:\n *\n * Postgres will drop related indexes when columns are dropped,\n * but SQLite will error instead (https://sqlite.org/forum/forumpost/2e62dba69f?t=c&hist).\n * The current workaround is to drop indexes first.\n *\n * Also note that although it should not be possible to both rename and\n * add/drop tables/columns in a single statement, the operations are\n * ordered to handle that possibility, by always dropping old entities,\n * then modifying kept entities, and then adding new entities.\n *\n * Thus, the order of replicating DDL updates is:\n * - drop indexes\n * - drop tables\n * - alter tables\n * - drop columns\n * - alter columns\n * - add columns\n * - create tables\n * - create indexes\n *\n * In the future the replication logic should be improved to 
handle this\n * behavior in SQLite by dropping dependent indexes manually before dropping\n * columns. This, for example, would be needed to properly support changing\n * the type of a column that's indexed.\n */\n #makeSchemaChanges(\n preSchema: PublishedSchema,\n update: DdlUpdateEvent | SchemaSnapshotEvent,\n ): SchemaChange[] {\n try {\n const [prevTbl, prevIdx] = specsByID(preSchema);\n const [nextTbl, nextIdx] = specsByID(update.schema);\n const changes: SchemaChange[] = [];\n\n // Validate the new table schemas\n for (const table of nextTbl.values()) {\n validate(this.#lc, table);\n }\n\n const [droppedIdx, createdIdx] = symmetricDifferences(prevIdx, nextIdx);\n\n // Detect modified indexes (same name, different definition).\n // This happens when a constraint is dropped and recreated with the\n // same name in a single ALTER TABLE statement.\n // Note: We compare using stable column attnums rather than names,\n // because table/column renames change the index spec cosmetically\n // (tableName, column keys) without the index actually being recreated.\n const keptIdx = intersection(prevIdx, nextIdx);\n for (const id of keptIdx) {\n if (\n isIndexStructurallyChanged(\n must(prevIdx.get(id)),\n must(nextIdx.get(id)),\n prevTbl,\n nextTbl,\n )\n ) {\n droppedIdx.add(id);\n createdIdx.add(id);\n }\n }\n\n for (const id of droppedIdx) {\n const {schema, name} = must(prevIdx.get(id));\n changes.push({tag: 'drop-index', id: {schema, name}});\n }\n\n // DROP\n const [droppedTbl, createdTbl] = symmetricDifferences(prevTbl, nextTbl);\n for (const id of droppedTbl) {\n const {schema, name} = must(prevTbl.get(id));\n changes.push({tag: 'drop-table', id: {schema, name}});\n }\n // ALTER TABLE | ALTER PUBLICATION\n const tables = intersection(prevTbl, nextTbl);\n for (const id of tables) {\n changes.push(\n ...this.#getTableChanges(\n must(prevTbl.get(id)),\n must(nextTbl.get(id)),\n update.event.tag,\n ),\n );\n }\n // CREATE\n for (const id of createdTbl) {\n const spec 
= must(nextTbl.get(id));\n const createTable: TableCreate = {\n tag: 'create-table',\n spec,\n metadata: getMetadata(spec),\n };\n if (!update.event.tag.startsWith('CREATE')) {\n // Tables introduced to the publication via ALTER statements\n // or the COMMENT statement (from schemaSnapshots) must be\n // backfilled.\n createTable.backfill = mapValues(spec.columns, ({pos: attNum}) => ({\n attNum,\n })) satisfies Record<string, ColumnMetadata>;\n }\n changes.push(createTable);\n }\n\n // Add indexes last since they may reference tables / columns that need\n // to be created first.\n for (const id of createdIdx) {\n const spec = must(nextIdx.get(id));\n changes.push({tag: 'create-index', spec});\n }\n return changes;\n } catch (e) {\n throw new UnsupportedSchemaChangeError(String(e), update, {cause: e});\n }\n }\n\n #getTableChanges(\n oldTable: PublishedTableWithReplicaIdentity,\n newTable: PublishedTableWithReplicaIdentity,\n ddlTag: string,\n ): SchemaChange[] {\n const changes: SchemaChange[] = [];\n if (\n oldTable.schema !== newTable.schema ||\n oldTable.name !== newTable.name\n ) {\n changes.push({\n tag: 'rename-table',\n old: {schema: oldTable.schema, name: oldTable.name},\n new: {schema: newTable.schema, name: newTable.name},\n });\n }\n const oldMetadata = getMetadata(oldTable);\n const newMetadata = getMetadata(newTable);\n if (!deepEqual(oldMetadata, newMetadata)) {\n changes.push({\n tag: 'update-table-metadata',\n table: {schema: newTable.schema, name: newTable.name},\n old: oldMetadata,\n new: newMetadata,\n });\n }\n const table = {schema: newTable.schema, name: newTable.name};\n const oldColumns = columnsByID(oldTable.columns);\n const newColumns = columnsByID(newTable.columns);\n\n // DROP\n const [dropped, added] = symmetricDifferences(oldColumns, newColumns);\n for (const id of dropped) {\n const {name: column} = must(oldColumns.get(id));\n changes.push({tag: 'drop-column', table, column});\n }\n\n // ALTER\n const both = intersection(oldColumns, 
newColumns);\n for (const id of both) {\n const {name: oldName, ...oldSpec} = must(oldColumns.get(id));\n const {name: newName, ...newSpec} = must(newColumns.get(id));\n // The three things that we care about are:\n // 1. name\n // 2. type\n // 3. not-null\n if (\n oldName !== newName ||\n oldSpec.dataType !== newSpec.dataType ||\n oldSpec.notNull !== newSpec.notNull\n ) {\n changes.push({\n tag: 'update-column',\n table,\n old: {name: oldName, spec: oldSpec},\n new: {name: newName, spec: newSpec},\n });\n }\n }\n\n // All columns introduced by a publication change require backfill\n // (which appear as ALTER PUBLICATION or COMMENT tags).\n // Columns created by ALTER TABLE, on the other hand, only require\n // backfill if they have non-constant defaults.\n const alwaysBackfill = ddlTag !== 'ALTER TABLE';\n\n // ADD\n for (const id of added) {\n const {name, ...spec} = must(newColumns.get(id));\n const column = {name, spec};\n const addColumn: ColumnAdd = {\n tag: 'add-column',\n table,\n column,\n tableMetadata: getMetadata(newTable),\n };\n if (alwaysBackfill) {\n addColumn.column.spec.dflt = null;\n addColumn.backfill = {attNum: spec.pos} satisfies ColumnMetadata;\n } else {\n // Determine if the ChangeProcessor will accept the column add as is.\n try {\n mapPostgresToLiteColumn(table.name, column);\n } catch (e) {\n if (!(e instanceof UnsupportedColumnDefaultError)) {\n // Note: mapPostgresToLiteColumn is not expected to throw any other\n // types of errors.\n throw e;\n }\n // If the column has an unsupported default (e.g. an expression or a\n // generated value), create the column as initially hidden with a\n // `null` default, and publish it after backfilling the values from\n // upstream. 
Note that this does require that the table have a valid\n // REPLICA IDENTITY, since backfill relies on merging new data with\n // an existing row.\n this.#lc.info?.(\n `Backfilling column ${table.name}.${name}: ${String(e)}`,\n );\n addColumn.column.spec.dflt = null;\n addColumn.backfill = {attNum: spec.pos} satisfies ColumnMetadata;\n }\n }\n changes.push(addColumn);\n }\n return changes;\n }\n\n /**\n * If `ddlDetection === true`, relation messages are irrelevant,\n * as schema changes are detected by event triggers that\n * emit custom messages.\n *\n * For degraded-mode replication (`ddlDetection === false`):\n * 1. query the current published schemas on upstream\n * 2. compare that with the InternalShardConfig.initialSchema\n * 3. compare that with the incoming MessageRelation\n * 4. On any discrepancy, throw an UnsupportedSchemaChangeError\n * to halt replication.\n *\n * Note that schemas queried in step [1] will be *post-transaction*\n * schemas, which are not necessarily suitable for actually processing\n * the statements in the transaction being replicated. In other words,\n * this mechanism cannot be used to reliably *replicate* schema changes.\n * However, they serve the purpose determining if schemas have changed.\n */\n async #handleRelation(rel: PostgresRelation): Promise<ChangeStreamData[]> {\n const {publications, ddlDetection} = this.#shardConfig;\n if (ddlDetection) {\n return [];\n }\n const currentSchema = await getPublicationInfo(this.#db, publications);\n const difference = getSchemaDifference(this.#initialSchema, currentSchema);\n if (difference !== null) {\n throw new MissingEventTriggerSupport(difference);\n }\n // Even if the currentSchema is equal to the initialSchema, the\n // MessageRelation itself must be checked to detect transient\n // schema changes within the transaction (e.g. 
adding and dropping\n // a table, or renaming a column and then renaming it back).\n const orel = this.#initialSchema.tables.find(\n t => t.oid === rel.relationOid,\n );\n if (!orel) {\n // Can happen if a table is created and then dropped in the same transaction.\n throw new MissingEventTriggerSupport(\n `relation not in initialSchema: ${stringify(rel)}`,\n );\n }\n if (relationDifferent(orel, rel)) {\n throw new MissingEventTriggerSupport(\n `relation has changed within the transaction: ${stringify(orel)} vs ${stringify(rel)}`,\n );\n }\n return [];\n }\n}\n\nfunction getSchemaDifference(\n a: PublishedSchema,\n b: PublishedSchema,\n): string | null {\n // Note: ignore indexes since changes need not to halt replication\n if (a.tables.length !== b.tables.length) {\n return `tables created or dropped`;\n }\n for (let i = 0; i < a.tables.length; i++) {\n const at = a.tables[i];\n const bt = b.tables[i];\n const difference = getTableDifference(at, bt);\n if (difference) {\n return difference;\n }\n }\n return null;\n}\n\n// ColumnSpec comparator\nconst byColumnPos = (a: [string, ColumnSpec], b: [string, ColumnSpec]) =>\n a[1].pos < b[1].pos ? -1 : a[1].pos > b[1].pos ? 
1 : 0;\n\nfunction getTableDifference(\n a: PublishedTableSpec,\n b: PublishedTableSpec,\n): string | null {\n if (a.oid !== b.oid || a.schema !== b.schema || a.name !== b.name) {\n return `Table \"${a.name}\" differs from table \"${b.name}\"`;\n }\n if (!deepEqual(a.primaryKey, b.primaryKey)) {\n return `Primary key of table \"${a.name}\" has changed`;\n }\n const acols = Object.entries(a.columns).sort(byColumnPos);\n const bcols = Object.entries(b.columns).sort(byColumnPos);\n if (\n acols.length !== bcols.length ||\n acols.some(([aname, acol], i) => {\n const [bname, bcol] = bcols[i];\n return (\n aname !== bname ||\n acol.pos !== bcol.pos ||\n acol.typeOID !== bcol.typeOID ||\n acol.notNull !== bcol.notNull\n );\n })\n ) {\n return `Columns of table \"${a.name}\" have changed`;\n }\n return null;\n}\n\nexport function relationDifferent(a: PublishedTableSpec, b: PostgresRelation) {\n if (a.oid !== b.relationOid || a.schema !== b.schema || a.name !== b.name) {\n return true;\n }\n if (\n // The MessageRelation's `keyColumns` field contains the columns in column\n // declaration order, whereas the PublishedTableSpec's `primaryKey`\n // contains the columns in primary key (i.e. index) order. 
Do an\n // order-agnostic compare here since it is not possible to detect\n // key-order changes from the MessageRelation message alone.\n b.replicaIdentity === 'default' &&\n !equals(new Set(a.primaryKey), new Set(b.keyColumns))\n ) {\n return true;\n }\n const acols = Object.entries(a.columns).sort(byColumnPos);\n const bcols = b.columns;\n return (\n acols.length !== bcols.length ||\n acols.some(([aname, acol], i) => {\n const bcol = bcols[i];\n return aname !== bcol.name || acol.typeOID !== bcol.typeOid;\n })\n );\n}\n\nfunction translateError(e: unknown): Error {\n if (!(e instanceof Error)) {\n return new Error(String(e));\n }\n if (e instanceof postgres.PostgresError && e.code === PG_ADMIN_SHUTDOWN) {\n return new ShutdownSignal(e);\n }\n return e;\n}\nconst idString = (id: Identifier) => `${id.schema}.${id.name}`;\n\nfunction specsByID(published: PublishedSchema) {\n return [\n // It would have been nice to use a CustomKeyMap here, but we rely on set-utils\n // operations which use plain Sets.\n new Map(published.tables.map(t => [t.oid, t])),\n new Map(published.indexes.map(i => [idString(i), i])),\n ] as const;\n}\n\n/**\n * Determines if an index was structurally changed (e.g. constraint dropped\n * and recreated with different columns) vs cosmetically changed (e.g. 
the\n * index spec changed because the table or a column was renamed).\n *\n * Compares boolean properties directly and resolves column names to their\n * stable attnums (pg_attribute `attnum`) for the column comparison.\n */\nfunction isIndexStructurallyChanged(\n prev: PublishedIndexSpec,\n next: PublishedIndexSpec,\n prevTables: Map<number, PublishedTableWithReplicaIdentity>,\n nextTables: Map<number, PublishedTableWithReplicaIdentity>,\n): boolean {\n if (\n prev.unique !== next.unique ||\n prev.isPrimaryKey !== next.isPrimaryKey ||\n prev.isReplicaIdentity !== next.isReplicaIdentity ||\n prev.isImmediate !== next.isImmediate\n ) {\n return true;\n }\n\n const prevTable = findTableBySchemaAndName(\n prevTables,\n prev.schema,\n prev.tableName,\n );\n const nextTable = findTableBySchemaAndName(\n nextTables,\n next.schema,\n next.tableName,\n );\n if (!prevTable || !nextTable) {\n // Can't resolve tables; conservatively treat as changed.\n return true;\n }\n\n const prevEntries = Object.entries(prev.columns);\n const nextEntries = Object.entries(next.columns);\n if (prevEntries.length !== nextEntries.length) {\n return true;\n }\n\n // Resolve column names → attnums and compare.\n const prevByAttnum = new Map<number | undefined, string>(\n prevEntries.map(([name, dir]) => [prevTable.columns[name]?.pos, dir]),\n );\n const nextByAttnum = new Map<number | undefined, string>(\n nextEntries.map(([name, dir]) => [nextTable.columns[name]?.pos, dir]),\n );\n\n if (prevByAttnum.has(undefined) || nextByAttnum.has(undefined)) {\n // Column not found in table spec; conservatively treat as changed.\n return true;\n }\n if (prevByAttnum.size !== nextByAttnum.size) {\n return true;\n }\n for (const [attnum, dir] of prevByAttnum) {\n if (nextByAttnum.get(attnum) !== dir) {\n return true;\n }\n }\n return false;\n}\n\nfunction findTableBySchemaAndName(\n tables: Map<number, PublishedTableWithReplicaIdentity>,\n schema: string,\n name: string,\n): 
PublishedTableWithReplicaIdentity | undefined {\n for (const table of tables.values()) {\n if (table.schema === schema && table.name === name) {\n return table;\n }\n }\n return undefined;\n}\n\nfunction columnsByID(\n columns: Record<string, ColumnSpec>,\n): Map<number, ColumnSpec & {name: string}> {\n const colsByID = new Map<number, ColumnSpec & {name: string}>();\n for (const [name, spec] of Object.entries(columns)) {\n // The `pos` field is the `attnum` in `pg_attribute`, which is a stable\n // identifier for the column in this table (i.e. never reused).\n colsByID.set(spec.pos, {...spec, name});\n }\n return colsByID;\n}\n\nfunction getMetadata(table: PublishedTableWithReplicaIdentity): TableMetadata {\n return {\n schemaOID: must(table.schemaOID),\n relationOID: table.oid,\n rowKey: Object.fromEntries(\n table.replicaIdentityColumns.map(k => [\n k,\n {attNum: table.columns[k].pos},\n ]),\n ),\n };\n}\n\n// Avoid sending the `columns` from the Postgres MessageRelation message.\n// They are not used downstream and the message can be large.\nfunction makeRelation(relation: PostgresRelation): MessageRelation {\n // Avoid sending the `columns` from the Postgres MessageRelation message.\n // They are not used downstream and the message can be large.\n const {columns: _, keyColumns, replicaIdentity, ...rest} = relation;\n return {\n ...rest,\n rowKey: {\n columns: keyColumns,\n type: replicaIdentity,\n },\n // For now, deprecated columns are sent for backwards compatibility.\n // These can be removed when bumping the MIN_PROTOCOL_VERSION to 5.\n keyColumns,\n replicaIdentity,\n };\n}\n\nclass UnsupportedSchemaChangeError extends Error {\n readonly name = 'UnsupportedSchemaChangeError';\n readonly description: string;\n readonly event: DdlUpdateEvent | SchemaSnapshotEvent;\n\n constructor(\n description: string,\n event: DdlUpdateEvent | SchemaSnapshotEvent,\n options?: ErrorOptions,\n ) {\n super(\n `Replication halted. 
Resync the replica to recover: ${description}`,\n options,\n );\n this.description = description;\n this.event = event;\n }\n}\n\nclass MissingEventTriggerSupport extends Error {\n readonly name = 'MissingEventTriggerSupport';\n\n constructor(msg: string) {\n super(\n `${msg}. Schema changes cannot be reliably replicated without event trigger support.`,\n );\n }\n}\n\n// TODO(0xcadams): should this be a ProtocolError?\nclass ShutdownSignal extends AbortError {\n readonly name = 'ShutdownSignal';\n\n constructor(cause: unknown) {\n super(\n 'shutdown signal received (e.g. another zero-cache taking over the replication stream)',\n {\n cause,\n },\n );\n }\n}\n\nfunction parseLogicalMessageContent<T>(\n {content}: MessageMessage,\n schema: v.Type<T>,\n) {\n const str =\n content instanceof Buffer\n ? content.toString('utf-8')\n : new TextDecoder().decode(content);\n const json = JSON.parse(str);\n return v.parse(json, schema, 'passthrough');\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAqHA,eAAsB,+BACpB,IACA,aACA,OACA,eACA,aACA,SACA,qBAC6E;AAC7E,OAAM,YACJ,IACA,WAAW,MAAM,MAAM,GAAG,MAAM,YAChC,gBACC,KAAK,OAAO,YAAY,KAAK,OAAO,IAAI,aAAa,aAAa,QAAQ,CAC5E;CAED,MAAM,UAAU,IAAI,SAAS,IAAI,cAAc;CAC/C,MAAM,oBAAoB,+BACxB,IAAI,gBAAgB,QAAQ,CAC7B;AACD,SAAQ,OAAO;CAIf,MAAM,KAAK,SAAS,IAAI,YAAY;AACpC,KAAI;AAiBF,SAAO;GAAC;GAAmB,cATN,IAAI,qBACvB,IACA,aACA,OAVsB,MAAM,uBAC5B,IACA,IACA,OACA,kBACD,EAOC,SACA,uBAAuB,KACxB;GAEuC;WAChC;AACR,QAAM,GAAG,KAAK;;;AAIlB,eAAe,uBACb,IACA,KACA,OACA,EACE,gBACA,cAAc,YACd,sBAEF;AAEA,OAAM,kBAAkB,IAAI,KAAK,OAAO,eAAe;CAEvD,MAAM,kBAAkB,MAAM,oBAC5B,IACA,KACA,OACA,gBACA,mBACD;AACD,KAAI,CAAC,gBACH,OAAM,IAAI,gBACR,8CAA8C,iBAC/C;CAIH,MAAM,YAAY,CAAC,GAAG,MAAM,aAAa,CAAC,MAAM;CAChD,MAAM,aAAa,gBAAgB,aAChC,QAAO,MAAK,CAAC,EAAE,WAAW,0BAA0B,MAAM,CAAC,CAAC,CAC5D,MAAM;AACT,KAAI,CAAC,UAAU,WAAW,WAAW,EAAE;AACrC,KAAG,OAAO,8CAA8C,UAAU,GAAG;AACrE,QAAM,IAAI,OAAO,UAAU,MAAM,OAAO,MAAM,SAAS,CAAC;AACxD,QAAM,IAAI,gBACR,2BAA2B,UAAU,2CACjB,WAAW,GAChC;;AAMH,KAAI,CAAC,UAAU,gBAAgB,cAAc,
WAAW,CACtD,OAAM,IAAI,gBACR,0BAA0B,gBAAgB,aAAa,0CACnB,WAAW,GAChD;CAIH,MAAM,SAAS,MAAM,GAAG;0DACgC,IAAI,WAAW,CAAC;IACtE,QAAQ;AACV,KAAI,OAAO,WAAW,WAAW,OAC/B,OAAM,IAAI,gBACR,0BAA0B,OAAO,MAAM,CAAC,gDACN,WAAW,GAC9C;CAGH,MAAM,EAAC,SAAQ;CACf,MAAM,SAAS,MAAM,GAEX;;0BAEc;AACxB,KAAI,OAAO,WAAW,EACpB,OAAM,IAAI,gBAAgB,oBAAoB,KAAK,aAAa;CAElE,MAAM,CAAC,EAAC,YAAY,eAAc;AAClC,KAAI,eAAe,QAAQ,cAAc,OACvC,OAAM,IAAI,gBACR,oBAAoB,KAAK,gEAC1B;AAEH,QAAO;;AAIT,IAAM,4BAA4B;;;;;AAUlC,IAAM,uBAAN,MAAmD;CACjD;CACA;CACA;CACA;CACA;CACA;CACA;CAEA,YACE,IACA,aACA,OACA,SACA,SACA,qBACA;AACA,QAAA,KAAW,GAAG,YAAY,aAAa,gBAAgB;AACvD,QAAA,KAAW,SAAS,IAAI,aAAa;IAElC,iBAAiB;GAClB,YAAY,GAAE,qBAAqB,4BAA2B;GAC/D,CAAC;AACF,QAAA,cAAoB;AACpB,QAAA,QAAc;AACd,QAAA,UAAgB;AAChB,QAAA,UAAgB;AAChB,QAAA,cAAoB,sBAChB,IAAI,YACF,GAAG,YAAY,aAAa,eAAe,EAC3C,OACA,MAAA,IACA,oBACD,GACD;;CAGN,mBAA6D;AAC3D,SAAO,MAAA,cAAoB,MAAA,YAAkB,mBAAmB,GAAG;;CAGrE,MAAM,YACJ,iBACA,mBAAsC,EAAE,EACjB;EACvB,MAAM,EAAC,SAAQ,MAAA;AAEf,QAAM,MAAA,uCAA6C,KAAK;EACxD,MAAM,SAAS,MAAM,uBAAuB,MAAA,IAAU,MAAA,MAAY;AAClE,QAAA,GAAS,OAAO,+BAA+B,OAAO;AACtD,SAAO,MAAA,YAAkB,MAAM,iBAAiB,QAAQ,iBAAiB;;CAG3E,OAAA,YACE,MACA,iBACA,aACA,kBACuB;EACvB,MAAM,cAAc,uBAAuB,gBAAgB,GAAG;EAC9D,MAAM,EAAC,UAAU,SAAQ,MAAM,UAC7B,MAAA,IACA,MAAA,IACA,MACA,CAAC,GAAG,YAAY,aAAa,EAC7B,YACD;EACD,MAAM,QAAQ,IAAI,MAAM,KAAK;EAK7B,MAAM,UAAU,IAAI,wBAAwB,MAAA,IAAU,gBAAgB;EACtE,MAAM,kBAAkB,IAAI,gBAAgB,MAAA,IAAU,UAAS,QAC7D,eAAe,MAAA,IAAU,MAAA,aAAmB,MAAA,SAAe,IAAI,CAChE;AACD,UACG,aAAa,UAAU,gBAAgB,CACvC,aAAa,iBAAiB,MAAM;AACvC,kBAAgB,IAAI,iBAAiB,iBAAiB;EAEtD,MAAM,cAAc,IAAI,YACtB,MAAA,IACA,MAAA,OACA,aACA,MAAA,IACA,MAAA,QAAc,cACf;;;;;EAMD,MAAM,0BACJ,KACA,QACmB;AACnB,OAAI,IAAI,QAAQ,aAAa;AAC3B,YAAQ,WAAW;KACjB;KACA,EAAC,KAAK,IAAI,eAAc;KACxB,EAAC,WAAW,qBAAqB,IAAI,EAAC;KACvC,CAAC;AACF,WAAO;;AAET,OACE,IAAI,QAAQ,aACZ,IAAI,WAAW,MAAA,aAAmB,eAClC;AACA,YAAQ,WAAW,MAAA,YAAkB,iBAAiB,IAAI,CAAC;AAC3D,WAAO;;AAET,UAAO;;AAGT,GAAM,YAAY;AAChB,OAAI;IACF,IAAI,cAAuC;IAC3C,IAAI,gBAAgB;AAEpB,eAAW,MAAM,CAAC,KAAK,QAAQ,UAAU;AACvC,SAAI,CAAC,uBAAuB,K
AAK,IAAI,EAAE;AAIrC,UAAI,CAAC,iBAAiB,aAAa,eAAe;AAChD,eAAQ,QAAQ,YAAY,cAAc;AAC1C,qBAAc;;AAEhB;;AAGF,SAAI,CAAC,aAAa;MAChB,MAAM,MAAM,QAAQ,QAAQ,cAAc;AAC1C,aAAO,QAAQ,YAAa,MAAM;AAClC,oBAAc,EAAE;;KAGlB,IAAI;AACJ,UAAK,MAAM,UAAU,MAAM,YAAY,YAAY,KAAK,IAAI,EAAE;AAC5D,YAAM,QAAQ,KAAK,OAAO;AAC1B,mBAAa;;AAGf,aAAQ,aAAa,IAArB;MACE,KAAK;AACH,uBAAgB;AAChB;MACF,KAAK;AACH,uBAAgB;AAChB,mBAAY,gBAAgB,WAAW,GAAG;AAC1C,WACE,SAAS,WAAW,KACpB,QAAQ,aAAa,GAAG,2BACxB;AAMA,gBAAQ,QAAQ,YAAY,cAAc;AAC1C,sBAAc;;AAEhB;;;YAGC,GAAG;IAGV,MAAM,MAAM,eAAe,EAAE;AAC7B,QAAI,eAAe,eAGjB,OAAM,MAAA,uBAA6B;AAErC,YAAQ,KAAK,IAAI;;MAEjB;AAEJ,QAAA,GAAS,OACP,8BAA8B,KAAK,QAAQ,gBAAgB,oBACzD,MAAA,QAAc,QACf,GACF;AAED,SAAO;GACL,SAAS,QAAQ,UAAU;GAC3B,MAAM,EAAC,OAAM,WAAU,MAAM,IAAI,OAAO,GAAG,UAAU,EAAC;GACvD;;CAGH,OAAA,wBAA+B;AAC7B,MAAI;GACF,MAAM,UAAU,MAAM,oBACpB,MAAA,IACA,MAAA,IACA,MAAA,OACA,MAAA,QAAc,QACf;AACD,OAAI,QACF,OAAA,GAAS,OACP,gCAAgC,MAAA,QAAc,QAAQ,IAAI,UAAU,QAAQ,kBAAkB,GAC/F;WAEI,GAAG;AACV,SAAA,GAAS,OAAO,8BAA8B,EAAE;;;;;;;;;;;;CAapD,OAAA,uCAA8C,YAAoB;EAChE,MAAM,iBAAiB,0BAA0B,MAAA,MAAY;EAC7D,MAAM,iBAAiB,sBAAsB,MAAA,MAAY;EAEzD,MAAM,SAAS,MAAM,MAAM,MAAA,IAAU,OAAM,QAAO;GAGhD,MAAM,SAAS,MAAM,GAEX;;;gCAGgB,eAAe,kBAAkB,eAAe;iCAC/C;AAC3B,SAAA,GAAS,OACP,iCAAiC,KAAK,UAAU,OAAO,GACxD;GACD,MAAM,gBAAgB,GAAG,eAAe,MAAA,MAAY,CAAC;GACrD,MAAM,iBAAiB,MAAM,GAAG;;iBAErB,IAAI,cAAc,CAAC;AAE9B,OAAI,OAAO,WAAW,GAAG;IACvB,MAAM,aAAa,MAAM,GAAG;;;iCAGH,eAAe,kBAAkB,eAAe;;AAEzE,UAAA,GAAS,OACP,QAAQ,WAAW,wCACnB;KAAC,OAAO;KAAY,UAAU;KAAe,CAC9C;AACD,UAAM,IAAI,WACR,oBAAoB,WAAW,+FAGhC;;AAGH,SAAA,GAAS,OACP,uCAAuC,WAAW,KAAK,KAAK,UAC1D,eACD,GACF;AACD,SAAM,GAAG;sBACO,IAAI,cAAc,CAAC,gBAAgB;AACnD,SAAM,GAAG;iBACE,IAAI,cAAc,CAAC;sCACE,MAAA,QAAc;yBAC3B;GACnB,MAAM,gBAAgB,MAAM,GAAsC;kCACtC,IAAI,cAAc,CAAC;AAC/C,SAAA,GAAS,OACP,sCAAsC,WAAW,KAAK,KAAK,UACzD,cACD,GACF;AACD,UAAO;IACP;EAEF,MAAM,OAAO,OAAO,QAAQ,EAAC,UAAS,QAAQ,KAAK,CAAC,KAAK,EAAC,UAAS,IAAI;AACvE,MAAI,KAAK,OACP,OAAA,GAAS,OAAO,uBAAuB,KAAK,eAAe;EAE7D,MAAM,aAAa,OAChB,QAAQ,EAAC,WAAU,SAAS,WAAW,CACvC,KAAK,EAAC,WAAU,KAAK;AAExB,M
AAI,WAAW,OACR,OAAA,qBAA2B,WAAW,CAAC,OAAM,MAChD,MAAA,GAAS,OAAO,oCAAoC,EAAE,CACvD;;CAIL,OAAA,qBAA4B,OAAiB;AAC3C,QAAA,GAAS,OAAO,sCAAsC,QAAQ;EAC9D,MAAM,MAAM,MAAA;AACZ,OAAK,IAAI,IAAI,GAAG,IAAI,GAAG,IACrB,KAAI;AACF,SAAM,GAAG;;iCAEgB,IAAI,MAAM,CAAC;;AAEpC,SAAA,GAAS,OAAO,wBAAwB,QAAQ;AAChD;WACO,GAAG;AAEV,OACE,aAAa,SAAS,iBACtB,EAAE,SAAS,iBAKX,OAAA,GAAS,QAAQ,WAAW,IAAI,EAAE,IAAI,OAAO,EAAE,IAAI,EAAE;OAErD,OAAA,GAAS,OAAO,kBAAkB,SAAS,EAAE;AAE/C,SAAM,MAAM,IAAK;;AAGrB,QAAA,GAAS,OAAO,sCAAsC,QAAQ;;;AAKlE,IAAa,QAAb,MAAuC;CACrC;CACA,2BAA0C;CAE1C,YAAY,MAAoB;AAC9B,QAAA,OAAa;;CAGf,SAAS,QAAmC;AAC1C,UAAQ,OAAO,IAAf;GACE,KAAK;IACH,MAAM,EAAC,cAAa,OAAO;AAC3B,QAAI,OAAO,GAAG,IACZ,OAAA,oBAA0B,UAAU;QAQpC,OAAA,0BAAgC,UAAU;AAE5C;GACF,KAAK;AAKH,QAAI,CAAC,OAAO,GAAG,QACb,OAAA,oBAA0B,OAAO,GAAG,gBAAgB;AAEtD;;;CAIN,qBAAqB,WAAmB;AACtC,QAAA,0BAAgC;;CAGlC,IAAI,WAAwB;AAC1B,MACE,MAAA,2BACA,MAAA,2BAAiC,UAEjC,OAAA,0BAAgC;AAElC,QAAA,QAAc,UAAU;;CAG1B,2BAA2B,WAAmB;AAC5C,MAAI,MAAA,4BAAkC,KACpC,OAAA,QAAc,UAAU;;CAI5B,SAAS,WAAwB;EAC/B,MAAM,MAAM,uBAAuB,UAAU;AAC7C,QAAA,KAAW,KAAK,IAAI;;;AAIxB,IAAM,kBAAkB,eAAE,OAAO;CAC/B,IAAI,eAAE,QAAQ;CACd,YAAY,eAAE,QAAQ;CACtB,cAAc,eAAE,QAAQ;CACzB,CAAC;AAIF,IAAM,cAAN,MAAM,YAAY;CAChB,OAAgB,iBAAiB;CAEjC;CACA;CAKA;CACA;CAEA;CACA,gBAAwB;CACxB;CAEA,YACE,IACA,OACA,IACA,eACA;AACA,QAAA,KAAW;AACX,OAAK,gBAAgB,GAAG,MAAM,MAAM,GAAG,MAAM,WAAW,YAAY;AACpE,QAAA,KAAW;AACX,QAAA,gBAAsB;;CAGxB,OAAA,eAAsB;AACpB,MAAI,MAAA,cAAoB,KAAA,GAAW;GACjC,MAAM,CAAC,EAAC,eAAc,MAAM,MAAA,EAAwC;;AAEpE,SAAA,YAAkB;;AAEpB,SAAO,MAAA;;CAGT,MAAM,kBAAkB,MAAM,KAAK,KAAK,EAAE;EACxC,MAAM,YAAY,MAAA,aAAoB,MAAM,MAAA,cAAoB;AAChE,QAAA,eAAqB,QAAQ;AAE7B,MAAI,aAAa,KACf,OAAM,MAAA,EAAiB;;;YAGjB,KAAK,cAAc;;oBAEX,MAAA,aAAmB;4BACX,IAAI;;;;;;MAY1B,OAAM,MAAA,EAAiB;;;YAGjB,KAAK,cAAc;;oBAEX,MAAA,aAAmB;4BACX,IAAI;;;;;AAM5B,SAAO,EAAC,gBAAgB,KAAI;;CAG9B,oBAAoB,SAAiB;AACnC,eAAa,MAAA,MAAY;AACzB,QAAA,QAAc,WAAW,YAAY;AACnC,OAAI;AACF,UAAM,KAAK,mBAAmB;YACvB,GAAG;AACV,UAAA,GAAS,OAAO,+BAA+B,EAAE;AACjD,UAAA,mBAAyB,MAAA,cAAoB;;KAE9C,QAAQ;;CAGb,iBAAiB,KAA8C;AAC7D,SACE,IA
AI,WAAW,KAAK,eACpB,8BAA8B,IAAI,SACnC;EACD,MAAM,SAAS,2BAA2B,KAAK,gBAAgB;EAC/D,MAAM,MAAM,KAAK,KAAK;EACtB,MAAM,iBAAiB,KAAK,IAC1B,KACA,OAAO,aAAa,MAAA,cACrB;AACD,MAAI,OAAO,OAAO,MAAA,aAKhB,OAAA,mBAAyB,iBAAiB,IAAI;EAEhD,MAAM,EAAC,YAAY,iBAAgB;AACnC,SAAO;GACL;GACA;IACE,KAAK;IACL,WAAW;KACT,aAAa;MACX;MACA;MACA,eAAe;MAChB;KACD;KACD;IACF;GACD,EAAC,WAAW,qBAAqB,IAAI,cAAc,MAAM,EAAC;GAC3D;;;AAWL,IAAM,gCAAgC;AAEtC,IAAM,cAAN,MAAkB;CAChB;CACA;CACA;CACA;CACA;CAEA;CACA;CAEA,YACE,IACA,EAAC,OAAO,YACR,aACA,IACA,eACA;AACA,QAAA,KAAW;AAEX,QAAA,cAAoB,GAAG,MAAM,GAAG;AAChC,QAAA,cAAoB;AACpB,QAAA,gBAAsB;AACtB,QAAA,KAAW;;CAGb,MAAM,YAAY,KAAa,KAA8C;AAC3E,MAAI,MAAA,OAAa;AACf,SAAA,SAAe,MAAA,MAAY;AAC3B,UAAO,EAAE;;AAEX,MAAI;AACF,UAAO,MAAM,MAAA,YAAkB,IAAI;WAC5B,KAAK;AACZ,SAAA,QAAc;IAAC;IAAK;IAAK;IAAK,aAAa;IAAE;AAC7C,SAAA,SAAe,MAAA,MAAY;GAE3B,MAAM,UAAU,2CAA2C,WAAW,IAAI;GAC1E,MAAM,eAA2B,EAAC,OAAO,SAAQ;AACjD,OAAI,eAAe,8BAA8B;AAC/C,iBAAa,SAAS,IAAI;AAC1B,iBAAa,UAAU,IAAI,MAAM;SAEjC,cAAa,SAAS,OAAO,IAAI;AAKnC,UAAO,CACL,CAAC,YAAY,EAAC,KAAK,YAAW,CAAC,EAC/B,CAAC,WAAW;IAAC,KAAK;IAAkB;IAAS;IAAa,CAAC,CAC5D;;;CAIL,UAAU,OAAyB;EACjC,MAAM,EAAC,KAAK,KAAK,KAAK,gBAAe;EACrC,MAAM,MAAM,KAAK,KAAK;AAItB,MAAI,MAAM,cAAc,KAAQ;AAC9B,SAAA,GAAS,QACP,2CAA2C,WAAW,IAAI,CAAC,IAAI,OAC7D,IACD,IACD,eAAe,+BACX,IAAI,MAAM,UAEV;IAAC,GAAG;IAAK,SAAS,KAAA;IAAU,CACjC;AACD,SAAM,cAAc;;;CAKxB,OAAA,YAAmB,KAA2C;AAC5D,UAAQ,IAAI,KAAZ;GACE,KAAK,QACH,QAAO,CACL;IACE;IACA;KAAC,GAAG;KAAK,MAAM;KAAI;IACnB,EAAC,iBAAiB,qBAAqB,KAAK,IAAI,UAAU,CAAC,EAAC;IAC7D,CACF;GAEH,KAAK;AACH,QAAI,EAAE,IAAI,OAAO,IAAI,KACnB,OAAM,IAAI,MACR,qCAAqC,UAAU,IAAI,GACpD;AAEH,WAAO,CACL,CACE,QACA;KACE,GAAG;KACH,UAAU,aAAa,IAAI,SAAS;KAEpC,KAAK,KAAK,IAAI,OAAO,IAAI,IAAI;KAC9B,CACF,CACF;GAGH,KAAK,SACH,QAAO,CACL,CACE,QACA;IACE,GAAG;IACH,UAAU,aAAa,IAAI,SAAS;IAEpC,KAAK,IAAI,OAAO,IAAI;IACrB,CACF,CACF;GAGH,KAAK,SACH,QAAO,CAAC,CAAC,QAAQ;IAAC,GAAG;IAAK,UAAU,aAAa,IAAI,SAAS;IAAC,CAAC,CAAC;GACnE,KAAK,WACH,QAAO,CAAC,CAAC,QAAQ;IAAC,GAAG;IAAK,WAAW,IAAI,UAAU,IAAI,aAAa;IAAC,CAAC,CAAC;GAEzE,KAAK;AACH,QAAI,CAAC,IAAI,O
AAO,WAAW,MAAA,YAAkB,EAAE;AAC7C,WAAA,GAAS,QAAQ,wCAAwC,IAAI,OAAO;AACpE,YAAO,EAAE;;AAEX,YAAQ,IAAI,OAAO,UAAU,MAAA,YAAkB,OAAO,EAAtD;KACE,KAAK;KACL,KAAK,OACH,QAAO,MAAA,iBAAuB,IAAI;KACpC;AACE,YAAA,GAAS,QAAQ,iCAAiC,IAAI,OAAO;AAC7D,aAAO,EAAE;;GAGf,KAAK;AACH,UAAA,mBAAyB,KAAA;AACzB,WAAO,CACL;KACE;KACA;KACA,EAAC,WAAW,qBAAqB,KAAK,IAAI,UAAU,CAAC,EAAC;KACvD,CACF;GAEH,KAAK,WACH,QAAO,MAAA,eAAqB,IAAI;GAClC,KAAK,OACH,QAAO,EAAE;GACX,KAAK,SAGH,QAAO,EAAE;GACX,QAEE,OAAM,IAAI,MAAM,2BAA2B,UAAU,IAAI,GAAG;;;CAIlE;CACA;CAEA,kBAAkB,KAAqB;EACrC,MAAM,QAAQ,2BAA2B,KAAK,uBAAuB;AAGrE,eAAa,MAAA,qBAA2B;EAExC,IAAI;EACJ,MAAM,EAAC,SAAQ;AACf,UAAQ,MAAR;GACE,KAAK;AAEH,UAAA,YAAkB,MAAM;AACxB,WAAO,EAAE;GACX,KAAK;AAEH,qBAAiB,KACf,MAAA,WACA,wCACD;AACD;GACF,KAAK;AACH,qBAAiB,MAAA,oBAA0B;AAC3C;GACF;AACE,UAAA,GAAS,OAAO,sCAAsC,OAAO;AAC7D,WAAO,EAAE;;AAKb,QAAA,mBAAyB,MAAM;AAC/B,MAAI,CAAC,gBAAgB;AACnB,SAAA,GAAS,OAAO,YAAY,IAAI,OAAO,GAAG,KAAK,QAAQ;AACvD,UAAO,EAAE;;AAEX,QAAA,GAAS,OAAO,cAAc,IAAI,OAAO,GAAG,KAAK,SAAS,MAAM;EAEhE,MAAM,UAAU,MAAA,kBAAwB,gBAAgB,MAAM,CAAC,KAC7D,WAAU,CAAC,QAAQ,OAAO,CAC3B;AAED,QAAA,GACG,YAAY,OAAO,MAAM,MAAM,IAAI,CACnC,YAAY,SAAS,MAAM,QAAQ,MAAM,CACzC,OAAO,GAAG,QAAQ,OAAO,oBAAoB,EAAC,SAAQ,CAAC;EAE1D,MAAM,oBAAoB,6CACxB,MAAM,OACP;AACD,MAAI,kBACF,OAAA,uBAA6B,WAAW,YAAY;AAClD,OAAI;AACF,UAAM,kBAAkB,MAAM,MAAA,IAAU,MAAA,GAAS;YAC1C,KAAK;AACZ,UAAA,GAAS,OAAO,oCAAoC,IAAI;;KAEzD,8BAA8B;AAGnC,SAAO;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA8BT,mBACE,WACA,QACgB;AAChB,MAAI;GACF,MAAM,CAAC,SAAS,WAAW,UAAU,UAAU;GAC/C,MAAM,CAAC,SAAS,WAAW,UAAU,OAAO,OAAO;GACnD,MAAM,UAA0B,EAAE;AAGlC,QAAK,MAAM,SAAS,QAAQ,QAAQ,CAClC,UAAS,MAAA,IAAU,MAAM;GAG3B,MAAM,CAAC,YAAY,cAAc,qBAAqB,SAAS,QAAQ;GAQvE,MAAM,UAAU,aAAa,SAAS,QAAQ;AAC9C,QAAK,MAAM,MAAM,QACf,KACE,2BACE,KAAK,QAAQ,IAAI,GAAG,CAAC,EACrB,KAAK,QAAQ,IAAI,GAAG,CAAC,EACrB,SACA,QACD,EACD;AACA,eAAW,IAAI,GAAG;AAClB,eAAW,IAAI,GAAG;;AAItB,QAAK,MAAM,MAAM,YAAY;IAC3B,MAAM,EAAC,QAAQ,SAAQ,KAAK,QAAQ,IAAI,GAAG,CAAC;AAC5C,YAAQ,KAAK;KAAC,KAAK;KAAc,IAAI;MAAC;MAAQ;MAAK;KAAC,CAAC;;GAIvD,MAAM,CAAC,YAAY,cAAc,qBAAqB,SAAS,QAAQ;AACvE
,QAAK,MAAM,MAAM,YAAY;IAC3B,MAAM,EAAC,QAAQ,SAAQ,KAAK,QAAQ,IAAI,GAAG,CAAC;AAC5C,YAAQ,KAAK;KAAC,KAAK;KAAc,IAAI;MAAC;MAAQ;MAAK;KAAC,CAAC;;GAGvD,MAAM,SAAS,aAAa,SAAS,QAAQ;AAC7C,QAAK,MAAM,MAAM,OACf,SAAQ,KACN,GAAG,MAAA,gBACD,KAAK,QAAQ,IAAI,GAAG,CAAC,EACrB,KAAK,QAAQ,IAAI,GAAG,CAAC,EACrB,OAAO,MAAM,IACd,CACF;AAGH,QAAK,MAAM,MAAM,YAAY;IAC3B,MAAM,OAAO,KAAK,QAAQ,IAAI,GAAG,CAAC;IAClC,MAAM,cAA2B;KAC/B,KAAK;KACL;KACA,UAAU,YAAY,KAAK;KAC5B;AACD,QAAI,CAAC,OAAO,MAAM,IAAI,WAAW,SAAS,CAIxC,aAAY,WAAW,UAAU,KAAK,UAAU,EAAC,KAAK,cAAa,EACjE,QACD,EAAE;AAEL,YAAQ,KAAK,YAAY;;AAK3B,QAAK,MAAM,MAAM,YAAY;IAC3B,MAAM,OAAO,KAAK,QAAQ,IAAI,GAAG,CAAC;AAClC,YAAQ,KAAK;KAAC,KAAK;KAAgB;KAAK,CAAC;;AAE3C,UAAO;WACA,GAAG;AACV,SAAM,IAAI,6BAA6B,OAAO,EAAE,EAAE,QAAQ,EAAC,OAAO,GAAE,CAAC;;;CAIzE,iBACE,UACA,UACA,QACgB;EAChB,MAAM,UAA0B,EAAE;AAClC,MACE,SAAS,WAAW,SAAS,UAC7B,SAAS,SAAS,SAAS,KAE3B,SAAQ,KAAK;GACX,KAAK;GACL,KAAK;IAAC,QAAQ,SAAS;IAAQ,MAAM,SAAS;IAAK;GACnD,KAAK;IAAC,QAAQ,SAAS;IAAQ,MAAM,SAAS;IAAK;GACpD,CAAC;EAEJ,MAAM,cAAc,YAAY,SAAS;EACzC,MAAM,cAAc,YAAY,SAAS;AACzC,MAAI,CAAC,UAAU,aAAa,YAAY,CACtC,SAAQ,KAAK;GACX,KAAK;GACL,OAAO;IAAC,QAAQ,SAAS;IAAQ,MAAM,SAAS;IAAK;GACrD,KAAK;GACL,KAAK;GACN,CAAC;EAEJ,MAAM,QAAQ;GAAC,QAAQ,SAAS;GAAQ,MAAM,SAAS;GAAK;EAC5D,MAAM,aAAa,YAAY,SAAS,QAAQ;EAChD,MAAM,aAAa,YAAY,SAAS,QAAQ;EAGhD,MAAM,CAAC,SAAS,SAAS,qBAAqB,YAAY,WAAW;AACrE,OAAK,MAAM,MAAM,SAAS;GACxB,MAAM,EAAC,MAAM,WAAU,KAAK,WAAW,IAAI,GAAG,CAAC;AAC/C,WAAQ,KAAK;IAAC,KAAK;IAAe;IAAO;IAAO,CAAC;;EAInD,MAAM,OAAO,aAAa,YAAY,WAAW;AACjD,OAAK,MAAM,MAAM,MAAM;GACrB,MAAM,EAAC,MAAM,SAAS,GAAG,YAAW,KAAK,WAAW,IAAI,GAAG,CAAC;GAC5D,MAAM,EAAC,MAAM,SAAS,GAAG,YAAW,KAAK,WAAW,IAAI,GAAG,CAAC;AAK5D,OACE,YAAY,WACZ,QAAQ,aAAa,QAAQ,YAC7B,QAAQ,YAAY,QAAQ,QAE5B,SAAQ,KAAK;IACX,KAAK;IACL;IACA,KAAK;KAAC,MAAM;KAAS,MAAM;KAAQ;IACnC,KAAK;KAAC,MAAM;KAAS,MAAM;KAAQ;IACpC,CAAC;;EAQN,MAAM,iBAAiB,WAAW;AAGlC,OAAK,MAAM,MAAM,OAAO;GACtB,MAAM,EAAC,MAAM,GAAG,SAAQ,KAAK,WAAW,IAAI,GAAG,CAAC;GAChD,MAAM,SAAS;IAAC;IAAM;IAAK;GAC3B,MAAM,YAAuB;IAC3B,KAAK;IACL;IACA;IACA,eAAe,YAAY,SAAS;IACrC;AACD,OAAI,gBAAgB;AACl
B,cAAU,OAAO,KAAK,OAAO;AAC7B,cAAU,WAAW,EAAC,QAAQ,KAAK,KAAI;SAGvC,KAAI;AACF,4BAAwB,MAAM,MAAM,OAAO;YACpC,GAAG;AACV,QAAI,EAAE,aAAa,+BAGjB,OAAM;AAQR,UAAA,GAAS,OACP,sBAAsB,MAAM,KAAK,GAAG,KAAK,IAAI,OAAO,EAAE,GACvD;AACD,cAAU,OAAO,KAAK,OAAO;AAC7B,cAAU,WAAW,EAAC,QAAQ,KAAK,KAAI;;AAG3C,WAAQ,KAAK,UAAU;;AAEzB,SAAO;;;;;;;;;;;;;;;;;;;;CAqBT,OAAA,eAAsB,KAAoD;EACxE,MAAM,EAAC,cAAc,iBAAgB,MAAA;AACrC,MAAI,aACF,QAAO,EAAE;EAEX,MAAM,gBAAgB,MAAM,mBAAmB,MAAA,IAAU,aAAa;EACtE,MAAM,aAAa,oBAAoB,MAAA,eAAqB,cAAc;AAC1E,MAAI,eAAe,KACjB,OAAM,IAAI,2BAA2B,WAAW;EAMlD,MAAM,OAAO,MAAA,cAAoB,OAAO,MACtC,MAAK,EAAE,QAAQ,IAAI,YACpB;AACD,MAAI,CAAC,KAEH,OAAM,IAAI,2BACR,kCAAkC,UAAU,IAAI,GACjD;AAEH,MAAI,kBAAkB,MAAM,IAAI,CAC9B,OAAM,IAAI,2BACR,gDAAgD,UAAU,KAAK,CAAC,MAAM,UAAU,IAAI,GACrF;AAEH,SAAO,EAAE;;;AAIb,SAAS,oBACP,GACA,GACe;AAEf,KAAI,EAAE,OAAO,WAAW,EAAE,OAAO,OAC/B,QAAO;AAET,MAAK,IAAI,IAAI,GAAG,IAAI,EAAE,OAAO,QAAQ,KAAK;EACxC,MAAM,KAAK,EAAE,OAAO;EACpB,MAAM,KAAK,EAAE,OAAO;EACpB,MAAM,aAAa,mBAAmB,IAAI,GAAG;AAC7C,MAAI,WACF,QAAO;;AAGX,QAAO;;AAIT,IAAM,eAAe,GAAyB,MAC5C,EAAE,GAAG,MAAM,EAAE,GAAG,MAAM,KAAK,EAAE,GAAG,MAAM,EAAE,GAAG,MAAM,IAAI;AAEvD,SAAS,mBACP,GACA,GACe;AACf,KAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,WAAW,EAAE,UAAU,EAAE,SAAS,EAAE,KAC3D,QAAO,UAAU,EAAE,KAAK,wBAAwB,EAAE,KAAK;AAEzD,KAAI,CAAC,UAAU,EAAE,YAAY,EAAE,WAAW,CACxC,QAAO,yBAAyB,EAAE,KAAK;CAEzC,MAAM,QAAQ,OAAO,QAAQ,EAAE,QAAQ,CAAC,KAAK,YAAY;CACzD,MAAM,QAAQ,OAAO,QAAQ,EAAE,QAAQ,CAAC,KAAK,YAAY;AACzD,KACE,MAAM,WAAW,MAAM,UACvB,MAAM,MAAM,CAAC,OAAO,OAAO,MAAM;EAC/B,MAAM,CAAC,OAAO,QAAQ,MAAM;AAC5B,SACE,UAAU,SACV,KAAK,QAAQ,KAAK,OAClB,KAAK,YAAY,KAAK,WACtB,KAAK,YAAY,KAAK;GAExB,CAEF,QAAO,qBAAqB,EAAE,KAAK;AAErC,QAAO;;AAGT,SAAgB,kBAAkB,GAAuB,GAAqB;AAC5E,KAAI,EAAE,QAAQ,EAAE,eAAe,EAAE,WAAW,EAAE,UAAU,EAAE,SAAS,EAAE,KACnE,QAAO;AAET,KAME,EAAE,oBAAoB,aACtB,CAAC,OAAO,IAAI,IAAI,EAAE,WAAW,EAAE,IAAI,IAAI,EAAE,WAAW,CAAC,CAErD,QAAO;CAET,MAAM,QAAQ,OAAO,QAAQ,EAAE,QAAQ,CAAC,KAAK,YAAY;CACzD,MAAM,QAAQ,EAAE;AAChB,QACE,MAAM,WAAW,MAAM,UACvB,MAAM,MAAM,CAAC,OAAO,OAAO,MAAM;EAC/B,MAAM,OAAO,MAAM;AACnB,SAAO,UAAU,KAA
K,QAAQ,KAAK,YAAY,KAAK;GACpD;;AAIN,SAAS,eAAe,GAAmB;AACzC,KAAI,EAAE,aAAa,OACjB,QAAO,IAAI,MAAM,OAAO,EAAE,CAAC;AAE7B,KAAI,aAAa,SAAS,iBAAiB,EAAE,SAAS,kBACpD,QAAO,IAAI,eAAe,EAAE;AAE9B,QAAO;;AAET,IAAM,YAAY,OAAmB,GAAG,GAAG,OAAO,GAAG,GAAG;AAExD,SAAS,UAAU,WAA4B;AAC7C,QAAO,CAGL,IAAI,IAAI,UAAU,OAAO,KAAI,MAAK,CAAC,EAAE,KAAK,EAAE,CAAC,CAAC,EAC9C,IAAI,IAAI,UAAU,QAAQ,KAAI,MAAK,CAAC,SAAS,EAAE,EAAE,EAAE,CAAC,CAAC,CACtD;;;;;;;;;;AAWH,SAAS,2BACP,MACA,MACA,YACA,YACS;AACT,KACE,KAAK,WAAW,KAAK,UACrB,KAAK,iBAAiB,KAAK,gBAC3B,KAAK,sBAAsB,KAAK,qBAChC,KAAK,gBAAgB,KAAK,YAE1B,QAAO;CAGT,MAAM,YAAY,yBAChB,YACA,KAAK,QACL,KAAK,UACN;CACD,MAAM,YAAY,yBAChB,YACA,KAAK,QACL,KAAK,UACN;AACD,KAAI,CAAC,aAAa,CAAC,UAEjB,QAAO;CAGT,MAAM,cAAc,OAAO,QAAQ,KAAK,QAAQ;CAChD,MAAM,cAAc,OAAO,QAAQ,KAAK,QAAQ;AAChD,KAAI,YAAY,WAAW,YAAY,OACrC,QAAO;CAIT,MAAM,eAAe,IAAI,IACvB,YAAY,KAAK,CAAC,MAAM,SAAS,CAAC,UAAU,QAAQ,OAAO,KAAK,IAAI,CAAC,CACtE;CACD,MAAM,eAAe,IAAI,IACvB,YAAY,KAAK,CAAC,MAAM,SAAS,CAAC,UAAU,QAAQ,OAAO,KAAK,IAAI,CAAC,CACtE;AAED,KAAI,aAAa,IAAI,KAAA,EAAU,IAAI,aAAa,IAAI,KAAA,EAAU,CAE5D,QAAO;AAET,KAAI,aAAa,SAAS,aAAa,KACrC,QAAO;AAET,MAAK,MAAM,CAAC,QAAQ,QAAQ,aAC1B,KAAI,aAAa,IAAI,OAAO,KAAK,IAC/B,QAAO;AAGX,QAAO;;AAGT,SAAS,yBACP,QACA,QACA,MAC+C;AAC/C,MAAK,MAAM,SAAS,OAAO,QAAQ,CACjC,KAAI,MAAM,WAAW,UAAU,MAAM,SAAS,KAC5C,QAAO;;AAMb,SAAS,YACP,SAC0C;CAC1C,MAAM,2BAAW,IAAI,KAA0C;AAC/D,MAAK,MAAM,CAAC,MAAM,SAAS,OAAO,QAAQ,QAAQ,CAGhD,UAAS,IAAI,KAAK,KAAK;EAAC,GAAG;EAAM;EAAK,CAAC;AAEzC,QAAO;;AAGT,SAAS,YAAY,OAAyD;AAC5E,QAAO;EACL,WAAW,KAAK,MAAM,UAAU;EAChC,aAAa,MAAM;EACnB,QAAQ,OAAO,YACb,MAAM,uBAAuB,KAAI,MAAK,CACpC,GACA,EAAC,QAAQ,MAAM,QAAQ,GAAG,KAAI,CAC/B,CAAC,CACH;EACF;;AAKH,SAAS,aAAa,UAA6C;CAGjE,MAAM,EAAC,SAAS,GAAG,YAAY,iBAAiB,GAAG,SAAQ;AAC3D,QAAO;EACL,GAAG;EACH,QAAQ;GACN,SAAS;GACT,MAAM;GACP;EAGD;EACA;EACD;;AAGH,IAAM,+BAAN,cAA2C,MAAM;CAC/C,OAAgB;CAChB;CACA;CAEA,YACE,aACA,OACA,SACA;AACA,QACE,sDAAsD,eACtD,QACD;AACD,OAAK,cAAc;AACnB,OAAK,QAAQ;;;AAIjB,IAAM,6BAAN,cAAyC,MAAM;CAC7C,OAAgB;CAEhB,YAAY,KAAa;AACvB,QACE,GAAG,IAAI,+EACR;;;AAKL,IAAM,iBAAN,cAA6B,WAAW
;CACtC,OAAgB;CAEhB,YAAY,OAAgB;AAC1B,QACE,yFACA,EACE,OACD,CACF;;;AAIL,SAAS,2BACP,EAAC,WACD,QACA;CACA,MAAM,MACJ,mBAAmB,SACf,QAAQ,SAAS,QAAQ,GACzB,IAAI,aAAa,CAAC,OAAO,QAAQ;AAEvC,QAAO,MADM,KAAK,MAAM,IAAI,EACP,QAAQ,cAAc"}
@@ -697,6 +697,14 @@ export declare const changeStreamMessageSchema: v.UnionType<[v.UnionType<[v.Tupl
697
697
  errorDetails: v.Optional<Record<string, import("./json.ts").JSONValue | undefined>>;
698
698
  }, undefined>]>, v.TupleType<[v.Type<"status">, v.ObjectType<{
699
699
  ack: v.Type<boolean>;
700
+ lagReport: v.Optional<{
701
+ lastTimings: {
702
+ sendTimeMs: number;
703
+ commitTimeMs: number;
704
+ receiveTimeMs: number;
705
+ };
706
+ nextSendTimeMs: number;
707
+ }>;
700
708
  }, undefined>, v.ObjectType<{
701
709
  watermark: v.Type<string>;
702
710
  }, undefined>]>]>;
@@ -1 +1 @@
1
- {"version":3,"file":"downstream.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/protocol/current/downstream.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,CAAC,MAAM,wCAAwC,CAAC;AAW5D,QAAA,MAAM,KAAK;;;;;;eAIT,CAAC;AACH,QAAA,MAAM,IAAI;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;mBAGR,CAAC;AACH,QAAA,MAAM,MAAM;;;;eAIV,CAAC;AACH,QAAA,MAAM,QAAQ;;eAAmD,CAAC;AAElE,MAAM,MAAM,KAAK,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,KAAK,CAAC,CAAC;AAC1C,MAAM,MAAM,IAAI,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,IAAI,CAAC,CAAC;AACxC,MAAM,MAAM,MAAM,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,MAAM,CAAC,CAAC;AAC5C,MAAM,MAAM,QAAQ,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,QAAQ,CAAC,CAAC;AAEhD,eAAO,MAAM,sBAAsB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAAyC,CAAC;AAC7E,MAAM,MAAM,gBAAgB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,sBAAsB,CAAC,CAAC;AAEtE,eAAO,MAAM,yBAAyB;;;;eAGpC,CAAC;AACH,MAAM,MAAM,mBAAmB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,yBAAyB,CAAC,CAAC;AAE5E,4EAA4E;AAC5E,eAAO,MAAM,yBAAyB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAIrC,CAAC;AAEF,MAAM,MAAM,mBAAmB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,yBAAyB,CAAC,CAAC"}
1
+ {"version":3,"file":"downstream.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/protocol/current/downstream.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,CAAC,MAAM,wCAAwC,CAAC;AAW5D,QAAA,MAAM,KAAK;;;;;;eAIT,CAAC;AACH,QAAA,MAAM,IAAI;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;mBAGR,CAAC;AACH,QAAA,MAAM,MAAM;;;;eAIV,CAAC;AACH,QAAA,MAAM,QAAQ;;eAAmD,CAAC;AAElE,MAAM,MAAM,KAAK,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,KAAK,CAAC,CAAC;AAC1C,MAAM,MAAM,IAAI,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,IAAI,CAAC,CAAC;AACxC,MAAM,MAAM,MAAM,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,MAAM,CAAC,CAAC;AAC5C,MAAM,MAAM,QAAQ,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,QAAQ,CAAC,CAAC;AAEhD,eAAO,MAAM,sBAAsB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAAyC,CAAC;AAC7E,MAAM,MAAM,gBAAgB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,sBAAsB,CAAC,CAAC;AAEtE,eAAO,MAAM,yBAAyB;;;;eAGpC,CAAC;AACH,MAAM,MAAM,mBAAmB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,yBAAyB,CAAC,CAAC;AAE5E,4EAA4E;AAC5E,eAAO,MAAM,yBAAyB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAIrC,CAAC;AAEF,MAAM,MAAM,mBAAmB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,yBAAyB,CAAC,CAAC"}
@@ -1,14 +1,31 @@
1
1
  import * as v from '../../../../../../shared/src/valita.ts';
2
2
  /**
3
- * The downstream status message indicates whether it should be echoed
3
+ * The downstream status messages contain metadata about the status
4
+ * of the change-source. indicates whether it should be echoed
4
5
  * back in an upstream status message.
5
6
  */
6
7
  export declare const downstreamStatusSchema: v.ObjectType<{
7
8
  ack: v.Type<boolean>;
9
+ lagReport: v.Optional<{
10
+ lastTimings: {
11
+ sendTimeMs: number;
12
+ commitTimeMs: number;
13
+ receiveTimeMs: number;
14
+ };
15
+ nextSendTimeMs: number;
16
+ }>;
8
17
  }, undefined>;
9
18
  export type DownstreamStatus = v.Infer<typeof downstreamStatusSchema>;
10
19
  export declare const downstreamStatusMessageSchema: v.TupleType<[v.Type<"status">, v.ObjectType<{
11
20
  ack: v.Type<boolean>;
21
+ lagReport: v.Optional<{
22
+ lastTimings: {
23
+ sendTimeMs: number;
24
+ commitTimeMs: number;
25
+ receiveTimeMs: number;
26
+ };
27
+ nextSendTimeMs: number;
28
+ }>;
12
29
  }, undefined>, v.ObjectType<{
13
30
  watermark: v.Type<string>;
14
31
  }, undefined>]>;
@@ -20,6 +37,14 @@ export declare const downstreamStatusMessageSchema: v.TupleType<[v.Type<"status"
20
37
  */
21
38
  export declare const upstreamStatusMessageSchema: v.TupleType<[v.Type<"status">, v.UnionType<[v.ObjectType<{
22
39
  ack: v.Type<boolean>;
40
+ lagReport: v.Optional<{
41
+ lastTimings: {
42
+ sendTimeMs: number;
43
+ commitTimeMs: number;
44
+ receiveTimeMs: number;
45
+ };
46
+ nextSendTimeMs: number;
47
+ }>;
23
48
  }, undefined>, v.ObjectType<{
24
49
  tag: v.Type<"commit">;
25
50
  }, undefined>]>, v.ObjectType<{
@@ -1 +1 @@
1
- {"version":3,"file":"status.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/protocol/current/status.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,CAAC,MAAM,wCAAwC,CAAC;AAG5D;;;GAGG;AACH,eAAO,MAAM,sBAAsB;;aAEjC,CAAC;AAEH,MAAM,MAAM,gBAAgB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,sBAAsB,CAAC,CAAC;AAEtE,eAAO,MAAM,6BAA6B;;;;eAIxC,CAAC;AAEH;;;;;GAKG;AACH,eAAO,MAAM,2BAA2B;;;;;;eAItC,CAAC;AAEH;;;;;;;;;;;;;;;;;;;;;;;;;GAyBG;AACH,MAAM,MAAM,uBAAuB,GAAG,CAAC,CAAC,KAAK,CAC3C,OAAO,6BAA6B,CACrC,CAAC;AACF,MAAM,MAAM,qBAAqB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,2BAA2B,CAAC,CAAC"}
1
+ {"version":3,"file":"status.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/protocol/current/status.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,CAAC,MAAM,wCAAwC,CAAC;AAI5D;;;;GAIG;AACH,eAAO,MAAM,sBAAsB;;;;;;;;;;aAQjC,CAAC;AAEH,MAAM,MAAM,gBAAgB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,sBAAsB,CAAC,CAAC;AAEtE,eAAO,MAAM,6BAA6B;;;;;;;;;;;;eAIxC,CAAC;AAEH;;;;;GAKG;AACH,eAAO,MAAM,2BAA2B;;;;;;;;;;;;;;eAItC,CAAC;AAEH;;;;;;;;;;;;;;;;;;;;;;;;;GAyBG;AACH,MAAM,MAAM,uBAAuB,GAAG,CAAC,CAAC,KAAK,CAC3C,OAAO,6BAA6B,CACrC,CAAC;AACF,MAAM,MAAM,qBAAqB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,2BAA2B,CAAC,CAAC"}
@@ -1,11 +1,16 @@
1
1
  import { valita_exports } from "../../../../../../shared/src/valita.js";
2
2
  import { commitSchema } from "./data.js";
3
+ import { changeSourceReportSchema } from "../../../replicator/reporter/report-schema.js";
3
4
  //#region ../zero-cache/src/services/change-source/protocol/current/status.ts
4
5
  /**
5
- * The downstream status message indicates whether it should be echoed
6
+ * The downstream status messages contain metadata about the status
7
+ * of the change-source. indicates whether it should be echoed
6
8
  * back in an upstream status message.
7
9
  */
8
- var downstreamStatusSchema = valita_exports.object({ ack: valita_exports.boolean().optional(() => true) });
10
+ var downstreamStatusSchema = valita_exports.object({
11
+ ack: valita_exports.boolean().optional(() => true),
12
+ lagReport: changeSourceReportSchema.optional()
13
+ });
9
14
  var downstreamStatusMessageSchema = valita_exports.tuple([
10
15
  valita_exports.literal("status"),
11
16
  downstreamStatusSchema,
@@ -1 +1 @@
1
- {"version":3,"file":"status.js","names":[],"sources":["../../../../../../../../zero-cache/src/services/change-source/protocol/current/status.ts"],"sourcesContent":["import * as v from '../../../../../../shared/src/valita.ts';\nimport {commitSchema} from './data.ts';\n\n/**\n * The downstream status message indicates whether it should be echoed\n * back in an upstream status message.\n */\nexport const downstreamStatusSchema = v.object({\n ack: v.boolean().optional(() => true),\n});\n\nexport type DownstreamStatus = v.Infer<typeof downstreamStatusSchema>;\n\nexport const downstreamStatusMessageSchema = v.tuple([\n v.literal('status'),\n downstreamStatusSchema,\n v.object({watermark: v.string()}),\n]);\n\n/**\n * The `zero-cache` will send the Commit payload to acknowledge a completed\n * transaction (unless the `skipAck` field was specified in the Begin message\n * of the transaction), and will echo back the downstream `status` message if\n * `ack` is true.\n */\nexport const upstreamStatusMessageSchema = v.tuple([\n v.literal('status'),\n v.union(downstreamStatusSchema, commitSchema),\n v.object({watermark: v.string()}),\n]);\n\n/**\n * Status messages convey positional information from both the ChangeSource\n * and the `zero-cache`.\n *\n * A StatusMessage from the ChangeSource indicates a position in its change\n * log. Generally, the watermarks sent in `Commit` messages already convey\n * this information, but a StatusMessage may also be sent to indicate that the\n * log has progressed without any corresponding changes relevant to the\n * subscriber. 
The watermarks of commit messages and status messages must be\n * monotonic in the stream of messages from the ChangeSource.\n *\n * The `zero-cache` sends StatusMessages to the ChangeSource:\n *\n * * when it has processed a `Commit` received from the ChangeSource,\n * unless the `Begin` message specified `skipAck`.\n *\n * * when it receives a `StatusMessage` and all preceding `Commit` messages\n * have been processed\n *\n * This allows the ChangeSource to clean up change log entries appropriately.\n *\n * Note that StatusMessages from the ChangeSource are optional. If a\n * ChangeSource implementation can track subscriber progress and clean up\n * its change log purely from Commit-driven StatusMessages there is no need\n * for the ChangeSource to send StatusMessages.\n */\nexport type DownstreamStatusMessage = v.Infer<\n typeof downstreamStatusMessageSchema\n>;\nexport type UpstreamStatusMessage = v.Infer<typeof upstreamStatusMessageSchema>;\n"],"mappings":";;;;;;;AAOA,IAAa,yBAAyB,eAAE,OAAO,EAC7C,KAAK,eAAE,SAAS,CAAC,eAAe,KAAK,EACtC,CAAC;AAIF,IAAa,gCAAgC,eAAE,MAAM;CACnD,eAAE,QAAQ,SAAS;CACnB;CACA,eAAE,OAAO,EAAC,WAAW,eAAE,QAAQ,EAAC,CAAC;CAClC,CAAC;;;;;;;AAQF,IAAa,8BAA8B,eAAE,MAAM;CACjD,eAAE,QAAQ,SAAS;CACnB,eAAE,MAAM,wBAAwB,aAAa;CAC7C,eAAE,OAAO,EAAC,WAAW,eAAE,QAAQ,EAAC,CAAC;CAClC,CAAC"}
1
+ {"version":3,"file":"status.js","names":[],"sources":["../../../../../../../../zero-cache/src/services/change-source/protocol/current/status.ts"],"sourcesContent":["import * as v from '../../../../../../shared/src/valita.ts';\nimport {changeSourceReportSchema} from '../../../replicator/reporter/report-schema.ts';\nimport {commitSchema} from './data.ts';\n\n/**\n * The downstream status messages contain metadata about the status\n * of the change-source. indicates whether it should be echoed\n * back in an upstream status message.\n */\nexport const downstreamStatusSchema = v.object({\n // Indicates whether the status message should be echoed back\n // in an upstream status message once the consumer has successfully\n // processed/persisted all preceding changes.\n ack: v.boolean().optional(() => true),\n\n // Contains a lag report for recording end to end latency metrics.\n lagReport: changeSourceReportSchema.optional(),\n});\n\nexport type DownstreamStatus = v.Infer<typeof downstreamStatusSchema>;\n\nexport const downstreamStatusMessageSchema = v.tuple([\n v.literal('status'),\n downstreamStatusSchema,\n v.object({watermark: v.string()}),\n]);\n\n/**\n * The `zero-cache` will send the Commit payload to acknowledge a completed\n * transaction (unless the `skipAck` field was specified in the Begin message\n * of the transaction), and will echo back the downstream `status` message if\n * `ack` is true.\n */\nexport const upstreamStatusMessageSchema = v.tuple([\n v.literal('status'),\n v.union(downstreamStatusSchema, commitSchema),\n v.object({watermark: v.string()}),\n]);\n\n/**\n * Status messages convey positional information from both the ChangeSource\n * and the `zero-cache`.\n *\n * A StatusMessage from the ChangeSource indicates a position in its change\n * log. 
Generally, the watermarks sent in `Commit` messages already convey\n * this information, but a StatusMessage may also be sent to indicate that the\n * log has progressed without any corresponding changes relevant to the\n * subscriber. The watermarks of commit messages and status messages must be\n * monotonic in the stream of messages from the ChangeSource.\n *\n * The `zero-cache` sends StatusMessages to the ChangeSource:\n *\n * * when it has processed a `Commit` received from the ChangeSource,\n * unless the `Begin` message specified `skipAck`.\n *\n * * when it receives a `StatusMessage` and all preceding `Commit` messages\n * have been processed\n *\n * This allows the ChangeSource to clean up change log entries appropriately.\n *\n * Note that StatusMessages from the ChangeSource are optional. If a\n * ChangeSource implementation can track subscriber progress and clean up\n * its change log purely from Commit-driven StatusMessages there is no need\n * for the ChangeSource to send StatusMessages.\n */\nexport type DownstreamStatusMessage = v.Infer<\n typeof downstreamStatusMessageSchema\n>;\nexport type UpstreamStatusMessage = v.Infer<typeof upstreamStatusMessageSchema>;\n"],"mappings":";;;;;;;;;AASA,IAAa,yBAAyB,eAAE,OAAO;CAI7C,KAAK,eAAE,SAAS,CAAC,eAAe,KAAK;CAGrC,WAAW,yBAAyB,UAAU;CAC/C,CAAC;AAIF,IAAa,gCAAgC,eAAE,MAAM;CACnD,eAAE,QAAQ,SAAS;CACnB;CACA,eAAE,OAAO,EAAC,WAAW,eAAE,QAAQ,EAAC,CAAC;CAClC,CAAC;;;;;;;AAQF,IAAa,8BAA8B,eAAE,MAAM;CACjD,eAAE,QAAQ,SAAS;CACnB,eAAE,MAAM,wBAAwB,aAAa;CAC7C,eAAE,OAAO,EAAC,WAAW,eAAE,QAAQ,EAAC,CAAC;CAClC,CAAC"}
@@ -2,6 +2,14 @@ import * as v from '../../../../../../shared/src/valita.ts';
2
2
  /** At the moment, the only upstream messages are status messages. */
3
3
  export declare const changeSourceUpstreamSchema: v.TupleType<[v.Type<"status">, v.UnionType<[v.ObjectType<{
4
4
  ack: v.Type<boolean>;
5
+ lagReport: v.Optional<{
6
+ lastTimings: {
7
+ sendTimeMs: number;
8
+ commitTimeMs: number;
9
+ receiveTimeMs: number;
10
+ };
11
+ nextSendTimeMs: number;
12
+ }>;
5
13
  }, undefined>, v.ObjectType<{
6
14
  tag: v.Type<"commit">;
7
15
  }, undefined>]>, v.ObjectType<{
@@ -1 +1 @@
1
- {"version":3,"file":"upstream.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/protocol/current/upstream.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,CAAC,MAAM,wCAAwC,CAAC;AAQ5D,sEAAsE;AACtE,eAAO,MAAM,0BAA0B;;;;;;eAA8B,CAAC;AACtE,MAAM,MAAM,oBAAoB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,0BAA0B,CAAC,CAAC;AAE9E;;;;;;;;;;;GAWG;AACH,eAAO,MAAM,qBAAqB;;;;;;;;;;;aAOhC,CAAC;AAEH,MAAM,MAAM,eAAe,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,qBAAqB,CAAC,CAAC"}
1
+ {"version":3,"file":"upstream.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/protocol/current/upstream.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,CAAC,MAAM,wCAAwC,CAAC;AAQ5D,sEAAsE;AACtE,eAAO,MAAM,0BAA0B;;;;;;;;;;;;;;eAA8B,CAAC;AACtE,MAAM,MAAM,oBAAoB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,0BAA0B,CAAC,CAAC;AAE9E;;;;;;;;;;;GAWG;AACH,eAAO,MAAM,qBAAqB;;;;;;;;;;;aAOhC,CAAC;AAEH,MAAM,MAAM,eAAe,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,qBAAqB,CAAC,CAAC"}
@@ -1 +1 @@
1
- {"version":3,"file":"change-streamer-service.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/change-streamer/change-streamer-service.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAYjD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,mBAAmB,CAAC;AAClD,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,uBAAuB,CAAC;AAGnD,OAAO,KAAK,EACV,YAAY,EAEb,MAAM,mCAAmC,CAAC;AAC3C,OAAO,EAEL,KAAK,gBAAgB,EACtB,MAAM,iDAAiD,CAAC;AACzD,OAAO,EAGL,KAAK,0BAA0B,EAChC,MAAM,qCAAqC,CAAC;AAC7C,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,2CAA2C,CAAC;AAMjF,OAAO,EACL,KAAK,qBAAqB,EAG3B,MAAM,sBAAsB,CAAC;AAY9B;;GAEG;AACH,wBAAsB,kBAAkB,CACtC,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,OAAO,EACd,MAAM,EAAE,MAAM,EACd,gBAAgB,EAAE,MAAM,EACxB,iBAAiB,EAAE,MAAM,EACzB,QAAQ,EAAE,UAAU,EACpB,YAAY,EAAE,YAAY,EAC1B,0BAA0B,EAAE,0BAA0B,EACtD,iBAAiB,EAAE,iBAAiB,EACpC,SAAS,EAAE,OAAO,EAClB,+BAA+B,EAAE,MAAM,EACvC,kCAAkC,EAAE,MAAM,EAC1C,YAAY,oBAAa,GACxB,OAAO,CAAC,qBAAqB,CAAC,CA4BhC;AAED;;;;;;;;;GASG;AACH,MAAM,MAAM,iBAAiB,GAAG,CAAC,SAAS,EAAE,MAAM,EAAE,gBAAgB,CAAC,CAAC"}
1
+ {"version":3,"file":"change-streamer-service.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/change-streamer/change-streamer-service.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAYjD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,mBAAmB,CAAC;AAClD,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,uBAAuB,CAAC;AAGnD,OAAO,KAAK,EACV,YAAY,EAEb,MAAM,mCAAmC,CAAC;AAC3C,OAAO,EAEL,KAAK,gBAAgB,EACtB,MAAM,iDAAiD,CAAC;AACzD,OAAO,EAGL,KAAK,0BAA0B,EAChC,MAAM,qCAAqC,CAAC;AAC7C,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,2CAA2C,CAAC;AAMjF,OAAO,EACL,KAAK,qBAAqB,EAI3B,MAAM,sBAAsB,CAAC;AAY9B;;GAEG;AACH,wBAAsB,kBAAkB,CACtC,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,OAAO,EACd,MAAM,EAAE,MAAM,EACd,gBAAgB,EAAE,MAAM,EACxB,iBAAiB,EAAE,MAAM,EACzB,QAAQ,EAAE,UAAU,EACpB,YAAY,EAAE,YAAY,EAC1B,0BAA0B,EAAE,0BAA0B,EACtD,iBAAiB,EAAE,iBAAiB,EACpC,SAAS,EAAE,OAAO,EAClB,+BAA+B,EAAE,MAAM,EACvC,kCAAkC,EAAE,MAAM,EAC1C,YAAY,oBAAa,GACxB,OAAO,CAAC,qBAAqB,CAAC,CA4BhC;AAED;;;;;;;;;GASG;AACH,MAAM,MAAM,iBAAiB,GAAG,CAAC,SAAS,EAAE,MAAM,EAAE,gBAAgB,CAAC,CAAC"}
@@ -178,6 +178,7 @@ var ChangeStreamerImpl = class {
178
178
  #initialWatermarks = /* @__PURE__ */ new Set();
179
179
  #serving = resolver();
180
180
  #txCounter = getOrCreateCounter("replication", "transactions", "Count of replicated transactions");
181
+ #latestStatus;
181
182
  #stream;
182
183
  constructor(lc, shard, taskID, discoveryAddress, discoveryProtocol, changeDB, replicaVersion, source, replicationStatusPublisher, autoReset, backPressureLimitHeapProportion, flowControlConsensusPaddingSeconds, setTimeoutFn = setTimeout) {
183
184
  this.id = `change-streamer`;
@@ -195,10 +196,13 @@ var ChangeStreamerImpl = class {
195
196
  this.#replicationStatusPublisher = replicationStatusPublisher;
196
197
  this.#autoReset = autoReset;
197
198
  this.#state = new RunningState(this.id, void 0, setTimeoutFn);
199
+ this.#latestStatus = { tag: "status" };
198
200
  }
199
201
  async run() {
200
202
  this.#lc.info?.("starting change stream");
201
203
  this.#forwarder.startProgressMonitor();
204
+ const lagReport = await this.#source.startLagReporter();
205
+ if (lagReport) this.#latestStatus.lagReport = lagReport;
202
206
  await this.#storer.assumeOwnership();
203
207
  const flushBytesThreshold = getDefaultHighWaterMark(false);
204
208
  while (this.#state.shouldRun()) {
@@ -218,6 +222,10 @@ var ChangeStreamerImpl = class {
218
222
  switch (type) {
219
223
  case "status":
220
224
  if (msg.ack) this.#storer.status(change);
225
+ if (msg.lagReport) {
226
+ this.#latestStatus.lagReport = msg.lagReport;
227
+ this.#forwarder.sendStatus(this.#latestStatus);
228
+ }
221
229
  continue;
222
230
  case "control":
223
231
  await this.#handleControlMessage(msg);
@@ -253,7 +261,7 @@ var ChangeStreamerImpl = class {
253
261
  if (watermark) {
254
262
  this.#lc.warn?.(`aborting interrupted transaction ${watermark}`);
255
263
  this.#storer.abort();
256
- await this.#forwarder.forward([watermark, ["rollback", { tag: "rollback" }]]);
264
+ this.#forwarder.forward([watermark, ["rollback", { tag: "rollback" }]]);
257
265
  }
258
266
  await Promise.all([
259
267
  this.#storer.stop(),
@@ -283,7 +291,7 @@ var ChangeStreamerImpl = class {
283
291
  const { protocolVersion, id, mode, replicaVersion, watermark, initial } = ctx;
284
292
  if (mode === "serving") this.#serving.resolve();
285
293
  const downstream = Subscription.create({ cleanup: () => this.#forwarder.remove(subscriber) });
286
- const subscriber = new Subscriber(protocolVersion, id, watermark, downstream);
294
+ const subscriber = new Subscriber(protocolVersion, id, watermark, downstream, () => this.#latestStatus);
287
295
  if (replicaVersion !== this.#replicaVersion) {
288
296
  this.#lc.warn?.(`rejecting subscriber at replica version ${replicaVersion}`);
289
297
  subscriber.close(1, `current replica version is ${this.#replicaVersion} (requested ${replicaVersion})`);