@rocicorp/zero 1.3.0-canary.2 → 1.3.0-canary.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. package/out/zero/package.js +2 -2
  2. package/out/zero/package.js.map +1 -1
  3. package/out/zero-cache/src/config/zero-config.d.ts +5 -0
  4. package/out/zero-cache/src/config/zero-config.d.ts.map +1 -1
  5. package/out/zero-cache/src/config/zero-config.js +10 -0
  6. package/out/zero-cache/src/config/zero-config.js.map +1 -1
  7. package/out/zero-cache/src/db/transaction-pool.d.ts +43 -40
  8. package/out/zero-cache/src/db/transaction-pool.d.ts.map +1 -1
  9. package/out/zero-cache/src/db/transaction-pool.js +76 -56
  10. package/out/zero-cache/src/db/transaction-pool.js.map +1 -1
  11. package/out/zero-cache/src/server/anonymous-otel-start.js +1 -1
  12. package/out/zero-cache/src/server/change-streamer.d.ts.map +1 -1
  13. package/out/zero-cache/src/server/change-streamer.js +8 -4
  14. package/out/zero-cache/src/server/change-streamer.js.map +1 -1
  15. package/out/zero-cache/src/server/logging.d.ts +1 -3
  16. package/out/zero-cache/src/server/logging.d.ts.map +1 -1
  17. package/out/zero-cache/src/server/logging.js +5 -2
  18. package/out/zero-cache/src/server/logging.js.map +1 -1
  19. package/out/zero-cache/src/server/main.d.ts.map +1 -1
  20. package/out/zero-cache/src/server/main.js +4 -4
  21. package/out/zero-cache/src/server/main.js.map +1 -1
  22. package/out/zero-cache/src/server/mutator.js +3 -1
  23. package/out/zero-cache/src/server/mutator.js.map +1 -1
  24. package/out/zero-cache/src/server/otel-log-sink.d.ts.map +1 -1
  25. package/out/zero-cache/src/server/otel-log-sink.js +0 -2
  26. package/out/zero-cache/src/server/otel-log-sink.js.map +1 -1
  27. package/out/zero-cache/src/server/otel-start.d.ts +1 -1
  28. package/out/zero-cache/src/server/otel-start.d.ts.map +1 -1
  29. package/out/zero-cache/src/server/otel-start.js +7 -3
  30. package/out/zero-cache/src/server/otel-start.js.map +1 -1
  31. package/out/zero-cache/src/server/reaper.js +3 -3
  32. package/out/zero-cache/src/server/reaper.js.map +1 -1
  33. package/out/zero-cache/src/server/replicator.d.ts.map +1 -1
  34. package/out/zero-cache/src/server/replicator.js +3 -1
  35. package/out/zero-cache/src/server/replicator.js.map +1 -1
  36. package/out/zero-cache/src/server/runner/run-worker.js +1 -1
  37. package/out/zero-cache/src/server/runner/run-worker.js.map +1 -1
  38. package/out/zero-cache/src/server/syncer.d.ts.map +1 -1
  39. package/out/zero-cache/src/server/syncer.js +7 -6
  40. package/out/zero-cache/src/server/syncer.js.map +1 -1
  41. package/out/zero-cache/src/services/change-source/pg/backfill-stream.js +4 -1
  42. package/out/zero-cache/src/services/change-source/pg/backfill-stream.js.map +1 -1
  43. package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts +58 -3
  44. package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts.map +1 -1
  45. package/out/zero-cache/src/services/change-source/pg/initial-sync.js +205 -48
  46. package/out/zero-cache/src/services/change-source/pg/initial-sync.js.map +1 -1
  47. package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts +5 -2
  48. package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts.map +1 -1
  49. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js +5 -5
  50. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js.map +1 -1
  51. package/out/zero-cache/src/services/change-streamer/storer.d.ts +5 -1
  52. package/out/zero-cache/src/services/change-streamer/storer.d.ts.map +1 -1
  53. package/out/zero-cache/src/services/change-streamer/storer.js +9 -4
  54. package/out/zero-cache/src/services/change-streamer/storer.js.map +1 -1
  55. package/out/zero-cache/src/services/life-cycle.js +1 -1
  56. package/out/zero-cache/src/services/life-cycle.js.map +1 -1
  57. package/out/zero-cache/src/services/replicator/write-worker.js +1 -1
  58. package/out/zero-cache/src/services/replicator/write-worker.js.map +1 -1
  59. package/out/zero-cache/src/services/view-syncer/cvr-store.js +2 -2
  60. package/out/zero-cache/src/services/view-syncer/cvr-store.js.map +1 -1
  61. package/out/zero-cache/src/services/view-syncer/row-record-cache.js +1 -1
  62. package/out/zero-cache/src/services/view-syncer/row-record-cache.js.map +1 -1
  63. package/out/zero-client/src/client/version.js +1 -1
  64. package/package.json +2 -2
package/out/zero-cache/src/services/change-streamer/change-streamer-service.js
@@ -19,11 +19,11 @@ import { getDefaultHighWaterMark } from "node:stream";
  /**
  * Performs initialization and schema migrations to initialize a ChangeStreamerImpl.
  */
- async function initializeStreamer(lc, shard, taskID, discoveryAddress, discoveryProtocol, changeDB, changeSource, replicationStatusPublisher, subscriptionState, purgeLock, autoReset, backPressureLimitHeapProportion, flowControlConsensusPaddingSeconds, setTimeoutFn = setTimeout) {
+ async function initializeStreamer(lc, shard, taskID, discoveryAddress, discoveryProtocol, changeDB, changeSource, replicationStatusPublisher, subscriptionState, purgeLock, autoReset, opts, setTimeoutFn = setTimeout) {
  await initChangeStreamerSchema(lc, changeDB, shard);
  await ensureReplicationConfig(lc, changeDB, subscriptionState, shard, autoReset, setTimeoutFn);
  const { replicaVersion } = subscriptionState;
- return new ChangeStreamerImpl(lc, shard, taskID, discoveryAddress, discoveryProtocol, changeDB, replicaVersion, changeSource, replicationStatusPublisher, purgeLock, autoReset, backPressureLimitHeapProportion, flowControlConsensusPaddingSeconds, setTimeoutFn);
+ return new ChangeStreamerImpl(lc, shard, taskID, discoveryAddress, discoveryProtocol, changeDB, replicaVersion, changeSource, replicationStatusPublisher, purgeLock, autoReset, opts, setTimeoutFn);
  }
  var REPLICATION_STATUS_ERROR_DELAY_THRESHOLD_MS = 5e3;
  /**
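The hunk above is an options-object refactor: the two trailing tuning numbers are collapsed into a single `opts` argument, typed in the TypeScript source (visible in the regenerated source map further down) as `TuningOptions`, an intersection of the Storer's own tuning options with the Forwarder's padding setting. A minimal sketch of that type, assuming `StorerOptions` carries the former `backPressureLimitHeapProportion` field (its real definition lives in `storer.ts` and is not part of this diff); the values are illustrative only:

```ts
// Assumed shape of the Storer's options; the actual type is defined in
// ./storer.ts and is not shown in this diff.
type StorerOptions = {backPressureLimitHeapProportion: number};

// Matches the type exported by the new change-streamer-service.ts:
type TuningOptions = StorerOptions & {
  flowControlConsensusPaddingSeconds: number;
};

// Callers now build one object instead of passing two positional numbers.
// These values are placeholders, not recommended settings.
const opts: TuningOptions = {
  backPressureLimitHeapProportion: 0.5,
  flowControlConsensusPaddingSeconds: 2,
};
```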
@@ -183,7 +183,7 @@ var ChangeStreamerImpl = class {
  #latestStatus;
  #purgeLock;
  #stream;
- constructor(lc, shard, taskID, discoveryAddress, discoveryProtocol, changeDB, replicaVersion, source, replicationStatusPublisher, initialPurgeLock, autoReset, backPressureLimitHeapProportion, flowControlConsensusPaddingSeconds, setTimeoutFn = setTimeout) {
+ constructor(lc, shard, taskID, discoveryAddress, discoveryProtocol, changeDB, replicaVersion, source, replicationStatusPublisher, initialPurgeLock, autoReset, opts, setTimeoutFn = setTimeout) {
  this.id = `change-streamer`;
  this.#lc = lc.withContext("component", "change-streamer");
  this.#shard = shard;
@@ -194,8 +194,8 @@ var ChangeStreamerImpl = class {
  "status",
  consumed[1],
  consumed[2]
- ]), (err) => this.stop(err), backPressureLimitHeapProportion);
- this.#forwarder = new Forwarder(lc, { flowControlConsensusPaddingSeconds });
+ ]), (err) => this.stop(err), opts);
+ this.#forwarder = new Forwarder(lc, { flowControlConsensusPaddingSeconds: opts.flowControlConsensusPaddingSeconds });
  this.#replicationStatusPublisher = replicationStatusPublisher;
  this.#purgeLock = initialPurgeLock;
  this.#autoReset = autoReset;
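As this last hunk shows, the constructor fans the single `opts` out to its two consumers: the `Storer` takes the whole object (its options type is a subset of `TuningOptions`), while the `Forwarder` is handed only the one field it needs. A reduced sketch of that wiring with the collaborators stubbed out (everything beyond the option plumbing is hypothetical; the real classes take many more arguments):

```ts
type TuningOptions = {
  backPressureLimitHeapProportion: number; // consumed by the Storer
  flowControlConsensusPaddingSeconds: number; // consumed by the Forwarder
};

// Stub collaborators, for illustration only.
class Storer {
  constructor(readonly opts: TuningOptions) {}
}
class Forwarder {
  constructor(readonly opts: {flowControlConsensusPaddingSeconds: number}) {}
}

function wire(opts: TuningOptions) {
  // The Storer receives the full options object...
  const storer = new Storer(opts);
  // ...while the Forwarder gets only its padding setting picked off.
  const forwarder = new Forwarder({
    flowControlConsensusPaddingSeconds: opts.flowControlConsensusPaddingSeconds,
  });
  return {storer, forwarder};
}
```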
package/out/zero-cache/src/services/change-streamer/change-streamer-service.js.map
@@ -1 +1 @@
- {"version":3,"file":"change-streamer-service.js",…}
+ {"version":3,"file":"change-streamer-service.js",…}
  [Regenerated single-line source map, truncated here. The embedded `sourcesContent` carries the corresponding TypeScript change: the import from './storer.ts' now also pulls in `type TuningOptions as StorerOptions`, the module exports `type TuningOptions = StorerOptions & {flowControlConsensusPaddingSeconds: number}`, and both `initializeStreamer` and the `ChangeStreamerImpl` constructor accept a single `opts: TuningOptions` in place of the separate `backPressureLimitHeapProportion` and `flowControlConsensusPaddingSeconds` parameters.]
AI,iBAAiB,CAAC;;AAExC;GACF,QACE,aAAY,IAAI;;;CAItB,UAAU,KAAqD;EAC7D,MAAM,EAAC,iBAAiB,IAAI,MAAM,gBAAgB,cAAa;AAC/D,MAAI,SAAS,UACX,OAAA,QAAc,SAAS;EAEzB,MAAM,aAAa,aAAa,OAAmB,EACjD,eAAe,MAAA,UAAgB,OAAO,WAAW,EAClD,CAAC;EACF,MAAM,aAAa,IAAI,WACrB,iBACA,IACA,WACA,kBACM,MAAA,aACP;AACD,MAAI,mBAAmB,MAAA,gBAAsB;AAC3C,SAAA,GAAS,OACP,2CAA2C,iBAC5C;AACD,cAAW,MACT,GACA,8BACE,MAAA,eACD,cAAc,eAAe,GAC/B;SACI;AACL,SAAA,GAAS,QAAQ,qBAAqB,WAAW,KAAK;AAEtD,SAAA,UAAgB,IAAI,WAAW;AAC/B,SAAA,OAAa,QAAQ,YAAY,KAAK;;AAExC,SAAO,QAAQ,QAAQ,WAAW;;CAGpC,gBAAgB,WAAmB;EACjC,MAAM,WAAW,MAAA,kBAAwB;AACzC,QAAA,kBAAwB,IAAI,UAAU;AAEtC,MAAI,aAAa,EACf,OAAA,MAAY,iBAAiB,MAAA,iBAAuB,EAAE,iBAAiB;;CAI3E,MAAM,oBAGH;EACD,MAAM,eAAe,MAAM,MAAA,OAAa,2BAA2B;AACnE,MAAI,CAAC,aACH,OAAA,GAAS,OACP,+EACD;AAEH,SAAO;GACL,gBAAgB,MAAA;GAChB,cAAc,gBAAgB,MAAA;GAC/B;;;;;;;CAQH,OAAA,kBAAwC;EACtC,MAAM,UAAU,CAAC,GAAG,MAAA,kBAAwB;AAC5C,MAAI,QAAQ,WAAW,GAAG;AACxB,SAAA,GAAS,OAAO,6CAA6C;AAC7D;;EAEF,MAAM,UAAU,CAAC,GAAG,MAAA,UAAgB,SAAS,CAAC;AAC9C,MAAI,QAAQ,WAAW,GAAG;AAGxB,SAAA,GAAS,OAAO,oCAAoC;AACpD;;AAEF,MAAI;GACF,MAAM,kBAAkB,IAAI,GAAI,QAAoC;GACpE,MAAM,kBAAkB,IAAI,GAAI,QAAoC;AACpE,OAAI,kBAAkB,gBACpB,OAAA,GAAS,OACP,yCAAyC,gBAAgB,KAAK,gBAAgB,GAC/E;QACI;AACL,UAAA,GAAS,OAAO,0BAA0B,gBAAgB,MAAM;IAChE,MAAM,QAAQ,YAAY,KAAK;IAC/B,MAAM,UAAU,MAAM,MAAA,OAAa,mBAAmB,gBAAgB;IACtE,MAAM,WAAW,YAAY,KAAK,GAAG,OAAO,QAAQ,EAAE;AACtD,UAAA,GAAS,OACP,UAAU,QAAQ,kBAAkB,gBAAgB,IAAI,QAAQ,MACjE;AACD,UAAA,kBAAwB,OAAO,gBAAgB;;WAE1C,GAAG;AACV,SAAA,GAAS,OAAO,4BAA4B,EAAE;YACtC;AACR,OAAI,MAAA,kBAAwB,KAE1B,OAAA,MAAY,iBAAiB,MAAA,iBAAuB,EAAE,iBAAiB;;;CAK7E,MAAM,KAAK,KAAe;AACxB,QAAA,MAAY,KAAK,MAAA,IAAU,IAAI;AAC/B,QAAA,QAAc,QAAQ,QAAQ;AAC9B,QAAM,MAAA,OAAa,MAAM;AACzB,QAAM,MAAA,OAAa,MAAM;;;AAmB7B,IAAM,mBAAmB,6BAA6B"}
@@ -9,6 +9,10 @@ import type { ReplicatorMode } from '../replicator/replicator.ts';
  import type { Service } from '../service.ts';
  import type { WatermarkedChange } from './change-streamer-service.ts';
  import type { Subscriber } from './subscriber.ts';
+ export type TuningOptions = {
+     backPressureLimitHeapProportion: number;
+     statementTimeoutMs: number;
+ };
  /**
  * Handles the storage of changes and the catchup of subscribers
  * that are behind.
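The new `TuningOptions` type bundles the Storer's two tuning knobs into one options object. A minimal sketch of constructing one follows; the values are illustrative assumptions, not the package's defaults (those come from the zero-config changes listed at the top of this diff):

```ts
import type {TuningOptions} from './storer.ts'; // assumed import path

const tuning: TuningOptions = {
  // Fraction of the remaining V8 heap that queued changes may occupy
  // before the storer applies back pressure (illustrative value).
  backPressureLimitHeapProportion: 0.5,
  // Per-statement timeout in milliseconds, forwarded to the change-DB
  // TransactionPool as `statementResponseTimeout` (illustrative value).
  statementTimeoutMs: 30_000,
};
```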
@@ -42,7 +46,7 @@ import type { Subscriber } from './subscriber.ts';
  export declare class Storer implements Service {
  #private;
  readonly id = "storer";
- constructor(lc: LogContext, shard: ShardID, taskID: string, discoveryAddress: string, discoveryProtocol: string, db: PostgresDB, replicaVersion: string, onConsumed: (c: Commit | UpstreamStatusMessage) => void, onFatal: (err: Error) => void, backPressureLimitHeapProportion: number);
+ constructor(lc: LogContext, shard: ShardID, taskID: string, discoveryAddress: string, discoveryProtocol: string, db: PostgresDB, replicaVersion: string, onConsumed: (c: Commit | UpstreamStatusMessage) => void, onFatal: (err: Error) => void, { backPressureLimitHeapProportion, statementTimeoutMs }: TuningOptions);
  assumeOwnership(purgeLock?: PurgeLock | null): Promise<void>;
  getStartStreamInitializationParameters(): Promise<{
  lastWatermark: string;
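At call sites, the trailing `backPressureLimitHeapProportion: number` argument becomes a `TuningOptions` object. A hedged sketch of the migration, inferring the nine unchanged leading parameters from the class itself so the example stays type-correct (the import path and `leading` stand-in are assumptions for illustration):

```ts
import {Storer, type TuningOptions} from './storer.ts'; // assumed import path

// Everything before the trailing TuningOptions parameter is unchanged.
type LeadingArgs = ConstructorParameters<typeof Storer> extends
  [...infer Leading, TuningOptions] ? Leading : never;
declare const leading: LeadingArgs; // stand-in for the caller's existing values

// 1.3.0-canary.2: new Storer(...leading, 0.5)
// 1.3.0-canary.3: the trailing number becomes a TuningOptions object.
const storer = new Storer(...leading, {
  backPressureLimitHeapProportion: 0.5, // illustrative values
  statementTimeoutMs: 30_000,
});
```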
@@ -1 +1 @@
- [storer.d.ts.map: regenerated source map, one line; generated mappings omitted]
+ [storer.d.ts.map: regenerated source map, one line; generated mappings omitted]
@@ -63,9 +63,10 @@ var Storer = class {
  #onFatal;
  #queue = new Queue();
  #backPressureThresholdBytes;
+ #statementTimeoutMs;
  #approximateQueuedBytes = 0;
  #running = false;
- constructor(lc, shard, taskID, discoveryAddress, discoveryProtocol, db, replicaVersion, onConsumed, onFatal, backPressureLimitHeapProportion) {
+ constructor(lc, shard, taskID, discoveryAddress, discoveryProtocol, db, replicaVersion, onConsumed, onFatal, { backPressureLimitHeapProportion, statementTimeoutMs }) {
  this.#lc = lc.withContext("component", "change-log");
  this.#shard = shard;
  this.#taskID = taskID;
@@ -75,6 +76,7 @@ var Storer = class {
  this.#replicaVersion = replicaVersion;
  this.#onConsumed = onConsumed;
  this.#onFatal = onFatal;
+ this.#statementTimeoutMs = statementTimeoutMs;
  const heapStats = getHeapStatistics();
  this.#backPressureThresholdBytes = (heapStats.heap_size_limit - heapStats.used_heap_size) * backPressureLimitHeapProportion;
  this.#lc.info?.(`Using up to ${(this.#backPressureThresholdBytes / 1024 ** 2).toFixed(2)} MB of --max-old-space-size (~${(heapStats.heap_size_limit / 1024 ** 2).toFixed(2)} MB) to absorb upstream spikes`, { heapStats });
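The threshold computed in the hunk above is derived from V8 heap statistics at construction time. A small runnable sketch of the same arithmetic, with an assumed proportion of 0.5 (the real value is configured via `TuningOptions`):

```ts
import {getHeapStatistics} from 'node:v8';

// Worked example: with a 4096 MB heap limit, 512 MB already in use, and a
// proportion of 0.5, the storer buffers up to (4096 - 512) * 0.5 = 1792 MB
// of queued changes before applying back pressure.
const {heap_size_limit, used_heap_size} = getHeapStatistics();
const backPressureLimitHeapProportion = 0.5; // illustrative assumption
const thresholdBytes =
  (heap_size_limit - used_heap_size) * backPressureLimitHeapProportion;
console.log(
  `threshold ~= ${(thresholdBytes / 1024 ** 2).toFixed(2)} MB of ` +
    `--max-old-space-size (~${(heap_size_limit / 1024 ** 2).toFixed(2)} MB)`,
);
```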
@@ -267,7 +269,10 @@ var Storer = class {
  const { promise, resolve, reject } = resolver();
  promise.catch(() => {});
  tx = {
- pool: new TransactionPool(this.#lc.withContext("watermark", watermark), READ_COMMITTED),
+ pool: new TransactionPool(this.#lc.withContext("watermark", watermark), {
+ mode: READ_COMMITTED,
+ statementResponseTimeout: this.#statementTimeoutMs
+ }),
  preCommitWatermark: watermark,
  pos: 0,
  startingReplicationState: promise,
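`TransactionPool`'s second parameter is now an options object rather than a bare mode (matching the transaction-pool.js changes in this release's file list), which is what lets the storer thread `statementTimeoutMs` through to the change DB. A sketch of the call-site difference, with declared stand-ins for values the storer already holds (import paths assumed from the compiled sources):

```ts
import type {LogContext} from '@rocicorp/logger';
import {TransactionPool} from '../../db/transaction-pool.ts'; // assumed path
import * as Mode from '../../db/mode-enum.ts';                // assumed path

declare const lc: LogContext;             // stand-in for this.#lc
declare const statementTimeoutMs: number; // stand-in, from TuningOptions

// canary.2: new TransactionPool(lc, Mode.READ_COMMITTED)
// canary.3: the mode moves into an options object, alongside the timeout.
const pool = new TransactionPool(lc, {
  mode: Mode.READ_COMMITTED,
  statementResponseTimeout: statementTimeoutMs,
});

// Pools with no timeout pass only the mode, so the timeout option appears
// to be optional (see #startCatchup and PurgeLocker.acquire below):
const reader = new TransactionPool(lc, {mode: Mode.READONLY});
```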
@@ -323,7 +328,7 @@ var Storer = class {
  }
  async #startCatchup(subs) {
  if (subs.length === 0) return;
- const reader = new TransactionPool(this.#lc.withContext("pool", "catchup"), READONLY);
+ const reader = new TransactionPool(this.#lc.withContext("pool", "catchup"), { mode: READONLY });
  reader.run(this.#db);
  let lastWatermark;
  try {
@@ -537,7 +542,7 @@ var PurgeLocker = class {
  return this.#db(`${cdcSchema(this.#shard)}.${table}`);
  }
  async acquire() {
- const tx = new TransactionPool(this.#lc, READ_COMMITTED).run(this.#db);
+ const tx = new TransactionPool(this.#lc, { mode: READ_COMMITTED }).run(this.#db);
  const row = await tx.processReadTask((sql) => sql`
  SELECT watermark FROM ${this.#cdc("changeLog")}
  ORDER BY watermark, pos LIMIT 1
@@ -1 +1 @@
- [storer.js.map: regenerated source map, one line; generated mappings and embedded sourcesContent omitted]
+ [storer.js.map: regenerated source map, one line; generated mappings and embedded sourcesContent omitted]
e : new AbortError('server shutting down');\n for (const entry of queue) {\n if (entry === 'stop') {\n continue;\n }\n const type = entry[0];\n switch (type) {\n case 'subscriber': {\n // Disconnect subscribers waiting to be caught up so that they can\n // reconnect and try again.\n const {subscriber} = entry[1];\n this.#lc.info?.(`disconnecting ${subscriber.id}`);\n subscriber.fail(err);\n break;\n }\n }\n }\n }\n\n async #processQueue() {\n let tx: PendingTransaction | null = null;\n let msg: QueueEntry | false;\n\n const catchupQueue: SubscriberAndMode[] = [];\n try {\n while ((msg = await this.#queue.dequeue()) !== 'stop') {\n const [msgType] = msg;\n switch (msgType) {\n case 'ready': {\n const signalReady = msg[1];\n signalReady();\n continue;\n }\n case 'subscriber': {\n const subscriber = msg[1];\n if (tx) {\n catchupQueue.push(subscriber); // Wait for the current tx to complete.\n } else {\n await this.#startCatchup([subscriber]); // Catch up immediately.\n }\n continue;\n }\n case 'status':\n this.#onConsumed(msg);\n continue;\n case 'abort': {\n if (tx) {\n tx.pool.abort();\n await tx.pool.done();\n tx = null;\n }\n continue;\n }\n }\n // msgType === 'change'\n const [_, watermark, json, change] = msg;\n const tag = change?.tag;\n this.#approximateQueuedBytes -= json.length;\n\n if (tag === 'begin') {\n assert(!tx, 'received BEGIN in the middle of a transaction');\n const {promise, resolve, reject} = resolver<ReplicationState>();\n void promise.catch(() => {}); // handle rejections before the await\n tx = {\n pool: new TransactionPool(\n this.#lc.withContext('watermark', watermark),\n {\n mode: Mode.READ_COMMITTED,\n statementResponseTimeout: this.#statementTimeoutMs,\n },\n ),\n preCommitWatermark: watermark,\n pos: 0,\n startingReplicationState: promise,\n ack: !change.skipAck,\n };\n tx.pool.run(this.#db);\n // Acquire a lock on the replicationState row to detect and/or prevent\n // a concurrent ownership change.\n void tx.pool.process(tx => {\n tx<ReplicationState[]> /*sql*/ `\n SELECT * FROM ${this.#cdc('replicationState')} FOR UPDATE`.then(\n ([result]) => resolve(result),\n reject,\n );\n return [];\n });\n } else {\n assert(tx, () => `received change outside of transaction: ${json}`);\n tx.pos++;\n }\n\n const entry = {\n watermark: tag === 'commit' ? watermark : tx.preCommitWatermark,\n precommit: tag === 'commit' ? tx.preCommitWatermark : null,\n pos: tx.pos,\n change: json,\n };\n\n const processed = tx.pool.process(sql => [\n sql`INSERT INTO ${this.#cdc('changeLog')} ${sql(entry)}`,\n ...(change !== null && isSchemaChange(change)\n ? 
this.#trackBackfillMetadata(sql, change)\n : []),\n ]);\n\n if (tx.pos % 100 === 0) {\n // Backpressure is exerted on commit when awaiting tx.pool.done().\n // However, backpressure checks need to be regularly done for\n // very large transactions in order to avoid memory blowup.\n await processed;\n }\n this.#maybeReleaseBackPressure();\n\n if (tag === 'commit') {\n const {owner} = await tx.startingReplicationState;\n if (owner !== this.#taskID) {\n // Ownership change reflected in the replicationState read in 'begin'.\n tx.pool.fail(\n new AbortError(\n `changeLog ownership has been assumed by ${owner}`,\n ),\n );\n } else {\n // Update the replication state.\n const lastWatermark = watermark;\n void tx.pool.process(tx => [\n tx`\n UPDATE ${this.#cdc('replicationState')} SET ${tx({lastWatermark})}`,\n ]);\n tx.pool.setDone();\n }\n\n await tx.pool.done();\n\n // ACK the LSN to the upstream Postgres.\n if (tx.ack) {\n this.#onConsumed(['commit', change, {watermark}]);\n }\n tx = null;\n\n // Before beginning the next transaction, open a READONLY snapshot to\n // concurrently catchup any queued subscribers.\n await this.#startCatchup(catchupQueue.splice(0));\n } else if (tag === 'rollback') {\n // Aborted transactions are not stored in the changeLog. Abort the current tx\n // and process catchup of subscribers that were waiting for it to end.\n tx.pool.abort();\n await tx.pool.done();\n tx = null;\n\n await this.#startCatchup(catchupQueue.splice(0));\n }\n }\n } catch (e) {\n catchupQueue.forEach(({subscriber}) => subscriber.fail(e));\n throw e;\n }\n }\n\n async #startCatchup(subs: SubscriberAndMode[]) {\n if (subs.length === 0) {\n return;\n }\n\n const reader = new TransactionPool(\n this.#lc.withContext('pool', 'catchup'),\n {mode: Mode.READONLY},\n );\n reader.run(this.#db);\n\n let lastWatermark: string | undefined;\n try {\n // Ensure that the transaction has started (and is thus holding a snapshot\n // of the database) before continuing on to commit more changes. This is\n // done by performing a single read on the db, which determines the\n // snapshot for the REPEATABLE_READ transaction.\n [{lastWatermark}] = await reader.processReadTask(\n sql => sql<ReplicationState[]>`\n SELECT * FROM ${this.#cdc('replicationState')}\n `,\n );\n } catch (e) {\n subs.map(({subscriber}) => subscriber.fail(e));\n throw e;\n }\n\n // Run the actual catchup queries in the background. Errors are handled in\n // #catchup() by disconnecting the associated subscriber.\n void Promise.all(\n subs.map(sub => this.#catchup(sub, lastWatermark, reader)),\n ).finally(() => reader.setDone());\n }\n\n async #catchup(\n {subscriber: sub, mode}: SubscriberAndMode,\n lastWatermark: string,\n reader: TransactionPool,\n ) {\n try {\n await reader.processReadTask(async tx => {\n const start = Date.now();\n\n // When starting from initial-sync, there won't be a change with a watermark\n // equal to the replica version. This is the empty changeLog scenario.\n let watermarkFound = sub.watermark === this.#replicaVersion;\n let count = 0;\n let lastBatchConsumed: Promise<unknown> | undefined;\n\n for await (const entries of tx<ChangeEntry[]> /*sql*/ `\n SELECT watermark, change FROM ${this.#cdc('changeLog')}\n WHERE watermark >= ${sub.watermark}\n AND watermark <= ${lastWatermark}\n ORDER BY watermark, pos`.cursor(2000)) {\n // Wait for the last batch of entries to be consumed by the\n // subscriber before sending down the current batch. 
This pipelining\n // allows one batch of changes to be received from the change-db\n // while the previous batch of changes are sent to the subscriber,\n // resulting in flow control that caps the number of changes\n // referenced in memory to 2 * batch-size.\n const start = performance.now();\n await lastBatchConsumed;\n const elapsed = performance.now() - start;\n if (lastBatchConsumed) {\n (elapsed > 100 ? this.#lc.info : this.#lc.debug)?.(\n `waited ${elapsed.toFixed(3)} ms for ${sub.id} to consume last batch of catchup entries`,\n );\n }\n\n for (const entry of entries) {\n if (entry.watermark === sub.watermark) {\n // This should be the first entry.\n // Catchup starts from *after* the watermark.\n watermarkFound = true;\n } else if (watermarkFound) {\n lastBatchConsumed = sub.catchup(toDownstream(entry));\n count++;\n } else if (mode === 'backup') {\n throw new AutoResetSignal(\n `backup replica at watermark ${sub.watermark} is behind change db: ${entry.watermark})`,\n );\n } else {\n this.#lc.warn?.(\n `rejecting subscriber at watermark ${sub.watermark} (earliest watermark: ${entry.watermark})`,\n );\n sub.close(\n ErrorType.WatermarkTooOld,\n `earliest supported watermark is ${entry.watermark} (requested ${sub.watermark})`,\n );\n return;\n }\n }\n }\n if (watermarkFound) {\n await lastBatchConsumed;\n this.#lc.info?.(\n `caught up ${sub.id} with ${count} changes (${\n Date.now() - start\n } ms)`,\n );\n } else {\n this.#lc.warn?.(\n `subscriber at watermark ${sub.watermark} is ahead of latest watermark`,\n );\n }\n // Flushes the backlog of messages buffered during catchup and\n // allows the subscription to forward subsequent messages immediately.\n sub.setCaughtUp();\n });\n } catch (err) {\n this.#lc.error?.(`error while catching up subscriber ${sub.id}`, err);\n if (err instanceof AutoResetSignal) {\n await markResetRequired(this.#db, this.#shard);\n this.#onFatal(err);\n }\n sub.fail(err);\n }\n }\n\n /**\n * Returns the db statements necessary to track backfill and table metadata\n * presented in the `change`, if any.\n */\n #trackBackfillMetadata(sql: PostgresTransaction, change: SchemaChange) {\n const stmts: PendingQuery<Row[]>[] = [];\n\n switch (change.tag) {\n case 'update-table-metadata': {\n const {table, new: metadata} = change;\n stmts.push(this.#upsertTableMetadataStmt(sql, table, metadata));\n break;\n }\n\n case 'create-table': {\n const {spec, metadata, backfill} = change;\n if (metadata) {\n stmts.push(this.#upsertTableMetadataStmt(sql, spec, metadata));\n }\n if (backfill) {\n Object.entries(backfill).forEach(([col, backfill]) => {\n stmts.push(\n this.#upsertColumnBackfillStmt(sql, spec, col, backfill),\n );\n });\n }\n break;\n }\n\n case 'rename-table': {\n const {old} = change;\n const row = {schema: change.new.schema, table: change.new.name};\n stmts.push(\n sql`UPDATE ${this.#cdc('tableMetadata')} SET ${sql(row)}\n WHERE \"schema\" = ${old.schema} AND \"table\" = ${old.name}`,\n sql`UPDATE ${this.#cdc('backfilling')} SET ${sql(row)}\n WHERE \"schema\" = ${old.schema} AND \"table\" = ${old.name}`,\n );\n break;\n }\n\n case 'drop-table': {\n const {\n id: {schema, name},\n } = change;\n stmts.push(\n sql`DELETE FROM ${this.#cdc('tableMetadata')}\n WHERE \"schema\" = ${schema} AND \"table\" = ${name}`,\n sql`DELETE FROM ${this.#cdc('backfilling')}\n WHERE \"schema\" = ${schema} AND \"table\" = ${name}`,\n );\n break;\n }\n\n case 'add-column': {\n const {table, tableMetadata, column, backfill} = change;\n if (tableMetadata) {\n 
stmts.push(this.#upsertTableMetadataStmt(sql, table, tableMetadata));\n }\n if (backfill) {\n stmts.push(\n this.#upsertColumnBackfillStmt(sql, table, column.name, backfill),\n );\n }\n break;\n }\n\n case 'update-column': {\n const {\n table: {schema, name: table},\n old: {name: oldName},\n new: {name: newName},\n } = change;\n if (oldName !== newName) {\n stmts.push(\n sql`UPDATE ${this.#cdc('backfilling')} SET \"column\" = ${newName}\n WHERE \"schema\" = ${schema} AND \"table\" = ${table} AND \"column\" = ${oldName}`,\n );\n }\n break;\n }\n\n case 'drop-column': {\n const {\n table: {schema, name},\n column,\n } = change;\n stmts.push(\n sql`DELETE FROM ${this.#cdc('backfilling')}\n WHERE \"schema\" = ${schema} AND \"table\" = ${name} AND \"column\" = ${column}`,\n );\n break;\n }\n\n case 'backfill-completed': {\n const {\n relation: {schema, name: table, rowKey},\n columns,\n } = change;\n const cols = [...rowKey.columns, ...columns];\n stmts.push(\n sql`DELETE FROM ${this.#cdc('backfilling')}\n WHERE \"schema\" = ${schema} AND \"table\" = ${table} AND \"column\" IN ${sql(cols)}`,\n );\n }\n }\n return stmts;\n }\n\n #upsertTableMetadataStmt(\n sql: PostgresTransaction,\n {schema, name: table}: Identifier,\n metadata: TableMetadata,\n ) {\n const row: TableMetadataRow = {schema, table, metadata};\n return sql`\n INSERT INTO ${this.#cdc('tableMetadata')} ${sql(row)}\n ON CONFLICT (\"schema\", \"table\") \n DO UPDATE SET ${sql(row)};\n `;\n }\n\n #upsertColumnBackfillStmt(\n sql: PostgresTransaction,\n {schema, name: table}: Identifier,\n column: string,\n backfill: BackfillID,\n ) {\n const row: BackfillingColumn = {schema, table, column, backfill};\n return sql`\n INSERT INTO ${this.#cdc('backfilling')} ${sql(row)}\n ON CONFLICT (\"schema\", \"table\", \"column\") \n DO UPDATE SET ${sql(row)};\n `;\n }\n\n /**\n * Waits until all currently queued entries have been processed.\n * This is only used in tests.\n */\n async allProcessed() {\n if (this.#running) {\n const {promise, resolve} = resolver();\n this.#queue.enqueue(['ready', resolve]);\n await promise;\n }\n }\n\n stop() {\n if (this.#running) {\n this.#lc.info?.(`draining ${this.#queue.size()} changeLog entries`);\n this.#queue.enqueue('stop');\n }\n return this.#stopped;\n }\n}\n\nfunction toDownstream(entry: ChangeEntry): WatermarkedChange {\n const {watermark, change} = entry;\n switch (change.tag) {\n case 'begin':\n return [watermark, ['begin', change, {commitWatermark: watermark}]];\n case 'commit':\n return [watermark, ['commit', change, {watermark}]];\n case 'rollback':\n return [watermark, ['rollback', change]];\n default:\n return [watermark, ['data', change]];\n }\n}\n\nexport class PurgeLock {\n readonly #lc: LogContext;\n readonly #tx: TransactionPool;\n readonly replicaVersion: string;\n readonly minWatermark: string;\n\n constructor(\n lc: LogContext,\n tx: TransactionPool,\n replicaVersion: string,\n watermark: string,\n ) {\n this.#lc = lc;\n this.#tx = tx;\n this.replicaVersion = replicaVersion;\n this.minWatermark = watermark;\n }\n\n #released = false;\n\n async release() {\n if (this.#released) {\n return;\n }\n this.#released = true;\n this.#tx.setDone();\n await this.#tx\n .done()\n .catch(e => this.#lc.warn?.(`error from purge-lock release`, e));\n this.#lc.info?.(`released purge lock on ${this.minWatermark}`);\n }\n}\n\nexport class PurgeLocker {\n readonly #lc: LogContext;\n readonly #shard: ShardID;\n readonly #db: PostgresDB;\n\n constructor(lc: LogContext, shard: ShardID, db: PostgresDB) {\n 
this.#lc = lc.withContext('component', 'purge-locker');\n this.#shard = shard;\n this.#db = db;\n }\n\n // For readability in SQL statements.\n #cdc(table: string) {\n return this.#db(`${cdcSchema(this.#shard)}.${table}`);\n }\n\n async acquire() {\n const tx = new TransactionPool(this.#lc, {mode: Mode.READ_COMMITTED}).run(\n this.#db,\n );\n const row = await tx.processReadTask(\n sql => sql<{watermark: string}[]>`\n SELECT watermark FROM ${this.#cdc('changeLog')}\n ORDER BY watermark, pos LIMIT 1\n FOR SHARE \n `,\n );\n if (row.length === 0) {\n this.#lc.info?.(`changeLog is empty. No rows to purge-lock.`);\n tx.setDone();\n await tx.done();\n return null;\n }\n const [{watermark}] = row;\n const [{replicaVersion}] = await tx.processReadTask(\n sql => sql<{replicaVersion: string}[]>`\n SELECT \"replicaVersion\" FROM ${this.#cdc('replicationConfig')}\n `,\n );\n this.#lc.info?.(\n `locked watermark ${watermark} from being purged from replica@${replicaVersion}`,\n );\n return new PurgeLock(this.#lc, tx, replicaVersion, watermark);\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;AAwEA,IAAM,yBAAyB,eAAE,MAAM,sBAAsB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAqC7D,IAAa,SAAb,MAAuC;CACrC,KAAc;CACd;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA,SAAkB,IAAI,OAAmB;CACzC;CACA;CAEA,0BAA0B;CAC1B,WAAW;CAEX,YACE,IACA,OACA,QACA,kBACA,mBACA,IACA,gBACA,YACA,SACA,EAAC,iCAAiC,sBAClC;AACA,QAAA,KAAW,GAAG,YAAY,aAAa,aAAa;AACpD,QAAA,QAAc;AACd,QAAA,SAAe;AACf,QAAA,mBAAyB;AACzB,QAAA,oBAA0B;AAC1B,QAAA,KAAW;AACX,QAAA,iBAAuB;AACvB,QAAA,aAAmB;AACnB,QAAA,UAAgB;AAChB,QAAA,qBAA2B;EAE3B,MAAM,YAAY,mBAAmB;AACrC,QAAA,8BACG,UAAU,kBAAkB,UAAU,kBACvC;AAEF,QAAA,GAAS,OACP,gBAAgB,MAAA,6BAAmC,QAAQ,GAAG,QAAQ,EAAE,CAAC,iCAC5C,UAAU,kBAAkB,QAAQ,GAAG,QAAQ,EAAE,CAAC,iCAE/E,EAAC,WAAU,CACZ;;CAIH,KAAK,OAAe;AAClB,SAAO,MAAA,GAAS,GAAG,UAAU,MAAA,MAAY,CAAC,GAAG,QAAQ;;CAGvD,MAAM,gBAAgB,WAA8B;EAClD,MAAM,KAAK,MAAA;EACX,MAAM,QAAQ,MAAA;EACd,MAAM,eAAe,MAAA;EACrB,MAAM,gBAAgB,MAAA;EAEtB,MAAM,sBACJ,kBAAkB,OACd,eACA,GAAG,cAAc,KAAK;AAC5B,QAAA,GAAS,OAAO,yBAAyB,sBAAsB;EAC/D,MAAM,QAAQ,YAAY,KAAK;AAC/B,QAAM,EAAE,UAAU,MAAA,IAAU,mBAAmB,CAAC,OAAO,GAAG;GAAC;GAAO,cAAc;GAAoB,CAAC;EACrG,MAAM,WAAW,YAAY,KAAK,GAAG,OAAO,QAAQ,EAAE;AACtD,QAAA,GAAS,OACP,wBAAwB,oBAAoB,IAAI,QAAQ,MACzD;AAED,MAAI,UAKG,WAAU,SAAS;;CAI5B,MAAM,yCAGH;EACD,MAAM,CAAC,CAAC,EAAC,kBAAiB,UAAU,MAAM,MACxC,MAAA,KACA,QAAO,CACL,GAA8B;sCACA,MAAA,IAAU,mBAAmB,IAK3D,GAAG;;;;;;;;;iBASM,MAAA,IAAU,cAAc,CAAC;sBACpB,MAAA,IAAU,gBAAgB,CAAC;;;UAI1C,EACD,EAAC,MAAM,UAAc,CACtB;AAED,SAAO;GACL;GACA,kBAAkB,MAAQ,QAAQ,uBAAuB;GAC1D;;CAGH,MAAM,4BAAoD;EACxD,MAAM,CAAC,EAAC,kBAAiB,MAAM,MAAA,EACvB;qDACyC,MAAA,IAAU,YAAY;AACvE,SAAO;;CAGT,mBAAmB,WAAoC;AACrD,SAAO,MAAM,MAAA,IAAU,OAAM,QAAO;AAGlC,SAAM,GAA0B;kCACJ,MAAA,IAAU,YAAY,CAAC;;;;GASnD,MAAM,CAAC,EAAC,aAAY,MAAM,GAAwB;;wBAEhC,MAAA,IAAU,YAAY,CAAC,qBAAqB,UAAU;;;GAIxE,MAAM,CAAC,EAAC,WAAU,MAAM,GAAuB;wBAC7B,MAAA,IAAU,mBAAmB,CAAC;AAChD,OAAI,UAAU,MAAA,OACZ,OAAM,IAAI,WACR,+BAA+B,UAAU,uCAAuC,QACjF;AAEH,UAAO,OAAO,QAAQ;IACtB;;;;;CAMJ,MAAM,OAA0B;EAC9B,MAAM,CAAC,WAAW,CAAC,MAAM,WAAW;EAQpC,MAAM,OAAO,WAAW,UAAU,OAAO;AACzC,QAAA,0BAAgC,KAAK;AAErC,QAAA,MAAY,QAAQ;GAClB;GACA;GACA;GACA,aAAa,OAAO,GAAG,OAAO;GAC/B,CAAC;AAEF,SAAO,KAAK;;CAGd,QAAQ;AACN,QAAA,MAAY,QAAQ,CAAC,QAAQ,CAAC;;CAGhC,OAAO,GAA4B;AACjC,QAAA,MAAY,QAAQ,EAAE;;CAGxB,QAAQ,YAAwB,MAAsB;AACpD,QAAA,MAAY,QAAQ,CAAC,cAAc;GAAC;GAAY;GAAK,CAAC,CAAC;;CAGzD,gBAAuC;CAEvC,eAA0C;AACxC,MAAI,CAAC,MAAA,QACH;AAEF,MACE,MAAA,iBAAuB,QACvB,MAAA,yBAA+B,MAAA,4BAC/B;AACA,SAAA,GAAS,OACP,+BAA+B,MAAA,MAAY,MAAM,CAAC,qBAAqB,MAAA,yBAA+B,QAAQ,GAAG,QAAQ,EAAE,CAAC,4SAW7H;AACD,SAAA,eAAqB,UAAU;;AAEjC,SAAO,MAAA,
cAAoB;;CAG7B,4BAA4B;AAC1B,MACE,MAAA,iBAAuB,QAEvB,MAAA,yBAA+B,MAAA,6BAAmC,IAClE;AACA,SAAA,GAAS,OACP,gCAAgC,MAAA,MAAY,MAAM,CAAC,qBAAqB,MAAA,yBAA+B,QAAQ,GAAG,QAAQ,EAAE,CAAC,MAC9H;AACD,SAAA,aAAmB,SAAS;AAC5B,SAAA,eAAqB;;;CAIzB,WAAW;;;;;CAMX,MAAM,MAAM;AACV,SAAO,CAAC,MAAA,SAAe,4BAA4B;EAEnD,MAAM,EAAC,SAAS,SAAS,SAAS,kBAAiB,UAAU;AAC7D,QAAA,UAAgB;AAChB,QAAA,UAAgB;AAEhB,QAAA,GAAS,OAAO,kBAAkB;EAClC,IAAI;AACJ,MAAI;AACF,SAAM,MAAA,cAAoB;WACnB,GAAG;AACV,SAAM;AACN,SAAM;YACE;AAER,OAAI,MAAA,iBAAuB,MAAM;AAC/B,UAAA,aAAmB,SAAS;AAC5B,UAAA,eAAqB;;AAEvB,SAAA,mBACE,MAAA,MAAY,OAAO,CAAC,QAAO,UAAS,UAAU,KAAA,EAAU,EACxD,IACD;AACD,SAAA,UAAgB;AAChB,kBAAe;AACf,SAAA,GAAS,OAAO,iBAAiB;;;CAIrC,oBAAoB,OAAqB,GAAY;AACnD,MAAI,MAAM,WAAW,EACnB;AAEF,QAAA,GAAS,OACP,aAAa,MAAM,OAAO,mCAC3B;EACD,MAAM,MAAM,aAAa,QAAQ,IAAI,IAAI,WAAW,uBAAuB;AAC3E,OAAK,MAAM,SAAS,OAAO;AACzB,OAAI,UAAU,OACZ;AAGF,WADa,MAAM,IACnB;IACE,KAAK,cAAc;KAGjB,MAAM,EAAC,eAAc,MAAM;AAC3B,WAAA,GAAS,OAAO,iBAAiB,WAAW,KAAK;AACjD,gBAAW,KAAK,IAAI;AACpB;;;;;CAMR,OAAA,eAAsB;EACpB,IAAI,KAAgC;EACpC,IAAI;EAEJ,MAAM,eAAoC,EAAE;AAC5C,MAAI;AACF,WAAQ,MAAM,MAAM,MAAA,MAAY,SAAS,MAAM,QAAQ;IACrD,MAAM,CAAC,WAAW;AAClB,YAAQ,SAAR;KACE,KAAK,SAAS;MACZ,MAAM,cAAc,IAAI;AACxB,mBAAa;AACb;;KAEF,KAAK,cAAc;MACjB,MAAM,aAAa,IAAI;AACvB,UAAI,GACF,cAAa,KAAK,WAAW;UAE7B,OAAM,MAAA,aAAmB,CAAC,WAAW,CAAC;AAExC;;KAEF,KAAK;AACH,YAAA,WAAiB,IAAI;AACrB;KACF,KAAK;AACH,UAAI,IAAI;AACN,UAAG,KAAK,OAAO;AACf,aAAM,GAAG,KAAK,MAAM;AACpB,YAAK;;AAEP;;IAIJ,MAAM,CAAC,GAAG,WAAW,MAAM,UAAU;IACrC,MAAM,MAAM,QAAQ;AACpB,UAAA,0BAAgC,KAAK;AAErC,QAAI,QAAQ,SAAS;AACnB,YAAO,CAAC,IAAI,gDAAgD;KAC5D,MAAM,EAAC,SAAS,SAAS,WAAU,UAA4B;AAC1D,aAAQ,YAAY,GAAG;AAC5B,UAAK;MACH,MAAM,IAAI,gBACR,MAAA,GAAS,YAAY,aAAa,UAAU,EAC5C;OACE,MAAM;OACN,0BAA0B,MAAA;OAC3B,CACF;MACD,oBAAoB;MACpB,KAAK;MACL,0BAA0B;MAC1B,KAAK,CAAC,OAAO;MACd;AACD,QAAG,KAAK,IAAI,MAAA,GAAS;AAGhB,QAAG,KAAK,SAAQ,OAAM;AACzB,QAA+B;0BACjB,MAAA,IAAU,mBAAmB,CAAC,aAAa,MACtD,CAAC,YAAY,QAAQ,OAAO,EAC7B,OACD;AACD,aAAO,EAAE;OACT;WACG;AACL,YAAO,UAAU,2CAA2C,OAAO;AACnE,QAAG;;IAGL,MAAM,QAAQ;KACZ,WAAW,QAAQ,WAAW,YAAY,GAAG;KAC7C,WAAW,QAAQ,WAAW,GAAG,qBAAqB;KACtD,KAAK,GAAG;KACR,QAAQ;KACT;IAED,MAAM,YAAY,GAAG,KAAK,SAAQ,QAAO,CACvC,GAAG,eAAe,MAAA,IAAU,YAAY,CAAC,GAAG,IAAI,MAAM,IACtD,GAAI,WAAW,QAAQ,eAAe,OAAO,GACzC,MAAA,sBAA4B,KAAK,OAAO,GACxC,EAAE,CACP,CAAC;AAEF,QAAI,GAAG,MAAM,QAAQ,EAInB,OAAM;AAER,UAAA,0BAAgC;AAEhC,QAAI,QAAQ,UAAU;KACpB,MAAM,EAAC,UAAS,MAAM,GAAG;AACzB,SAAI,UAAU,MAAA,OAEZ,IAAG,KAAK,KACN,IAAI,WACF,2CAA2C,QAC5C,CACF;UACI;MAEL,MAAM,gBAAgB;AACjB,SAAG,KAAK,SAAQ,OAAM,CACzB,EAAE;qBACK,MAAA,IAAU,mBAAmB,CAAC,OAAO,GAAG,EAAC,eAAc,CAAC,GAChE,CAAC;AACF,SAAG,KAAK,SAAS;;AAGnB,WAAM,GAAG,KAAK,MAAM;AAGpB,SAAI,GAAG,IACL,OAAA,WAAiB;MAAC;MAAU;MAAQ,EAAC,WAAU;MAAC,CAAC;AAEnD,UAAK;AAIL,WAAM,MAAA,aAAmB,aAAa,OAAO,EAAE,CAAC;eACvC,QAAQ,YAAY;AAG7B,QAAG,KAAK,OAAO;AACf,WAAM,GAAG,KAAK,MAAM;AACpB,UAAK;AAEL,WAAM,MAAA,aAAmB,aAAa,OAAO,EAAE,CAAC;;;WAG7C,GAAG;AACV,gBAAa,SAAS,EAAC,iBAAgB,WAAW,KAAK,EAAE,CAAC;AAC1D,SAAM;;;CAIV,OAAA,aAAoB,MAA2B;AAC7C,MAAI,KAAK,WAAW,EAClB;EAGF,MAAM,SAAS,IAAI,gBACjB,MAAA,GAAS,YAAY,QAAQ,UAAU,EACvC,EAAC,MAAM,UAAc,CACtB;AACD,SAAO,IAAI,MAAA,GAAS;EAEpB,IAAI;AACJ,MAAI;AAKF,IAAC,CAAC,kBAAkB,MAAM,OAAO,iBAC/B,QAAO,GAAuB;wBACd,MAAA,IAAU,mBAAmB,CAAC;QAE/C;WACM,GAAG;AACV,QAAK,KAAK,EAAC,iBAAgB,WAAW,KAAK,EAAE,CAAC;AAC9C,SAAM;;AAKH,UAAQ,IACX,KAAK,KAAI,QAAO,MAAA,QAAc,KAAK,eAAe,OAAO,CAAC,CAC3D,CAAC,cAAc,OAAO,SAAS,CAAC;;CAGnC,OAAA,QACE,EAAC,YAAY,KAAK,QAClB,eACA,QACA;AACA,MAAI;AACF,SAAM,OAAO,gBAAgB,OAAM,OAAM;IACvC,MAAM,QAAQ,KAAK,KAAK;IAIxB,IAAI,iBAAiB,IAAI,cAAc,MAAA;IACvC,IAAI,QAAQ;IACZ,IAAI;AAEJ,eAAW,MAAM,WAAW,EAA0B;0CACpB,MAAA,IAAU,YAAY,CAAC;gCACjC,I
AAI,UAAU;gCACd,cAAc;oCACV,OAAO,IAAK,EAAE;KAOxC,MAAM,QAAQ,YAAY,KAAK;AAC/B,WAAM;KACN,MAAM,UAAU,YAAY,KAAK,GAAG;AACpC,SAAI,kBACF,EAAC,UAAU,MAAM,MAAA,GAAS,OAAO,MAAA,GAAS,SACxC,UAAU,QAAQ,QAAQ,EAAE,CAAC,UAAU,IAAI,GAAG,2CAC/C;AAGH,UAAK,MAAM,SAAS,QAClB,KAAI,MAAM,cAAc,IAAI,UAG1B,kBAAiB;cACR,gBAAgB;AACzB,0BAAoB,IAAI,QAAQ,aAAa,MAAM,CAAC;AACpD;gBACS,SAAS,SAClB,OAAM,IAAI,gBACR,+BAA+B,IAAI,UAAU,wBAAwB,MAAM,UAAU,GACtF;UACI;AACL,YAAA,GAAS,OACP,qCAAqC,IAAI,UAAU,wBAAwB,MAAM,UAAU,GAC5F;AACD,UAAI,MACF,GACA,mCAAmC,MAAM,UAAU,cAAc,IAAI,UAAU,GAChF;AACD;;;AAIN,QAAI,gBAAgB;AAClB,WAAM;AACN,WAAA,GAAS,OACP,aAAa,IAAI,GAAG,QAAQ,MAAM,YAChC,KAAK,KAAK,GAAG,MACd,MACF;UAED,OAAA,GAAS,OACP,2BAA2B,IAAI,UAAU,+BAC1C;AAIH,QAAI,aAAa;KACjB;WACK,KAAK;AACZ,SAAA,GAAS,QAAQ,sCAAsC,IAAI,MAAM,IAAI;AACrE,OAAI,eAAe,iBAAiB;AAClC,UAAM,kBAAkB,MAAA,IAAU,MAAA,MAAY;AAC9C,UAAA,QAAc,IAAI;;AAEpB,OAAI,KAAK,IAAI;;;;;;;CAQjB,uBAAuB,KAA0B,QAAsB;EACrE,MAAM,QAA+B,EAAE;AAEvC,UAAQ,OAAO,KAAf;GACE,KAAK,yBAAyB;IAC5B,MAAM,EAAC,OAAO,KAAK,aAAY;AAC/B,UAAM,KAAK,MAAA,wBAA8B,KAAK,OAAO,SAAS,CAAC;AAC/D;;GAGF,KAAK,gBAAgB;IACnB,MAAM,EAAC,MAAM,UAAU,aAAY;AACnC,QAAI,SACF,OAAM,KAAK,MAAA,wBAA8B,KAAK,MAAM,SAAS,CAAC;AAEhE,QAAI,SACF,QAAO,QAAQ,SAAS,CAAC,SAAS,CAAC,KAAK,cAAc;AACpD,WAAM,KACJ,MAAA,yBAA+B,KAAK,MAAM,KAAK,SAAS,CACzD;MACD;AAEJ;;GAGF,KAAK,gBAAgB;IACnB,MAAM,EAAC,QAAO;IACd,MAAM,MAAM;KAAC,QAAQ,OAAO,IAAI;KAAQ,OAAO,OAAO,IAAI;KAAK;AAC/D,UAAM,KACJ,GAAG,UAAU,MAAA,IAAU,gBAAgB,CAAC,OAAO,IAAI,IAAI,CAAC;mCAC/B,IAAI,OAAO,iBAAiB,IAAI,QACzD,GAAG,UAAU,MAAA,IAAU,cAAc,CAAC,OAAO,IAAI,IAAI,CAAC;mCAC7B,IAAI,OAAO,iBAAiB,IAAI,OAC1D;AACD;;GAGF,KAAK,cAAc;IACjB,MAAM,EACJ,IAAI,EAAC,QAAQ,WACX;AACJ,UAAM,KACJ,GAAG,eAAe,MAAA,IAAU,gBAAgB,CAAC;mCACpB,OAAO,iBAAiB,QACjD,GAAG,eAAe,MAAA,IAAU,cAAc,CAAC;mCAClB,OAAO,iBAAiB,OAClD;AACD;;GAGF,KAAK,cAAc;IACjB,MAAM,EAAC,OAAO,eAAe,QAAQ,aAAY;AACjD,QAAI,cACF,OAAM,KAAK,MAAA,wBAA8B,KAAK,OAAO,cAAc,CAAC;AAEtE,QAAI,SACF,OAAM,KACJ,MAAA,yBAA+B,KAAK,OAAO,OAAO,MAAM,SAAS,CAClE;AAEH;;GAGF,KAAK,iBAAiB;IACpB,MAAM,EACJ,OAAO,EAAC,QAAQ,MAAM,SACtB,KAAK,EAAC,MAAM,WACZ,KAAK,EAAC,MAAM,cACV;AACJ,QAAI,YAAY,QACd,OAAM,KACJ,GAAG,UAAU,MAAA,IAAU,cAAc,CAAC,kBAAkB,QAAQ;mCACzC,OAAO,iBAAiB,MAAM,kBAAkB,UACxE;AAEH;;GAGF,KAAK,eAAe;IAClB,MAAM,EACJ,OAAO,EAAC,QAAQ,QAChB,WACE;AACJ,UAAM,KACJ,GAAG,eAAe,MAAA,IAAU,cAAc,CAAC;mCAClB,OAAO,iBAAiB,KAAK,kBAAkB,SACzE;AACD;;GAGF,KAAK,sBAAsB;IACzB,MAAM,EACJ,UAAU,EAAC,QAAQ,MAAM,OAAO,UAChC,YACE;IACJ,MAAM,OAAO,CAAC,GAAG,OAAO,SAAS,GAAG,QAAQ;AAC5C,UAAM,KACJ,GAAG,eAAe,MAAA,IAAU,cAAc,CAAC;mCAClB,OAAO,iBAAiB,MAAM,mBAAmB,IAAI,KAAK,GACpF;;;AAGL,SAAO;;CAGT,yBACE,KACA,EAAC,QAAQ,MAAM,SACf,UACA;EACA,MAAM,MAAwB;GAAC;GAAQ;GAAO;GAAS;AACvD,SAAO,GAAG;sBACQ,MAAA,IAAU,gBAAgB,CAAC,GAAG,IAAI,IAAI,CAAC;;0BAEnC,IAAI,IAAI,CAAC;;;CAIjC,0BACE,KACA,EAAC,QAAQ,MAAM,SACf,QACA,UACA;EACA,MAAM,MAAyB;GAAC;GAAQ;GAAO;GAAQ;GAAS;AAChE,SAAO,GAAG;sBACQ,MAAA,IAAU,cAAc,CAAC,GAAG,IAAI,IAAI,CAAC;;0BAEjC,IAAI,IAAI,CAAC;;;;;;;CAQjC,MAAM,eAAe;AACnB,MAAI,MAAA,SAAe;GACjB,MAAM,EAAC,SAAS,YAAW,UAAU;AACrC,SAAA,MAAY,QAAQ,CAAC,SAAS,QAAQ,CAAC;AACvC,SAAM;;;CAIV,OAAO;AACL,MAAI,MAAA,SAAe;AACjB,SAAA,GAAS,OAAO,YAAY,MAAA,MAAY,MAAM,CAAC,oBAAoB;AACnE,SAAA,MAAY,QAAQ,OAAO;;AAE7B,SAAO,MAAA;;;AAIX,SAAS,aAAa,OAAuC;CAC3D,MAAM,EAAC,WAAW,WAAU;AAC5B,SAAQ,OAAO,KAAf;EACE,KAAK,QACH,QAAO,CAAC,WAAW;GAAC;GAAS;GAAQ,EAAC,iBAAiB,WAAU;GAAC,CAAC;EACrE,KAAK,SACH,QAAO,CAAC,WAAW;GAAC;GAAU;GAAQ,EAAC,WAAU;GAAC,CAAC;EACrD,KAAK,WACH,QAAO,CAAC,WAAW,CAAC,YAAY,OAAO,CAAC;EAC1C,QACE,QAAO,CAAC,WAAW,CAAC,QAAQ,OAAO,CAAC;;;AAI1C,IAAa,YAAb,MAAuB;CACrB;CACA;CACA;CACA;CAEA,YACE,IACA,IACA,gBACA,WACA;AACA,QAAA,KAAW;AACX,QAAA,KAAW;AACX,OAAK,iBAAiB;AACtB,OAAK,eAAe;;CAGtB,YAAY;CA
EZ,MAAM,UAAU;AACd,MAAI,MAAA,SACF;AAEF,QAAA,WAAiB;AACjB,QAAA,GAAS,SAAS;AAClB,QAAM,MAAA,GACH,MAAM,CACN,OAAM,MAAK,MAAA,GAAS,OAAO,iCAAiC,EAAE,CAAC;AAClE,QAAA,GAAS,OAAO,0BAA0B,KAAK,eAAe;;;AAIlE,IAAa,cAAb,MAAyB;CACvB;CACA;CACA;CAEA,YAAY,IAAgB,OAAgB,IAAgB;AAC1D,QAAA,KAAW,GAAG,YAAY,aAAa,eAAe;AACtD,QAAA,QAAc;AACd,QAAA,KAAW;;CAIb,KAAK,OAAe;AAClB,SAAO,MAAA,GAAS,GAAG,UAAU,MAAA,MAAY,CAAC,GAAG,QAAQ;;CAGvD,MAAM,UAAU;EACd,MAAM,KAAK,IAAI,gBAAgB,MAAA,IAAU,EAAC,MAAM,gBAAoB,CAAC,CAAC,IACpE,MAAA,GACD;EACD,MAAM,MAAM,MAAM,GAAG,iBACnB,QAAO,GAA0B;8BACT,MAAA,IAAU,YAAY,CAAC;;;MAIhD;AACD,MAAI,IAAI,WAAW,GAAG;AACpB,SAAA,GAAS,OAAO,6CAA6C;AAC7D,MAAG,SAAS;AACZ,SAAM,GAAG,MAAM;AACf,UAAO;;EAET,MAAM,CAAC,EAAC,eAAc;EACtB,MAAM,CAAC,EAAC,oBAAmB,MAAM,GAAG,iBAClC,QAAO,GAA+B;uCACL,MAAA,IAAU,oBAAoB,CAAC;QAEjE;AACD,QAAA,GAAS,OACP,oBAAoB,UAAU,kCAAkC,iBACjE;AACD,SAAO,IAAI,UAAU,MAAA,IAAU,IAAI,gBAAgB,UAAU"}
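The storer.ts source embedded in the map above sizes a back-pressure threshold from the V8 heap headroom measured at startup, counts queued bytes as changes are enqueued, and releases pressure only after usage falls back below 80% of the threshold. The following is a minimal, self-contained sketch of that scheme; the BackPressureGate class and its method names are illustrative stand-ins, not part of the package — only getHeapStatistics() is real Node API.

// Illustrative sketch of the back-pressure scheme used by Storer in
// storer.ts (see the source map above). BackPressureGate is a
// hypothetical name introduced here for clarity.
import {getHeapStatistics} from 'node:v8';

class BackPressureGate {
  readonly #thresholdBytes: number;
  #queuedBytes = 0;
  #release: (() => void) | null = null;
  #blocked: Promise<void> | null = null;

  constructor(heapProportion: number) {
    // Derive the byte budget from the headroom left in the V8 heap,
    // as the Storer does against --max-old-space-size.
    const {heap_size_limit, used_heap_size} = getHeapStatistics();
    this.#thresholdBytes =
      (heap_size_limit - used_heap_size) * heapProportion;
  }

  // Producer side: record queued bytes, then await readyForMore()
  // (when defined) before pulling more data from upstream.
  enqueued(bytes: number) {
    this.#queuedBytes += bytes;
  }

  readyForMore(): Promise<void> | undefined {
    if (this.#blocked === null && this.#queuedBytes > this.#thresholdBytes) {
      this.#blocked = new Promise<void>(resolve => {
        this.#release = resolve;
      });
    }
    return this.#blocked ?? undefined;
  }

  // Consumer side: call as entries drain. Pressure is released only once
  // usage drops below 80% of the threshold (hysteresis), mirroring the
  // "wait for at least 20% of the threshold to free up" rule in
  // #maybeReleaseBackPressure.
  consumed(bytes: number) {
    this.#queuedBytes -= bytes;
    if (this.#release && this.#queuedBytes < this.#thresholdBytes * 0.8) {
      this.#release();
      this.#release = null;
      this.#blocked = null;
    }
  }
}

In storer.ts the same pattern is wired to the change queue: store() adds json.length on enqueue, #processQueue() subtracts it on dequeue, and readyForMore() is what the upstream change stream awaits.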
package/out/zero-cache/src/services/life-cycle.js
@@ -54,7 +54,7 @@ var ProcessManager = class {
   isOpen = false;
   this.#onExit(code, signal, null, type, name, proc);
   });
- proc.on("error", (err) => this.#lc[isOpen ? "error" : "warn"]?.(`error from ${name} ${proc.pid}`, err));
+ proc.on("error", (err) => this.#lc[!isOpen || this.#drainStart > 0 ? "warn" : "error"]?.(`error from ${name} ${proc.pid}`, err));
   }
   #initializing = /* @__PURE__ */ new Map();
   #nextID = 0;
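The one-line change in this hunk widens the condition for downgrading child-process 'error' events from level error to warn: previously only errors arriving after the 'close' event were downgraded; now errors arriving during a graceful drain (#drainStart > 0) are downgraded as well, since message-send failures are expected while workers shut down. A standalone restatement of the new selection logic follows; errorEventLevel is a hypothetical helper written for illustration, not package API.

// Illustrative restatement of the level-selection expression introduced
// in the hunk above.
type LogLevel = 'warn' | 'error';

function errorEventLevel(isOpen: boolean, drainStartMs: number): LogLevel {
  // Old behavior:  isOpen ? 'error' : 'warn'
  // New behavior: additionally treat errors that occur during a drain
  // (drainStartMs > 0) as expected shutdown noise rather than alertable
  // errors.
  return !isOpen || drainStartMs > 0 ? 'warn' : 'error';
}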
package/out/zero-cache/src/services/life-cycle.js.map
@@ -1 +1 @@
- {"version":3,"file":"life-cycle.js","names":["#lc","#userFacing","#all","#exitImpl","#start","#ready","#startDrain","#kill","#exit","#runningState","#drainStart","#onExit","#initializing","#nextID","#stopInterval","#lastHeartbeat","#checkIntervalTimer","#checkStopInterval","#checkImmediateTimer"],"sources":["../../../../../zero-cache/src/services/life-cycle.ts"],"sourcesContent":["import type {IncomingHttpHeaders} from 'node:http';\nimport {pid} from 'node:process';\nimport type {EventEmitter} from 'stream';\nimport type {LogContext} from '@rocicorp/logger';\nimport {resolver} from '@rocicorp/resolver';\nimport {\n singleProcessMode,\n type Subprocess,\n type Worker,\n} from '../types/processes.ts';\nimport {RunningState} from './running-state.ts';\nimport type {SingletonService} from './service.ts';\n\n/**\n * * `user-facing` workers serve external requests and are the first to\n * receive a `SIGTERM` or `SIGINT` signal for graceful shutdown.\n *\n * * `supporting` workers support `user-facing` workers and are sent\n * the `SIGTERM` signal only after all `user-facing` workers have\n * exited.\n *\n * For other kill signals, such as `SIGQUIT` and `SIGABRT`, all workers\n * are stopped without draining. `SIGQUIT` is used to represent an\n * intentional shutdown (for which draining is not beneficial), whereas\n * `SIGABRT` is used for unexpected process exits.\n */\nexport type WorkerType = 'user-facing' | 'supporting';\n\nexport const GRACEFUL_SHUTDOWN = ['SIGTERM', 'SIGINT'] as const;\nexport const FORCEFUL_SHUTDOWN = ['SIGQUIT', 'SIGABRT'] as const;\n\ntype GracefulShutdownSignal = (typeof GRACEFUL_SHUTDOWN)[number];\n\n// An internal error code used to indicate that a message has already been\n// logged at level ERRROR. When a process exits with this error code, the\n// parent process logs the exit at level WARN instead of ERROR.\nexport const UNHANDLED_EXCEPTION_ERROR_CODE = 13;\n\n// An internal error code used to indicate that the server should exit\n// without draining (e.g. due to a supporting worker get a signal to shut\n// down), but the exit is otherwise intentional.\nexport const INTENTIONAL_SHUTDOWN_ERROR_CODE = 14;\n\n/**\n * Handles readiness, termination signals, and coordination of graceful\n * shutdown.\n */\nexport class ProcessManager {\n readonly #lc: LogContext;\n readonly #userFacing = new Set<Subprocess>();\n readonly #all = new Set<Subprocess>();\n readonly #exitImpl: (code: number) => never;\n readonly #start = Date.now();\n readonly #ready: Promise<void>[] = [];\n\n #runningState = new RunningState('process-manager');\n #drainStart = 0;\n\n constructor(lc: LogContext, proc: EventEmitter) {\n this.#lc = lc.withContext('component', 'process-manager');\n\n // Propagate `SIGTERM` and `SIGINT` to all user-facing workers,\n // initiating a graceful shutdown. The parent process will\n // exit once all user-facing workers have exited ...\n for (const signal of GRACEFUL_SHUTDOWN) {\n proc.on(signal, () => this.#startDrain(signal));\n }\n\n // ... which will result in sending `SIGTERM` to the remaining workers.\n proc.on('exit', code =>\n this.#kill(\n this.#all,\n code === 0\n ? 'SIGTERM' // graceful, drained shutdown\n : code === INTENTIONAL_SHUTDOWN_ERROR_CODE\n ? 'SIGQUIT' // intentional abort without drain\n : 'SIGABRT', // unintentional shutdown, alertable error\n ),\n );\n\n // For other (catchable) kill signals, exit with a non-zero error code\n // to send a `SIGQUIT` (intentional shutdown) or `SIGABRT` (unexpected\n // shutdown) to all workers. 
For these signals, workers are stopped\n // immediately without draining, since there is no merit to slowly draining\n // when supporting workers have stopped.\n //\n // The logic for handling these signals is in `runUntilKilled()`.\n for (const signal of FORCEFUL_SHUTDOWN) {\n proc.on(signal, () =>\n this.#exit(signal === 'SIGQUIT' ? INTENTIONAL_SHUTDOWN_ERROR_CODE : -1),\n );\n }\n\n this.#exitImpl = (code: number) => {\n if (singleProcessMode()) {\n return proc.emit('exit', code) as never; // For unit / integration tests.\n }\n process.exit(code);\n };\n }\n\n done() {\n return this.#runningState.stopped();\n }\n\n #exit(code: number) {\n this.#lc.info?.('exiting with code', code);\n this.#runningState.stop(this.#lc);\n void this.#lc.flush().finally(() => this.#exitImpl(code));\n }\n\n #startDrain(signal: GracefulShutdownSignal) {\n if (this.#all.size === 0) {\n // Shutdown if a signal is received before any subprocesses are added.\n this.#lc.info?.(`exiting on ${signal}`);\n this.#exit(0);\n }\n this.#lc.info?.(`initiating drain (${signal})`);\n this.#drainStart = Date.now();\n if (this.#userFacing.size) {\n this.#kill(this.#userFacing, signal);\n } else {\n this.#kill(this.#all, signal);\n }\n }\n\n addSubprocess(proc: Subprocess, type: WorkerType, name: string) {\n if (type === 'user-facing') {\n this.#userFacing.add(proc);\n }\n this.#all.add(proc);\n\n let isOpen = true;\n proc.on('close', (code, signal) => {\n isOpen = false;\n this.#onExit(code, signal, null, type, name, proc);\n });\n\n // As per https://nodejs.org/api/child_process.html#event-error\n // 'error' events can happen when sending a message to a child process\n // fails. This is not really an error when the server is shutting down,\n // so log any post-close errors at 'warn'.\n proc.on('error', err =>\n this.#lc[isOpen ? 'error' : 'warn']?.(\n `error from ${name} ${proc.pid}`,\n err,\n ),\n );\n }\n\n readonly #initializing = new Map<number, string>();\n #nextID = 0;\n\n addWorker(worker: Worker, type: WorkerType, name: string): Worker {\n this.addSubprocess(worker, type, name);\n\n const id = ++this.#nextID;\n this.#initializing.set(id, name);\n const {promise, resolve} = resolver();\n this.#ready.push(promise);\n\n worker.onceMessageType('ready', () => {\n this.#lc.debug?.(`${name} ready (${Date.now() - this.#start} ms)`);\n this.#initializing.delete(id);\n resolve();\n });\n\n return worker;\n }\n\n initializing(): string[] {\n return [...this.#initializing.values()];\n }\n\n async allWorkersReady() {\n await Promise.all(this.#ready);\n }\n\n logErrorAndExit(err: unknown, name: string) {\n // only accessible by the main (i.e. user-facing) process.\n this.#onExit(-1, null, err, 'user-facing', name, undefined);\n }\n\n #onExit(\n code: number,\n sig: NodeJS.Signals | null,\n err: unknown,\n type: WorkerType,\n name: string,\n worker: Subprocess | undefined,\n ) {\n // Remove the worker from maps to avoid attempting to send more signals to it.\n if (worker) {\n this.#userFacing.delete(worker);\n this.#all.delete(worker);\n }\n\n const pid = worker?.pid ?? process.pid;\n\n if (type === 'supporting') {\n // Supporting workers like the replication-manager shut down without a\n // drain signal when receiving protocol-specific instructions (like auto\n // reset). 
In this case, a special error code is used to signal that the\n // server should be shut down without draining, but it is otherwise not\n // considered an unexpected/alertable error.\n if (code === 0 && (this.#drainStart === 0 || this.#userFacing.size > 0)) {\n code = INTENTIONAL_SHUTDOWN_ERROR_CODE;\n }\n const log =\n code === 0 || code === INTENTIONAL_SHUTDOWN_ERROR_CODE\n ? 'info'\n : 'warn';\n this.#lc[log]?.(`${name} (${pid}) exited with code (${code})`, err ?? '');\n return this.#exit(code);\n }\n\n const log =\n code === 0 || code === INTENTIONAL_SHUTDOWN_ERROR_CODE\n ? 'info'\n : this.#drainStart > 0 || code === UNHANDLED_EXCEPTION_ERROR_CODE\n ? 'warn'\n : 'error';\n this.#lc[log]?.(\n sig\n ? `${name} (${pid}) killed with (${sig})`\n : `${name} (${pid}) exited with code (${code})`,\n err ?? '',\n );\n\n // user-facing workers exited or finished draining.\n if (this.#userFacing.size === 0) {\n this.#lc.info?.(\n this.#drainStart\n ? `all user-facing workers drained (${\n Date.now() - this.#drainStart\n } ms)`\n : `all user-facing workers exited`,\n );\n return this.#exit(0);\n }\n\n if (this.#drainStart === 0) {\n // If a user-facing worker exits without receiving a drain signal,\n // shutdown the server.\n return this.#exit(code || -1);\n }\n\n return undefined;\n }\n\n #kill(workers: Iterable<Subprocess>, signal: NodeJS.Signals) {\n for (const worker of workers) {\n try {\n worker.kill(signal);\n } catch (e) {\n this.#lc.error?.(e);\n }\n }\n }\n}\n\n/**\n * Runs the specified services, stopping them on `SIGTERM` or `SIGINT` with\n * an optional {@link SingletonService.drain drain()}, or stopping them\n * without draining for `SIGQUIT`.\n *\n * @returns a Promise that resolves/rejects when any of the services stops/throws.\n */\n\nexport async function runUntilKilled(\n lc: LogContext,\n parent: EventEmitter,\n ...services: SingletonService[]\n): Promise<void> {\n if (services.length === 0) {\n return;\n }\n for (const signal of [...GRACEFUL_SHUTDOWN, ...FORCEFUL_SHUTDOWN]) {\n parent.once(signal, () => {\n const GRACEFUL_SIGNALS = GRACEFUL_SHUTDOWN as readonly NodeJS.Signals[];\n\n services.forEach(async svc => {\n if (GRACEFUL_SIGNALS.includes(signal) && svc.drain) {\n lc.info?.(`draining ${svc.constructor.name} ${svc.id} (${signal})`);\n await svc.drain();\n }\n lc.info?.(`stopping ${svc.constructor.name} ${svc.id} (${signal})`);\n await svc.stop();\n });\n });\n }\n\n try {\n // Run all services and resolve when any of them stops.\n const svc = await Promise.race(\n services.map(svc => svc.run().then(() => svc)),\n );\n lc.info?.(`${svc.constructor.name} (${svc.id}) stopped`);\n } catch (e) {\n lc.error?.(`exiting on error`, e);\n throw e;\n }\n}\n\nexport async function exitAfter(run: () => Promise<void>) {\n try {\n await run();\n // oxlint-disable-next-line no-console\n console.info(`pid ${pid} exiting normally`);\n process.exit(0);\n } catch (e) {\n // oxlint-disable-next-line no-console\n console.error(`pid ${pid} exiting with error`, e);\n process.exit(-1);\n }\n}\n\nconst DEFAULT_STOP_INTERVAL_MS = 20_000;\n\n/**\n * The HeartbeatMonitor monitors the cadence heartbeats (e.g. \"/keepalive\"\n * health checks made to HttpServices) that signal that the server\n * should continue processing requests. When a configurable `stopInterval`\n * elapses without receiving these heartbeats, the monitor initiates a\n * graceful shutdown of the server. 
This works with common load balancing\n * frameworks such as AWS Elastic Load Balancing.\n *\n * The HeartbeatMonitor is **opt-in** in that it only kicks in after it\n * starts receiving keepalives.\n */\nexport class HeartbeatMonitor {\n readonly #stopInterval: number;\n\n #lc: LogContext;\n #checkIntervalTimer: NodeJS.Timeout | undefined;\n #checkImmediateTimer: NodeJS.Immediate | undefined;\n #lastHeartbeat = 0;\n\n constructor(lc: LogContext, stopInterval = DEFAULT_STOP_INTERVAL_MS) {\n this.#lc = lc;\n this.#stopInterval = stopInterval;\n }\n\n onHeartbeat(reqHeaders: IncomingHttpHeaders) {\n this.#lastHeartbeat = Date.now();\n if (this.#checkIntervalTimer === undefined) {\n this.#lc.info?.(\n `starting heartbeat monitor at ${\n this.#stopInterval / 1000\n } second interval`,\n reqHeaders,\n );\n // e.g. check every 5 seconds to see if it's been over 20 seconds\n // since the last heartbeat.\n this.#checkIntervalTimer = setInterval(\n this.#checkStopInterval,\n this.#stopInterval / 4,\n );\n }\n }\n\n #checkStopInterval = () => {\n // In the Node.js event loop, timers like setInterval and setTimeout\n // run *before* I/O events coming from network sockets or file reads/writes.\n // When this process gets starved of CPU resources for long periods of time,\n // for example when other processes are monopolizing all available cores,\n // pathological behavior can emerge:\n // - keepalive network request comes in, but is queued in Node internals waiting\n // for time on the event loop\n // - CPU is starved/monopolized by other processes for longer than the time\n // configured via this.#stopInterval\n // - When CPU becomes available and the event loop wakes up, this stop interval\n // check is run *before* the keepalive request is processed. The value of\n // this.#lastHeartbeat is now very stale, and erroneously triggers a shutdown\n // even though keepalive requests were about to be processed and update\n // this.#lastHeartbeat. Downtime ensues.\n //\n // To avoid this, we push the check out to a phase of the event loop *after*\n // I/O events are processed, using setImmediate():\n // https://nodejs.org/en/learn/asynchronous-work/event-loop-timers-and-nexttick#setimmediate-vs-settimeout\n //\n // This ensures we see a value for this.#lastHeartbeat that reflects\n // any keepalive requests that came in during the current event loop turn.\n this.#checkImmediateTimer = setImmediate(() => {\n this.#checkImmediateTimer = undefined;\n const timeSinceLastHeartbeat = Date.now() - this.#lastHeartbeat;\n if (timeSinceLastHeartbeat >= this.#stopInterval) {\n this.#lc.info?.(\n `last heartbeat received ${\n timeSinceLastHeartbeat / 1000\n } seconds ago. 
draining.`,\n );\n process.kill(process.pid, GRACEFUL_SHUTDOWN[0]);\n }\n });\n };\n\n stop() {\n clearTimeout(this.#checkIntervalTimer);\n if (this.#checkImmediateTimer) {\n clearImmediate(this.#checkImmediateTimer);\n }\n }\n}\n"],"mappings":";;;;;AA4BA,IAAa,oBAAoB,CAAC,WAAW,SAAS;AACtD,IAAa,oBAAoB,CAAC,WAAW,UAAU;;;;;AAkBvD,IAAa,iBAAb,MAA4B;CAC1B;CACA,8BAAuB,IAAI,KAAiB;CAC5C,uBAAgB,IAAI,KAAiB;CACrC;CACA,SAAkB,KAAK,KAAK;CAC5B,SAAmC,EAAE;CAErC,gBAAgB,IAAI,aAAa,kBAAkB;CACnD,cAAc;CAEd,YAAY,IAAgB,MAAoB;AAC9C,QAAA,KAAW,GAAG,YAAY,aAAa,kBAAkB;AAKzD,OAAK,MAAM,UAAU,kBACnB,MAAK,GAAG,cAAc,MAAA,WAAiB,OAAO,CAAC;AAIjD,OAAK,GAAG,SAAQ,SACd,MAAA,KACE,MAAA,KACA,SAAS,IACL,YACA,SAAA,KACE,YACA,UACP,CACF;AASD,OAAK,MAAM,UAAU,kBACnB,MAAK,GAAG,cACN,MAAA,KAAW,WAAW,YAAA,KAA8C,GAAG,CACxE;AAGH,QAAA,YAAkB,SAAiB;AACjC,OAAI,mBAAmB,CACrB,QAAO,KAAK,KAAK,QAAQ,KAAK;AAEhC,WAAQ,KAAK,KAAK;;;CAItB,OAAO;AACL,SAAO,MAAA,aAAmB,SAAS;;CAGrC,MAAM,MAAc;AAClB,QAAA,GAAS,OAAO,qBAAqB,KAAK;AAC1C,QAAA,aAAmB,KAAK,MAAA,GAAS;AAC5B,QAAA,GAAS,OAAO,CAAC,cAAc,MAAA,SAAe,KAAK,CAAC;;CAG3D,YAAY,QAAgC;AAC1C,MAAI,MAAA,IAAU,SAAS,GAAG;AAExB,SAAA,GAAS,OAAO,cAAc,SAAS;AACvC,SAAA,KAAW,EAAE;;AAEf,QAAA,GAAS,OAAO,qBAAqB,OAAO,GAAG;AAC/C,QAAA,aAAmB,KAAK,KAAK;AAC7B,MAAI,MAAA,WAAiB,KACnB,OAAA,KAAW,MAAA,YAAkB,OAAO;MAEpC,OAAA,KAAW,MAAA,KAAW,OAAO;;CAIjC,cAAc,MAAkB,MAAkB,MAAc;AAC9D,MAAI,SAAS,cACX,OAAA,WAAiB,IAAI,KAAK;AAE5B,QAAA,IAAU,IAAI,KAAK;EAEnB,IAAI,SAAS;AACb,OAAK,GAAG,UAAU,MAAM,WAAW;AACjC,YAAS;AACT,SAAA,OAAa,MAAM,QAAQ,MAAM,MAAM,MAAM,KAAK;IAClD;AAMF,OAAK,GAAG,UAAS,QACf,MAAA,GAAS,SAAS,UAAU,UAC1B,cAAc,KAAK,GAAG,KAAK,OAC3B,IACD,CACF;;CAGH,gCAAyB,IAAI,KAAqB;CAClD,UAAU;CAEV,UAAU,QAAgB,MAAkB,MAAsB;AAChE,OAAK,cAAc,QAAQ,MAAM,KAAK;EAEtC,MAAM,KAAK,EAAE,MAAA;AACb,QAAA,aAAmB,IAAI,IAAI,KAAK;EAChC,MAAM,EAAC,SAAS,YAAW,UAAU;AACrC,QAAA,MAAY,KAAK,QAAQ;AAEzB,SAAO,gBAAgB,eAAe;AACpC,SAAA,GAAS,QAAQ,GAAG,KAAK,UAAU,KAAK,KAAK,GAAG,MAAA,MAAY,MAAM;AAClE,SAAA,aAAmB,OAAO,GAAG;AAC7B,YAAS;IACT;AAEF,SAAO;;CAGT,eAAyB;AACvB,SAAO,CAAC,GAAG,MAAA,aAAmB,QAAQ,CAAC;;CAGzC,MAAM,kBAAkB;AACtB,QAAM,QAAQ,IAAI,MAAA,MAAY;;CAGhC,gBAAgB,KAAc,MAAc;AAE1C,QAAA,OAAa,IAAI,MAAM,KAAK,eAAe,MAAM,KAAA,EAAU;;CAG7D,QACE,MACA,KACA,KACA,MACA,MACA,QACA;AAEA,MAAI,QAAQ;AACV,SAAA,WAAiB,OAAO,OAAO;AAC/B,SAAA,IAAU,OAAO,OAAO;;EAG1B,MAAM,MAAM,QAAQ,OAAO,QAAQ;AAEnC,MAAI,SAAS,cAAc;AAMzB,OAAI,SAAS,MAAM,MAAA,eAAqB,KAAK,MAAA,WAAiB,OAAO,GACnE,QAAA;GAEF,MAAM,MACJ,SAAS,KAAK,SAAA,KACV,SACA;AACN,SAAA,GAAS,OAAO,GAAG,KAAK,IAAI,IAAI,sBAAsB,KAAK,IAAI,OAAO,GAAG;AACzE,UAAO,MAAA,KAAW,KAAK;;EAGzB,MAAM,MACJ,SAAS,KAAK,SAAA,KACV,SACA,MAAA,aAAmB,KAAK,SAAA,KACtB,SACA;AACR,QAAA,GAAS,OACP,MACI,GAAG,KAAK,IAAI,IAAI,iBAAiB,IAAI,KACrC,GAAG,KAAK,IAAI,IAAI,sBAAsB,KAAK,IAC/C,OAAO,GACR;AAGD,MAAI,MAAA,WAAiB,SAAS,GAAG;AAC/B,SAAA,GAAS,OACP,MAAA,aACI,oCACE,KAAK,KAAK,GAAG,MAAA,WACd,QACD,iCACL;AACD,UAAO,MAAA,KAAW,EAAE;;AAGtB,MAAI,MAAA,eAAqB,EAGvB,QAAO,MAAA,KAAW,QAAQ,GAAG;;CAMjC,MAAM,SAA+B,QAAwB;AAC3D,OAAK,MAAM,UAAU,QACnB,KAAI;AACF,UAAO,KAAK,OAAO;WACZ,GAAG;AACV,SAAA,GAAS,QAAQ,EAAE;;;;;;;;;;;AAc3B,eAAsB,eACpB,IACA,QACA,GAAG,UACY;AACf,KAAI,SAAS,WAAW,EACtB;AAEF,MAAK,MAAM,UAAU,CAAC,GAAG,mBAAmB,GAAG,kBAAkB,CAC/D,QAAO,KAAK,cAAc;EACxB,MAAM,mBAAmB;AAEzB,WAAS,QAAQ,OAAM,QAAO;AAC5B,OAAI,iBAAiB,SAAS,OAAO,IAAI,IAAI,OAAO;AAClD,OAAG,OAAO,YAAY,IAAI,YAAY,KAAK,GAAG,IAAI,GAAG,IAAI,OAAO,GAAG;AACnE,UAAM,IAAI,OAAO;;AAEnB,MAAG,OAAO,YAAY,IAAI,YAAY,KAAK,GAAG,IAAI,GAAG,IAAI,OAAO,GAAG;AACnE,SAAM,IAAI,MAAM;IAChB;GACF;AAGJ,KAAI;EAEF,MAAM,MAAM,MAAM,QAAQ,KACxB,SAAS,KAAI,QAAO,IAAI,KAAK,CAAC,WAAW,IAAI,CAAC,CAC/C;AACD,KAAG,OAAO,GAAG,IAAI,YAAY,KAAK,IAAI,IAAI,GAAG,WAAW;UACjD,GAAG;AACV,KAAG,QAAQ,oBAAoB,EAAE;AACjC,QA
AM;;;AAIV,eAAsB,UAAU,KAA0B;AACxD,KAAI;AACF,QAAM,KAAK;AAEX,UAAQ,KAAK,OAAO,IAAI,mBAAmB;AAC3C,UAAQ,KAAK,EAAE;UACR,GAAG;AAEV,UAAQ,MAAM,OAAO,IAAI,sBAAsB,EAAE;AACjD,UAAQ,KAAK,GAAG;;;AAIpB,IAAM,2BAA2B;;;;;;;;;;;;AAajC,IAAa,mBAAb,MAA8B;CAC5B;CAEA;CACA;CACA;CACA,iBAAiB;CAEjB,YAAY,IAAgB,eAAe,0BAA0B;AACnE,QAAA,KAAW;AACX,QAAA,eAAqB;;CAGvB,YAAY,YAAiC;AAC3C,QAAA,gBAAsB,KAAK,KAAK;AAChC,MAAI,MAAA,uBAA6B,KAAA,GAAW;AAC1C,SAAA,GAAS,OACP,iCACE,MAAA,eAAqB,IACtB,mBACD,WACD;AAGD,SAAA,qBAA2B,YACzB,MAAA,mBACA,MAAA,eAAqB,EACtB;;;CAIL,2BAA2B;AAsBzB,QAAA,sBAA4B,mBAAmB;AAC7C,SAAA,sBAA4B,KAAA;GAC5B,MAAM,yBAAyB,KAAK,KAAK,GAAG,MAAA;AAC5C,OAAI,0BAA0B,MAAA,cAAoB;AAChD,UAAA,GAAS,OACP,2BACE,yBAAyB,IAC1B,yBACF;AACD,YAAQ,KAAK,QAAQ,KAAK,kBAAkB,GAAG;;IAEjD;;CAGJ,OAAO;AACL,eAAa,MAAA,mBAAyB;AACtC,MAAI,MAAA,oBACF,gBAAe,MAAA,oBAA0B"}
+ {"version":3,"file":"life-cycle.js","names":["#lc","#userFacing","#all","#exitImpl","#start","#ready","#startDrain","#kill","#exit","#runningState","#drainStart","#onExit","#initializing","#nextID","#stopInterval","#lastHeartbeat","#checkIntervalTimer","#checkStopInterval","#checkImmediateTimer"],"sources":["../../../../../zero-cache/src/services/life-cycle.ts"],"sourcesContent":["import type {IncomingHttpHeaders} from 'node:http';\nimport {pid} from 'node:process';\nimport type {EventEmitter} from 'stream';\nimport type {LogContext} from '@rocicorp/logger';\nimport {resolver} from '@rocicorp/resolver';\nimport {\n singleProcessMode,\n type Subprocess,\n type Worker,\n} from '../types/processes.ts';\nimport {RunningState} from './running-state.ts';\nimport type {SingletonService} from './service.ts';\n\n/**\n * * `user-facing` workers serve external requests and are the first to\n * receive a `SIGTERM` or `SIGINT` signal for graceful shutdown.\n *\n * * `supporting` workers support `user-facing` workers and are sent\n * the `SIGTERM` signal only after all `user-facing` workers have\n * exited.\n *\n * For other kill signals, such as `SIGQUIT` and `SIGABRT`, all workers\n * are stopped without draining. `SIGQUIT` is used to represent an\n * intentional shutdown (for which draining is not beneficial), whereas\n * `SIGABRT` is used for unexpected process exits.\n */\nexport type WorkerType = 'user-facing' | 'supporting';\n\nexport const GRACEFUL_SHUTDOWN = ['SIGTERM', 'SIGINT'] as const;\nexport const FORCEFUL_SHUTDOWN = ['SIGQUIT', 'SIGABRT'] as const;\n\ntype GracefulShutdownSignal = (typeof GRACEFUL_SHUTDOWN)[number];\n\n// An internal error code used to indicate that a message has already been\n// logged at level ERRROR. When a process exits with this error code, the\n// parent process logs the exit at level WARN instead of ERROR.\nexport const UNHANDLED_EXCEPTION_ERROR_CODE = 13;\n\n// An internal error code used to indicate that the server should exit\n// without draining (e.g. due to a supporting worker get a signal to shut\n// down), but the exit is otherwise intentional.\nexport const INTENTIONAL_SHUTDOWN_ERROR_CODE = 14;\n\n/**\n * Handles readiness, termination signals, and coordination of graceful\n * shutdown.\n */\nexport class ProcessManager {\n readonly #lc: LogContext;\n readonly #userFacing = new Set<Subprocess>();\n readonly #all = new Set<Subprocess>();\n readonly #exitImpl: (code: number) => never;\n readonly #start = Date.now();\n readonly #ready: Promise<void>[] = [];\n\n #runningState = new RunningState('process-manager');\n #drainStart = 0;\n\n constructor(lc: LogContext, proc: EventEmitter) {\n this.#lc = lc.withContext('component', 'process-manager');\n\n // Propagate `SIGTERM` and `SIGINT` to all user-facing workers,\n // initiating a graceful shutdown. The parent process will\n // exit once all user-facing workers have exited ...\n for (const signal of GRACEFUL_SHUTDOWN) {\n proc.on(signal, () => this.#startDrain(signal));\n }\n\n // ... which will result in sending `SIGTERM` to the remaining workers.\n proc.on('exit', code =>\n this.#kill(\n this.#all,\n code === 0\n ? 'SIGTERM' // graceful, drained shutdown\n : code === INTENTIONAL_SHUTDOWN_ERROR_CODE\n ? 'SIGQUIT' // intentional abort without drain\n : 'SIGABRT', // unintentional shutdown, alertable error\n ),\n );\n\n // For other (catchable) kill signals, exit with a non-zero error code\n // to send a `SIGQUIT` (intentional shutdown) or `SIGABRT` (unexpected\n // shutdown) to all workers. 
For these signals, workers are stopped
    // immediately without draining, since there is no merit to slowly draining
    // when supporting workers have stopped.
    //
    // The logic for handling these signals is in `runUntilKilled()`.
    for (const signal of FORCEFUL_SHUTDOWN) {
      proc.on(signal, () =>
        this.#exit(signal === 'SIGQUIT' ? INTENTIONAL_SHUTDOWN_ERROR_CODE : -1),
      );
    }

    this.#exitImpl = (code: number) => {
      if (singleProcessMode()) {
        return proc.emit('exit', code) as never; // For unit / integration tests.
      }
      process.exit(code);
    };
  }

  done() {
    return this.#runningState.stopped();
  }

  #exit(code: number) {
    this.#lc.info?.('exiting with code', code);
    this.#runningState.stop(this.#lc);
    void this.#lc.flush().finally(() => this.#exitImpl(code));
  }

  #startDrain(signal: GracefulShutdownSignal) {
    if (this.#all.size === 0) {
      // Shut down if a signal is received before any subprocesses are added.
      this.#lc.info?.(`exiting on ${signal}`);
      this.#exit(0);
    }
    this.#lc.info?.(`initiating drain (${signal})`);
    this.#drainStart = Date.now();
    if (this.#userFacing.size) {
      this.#kill(this.#userFacing, signal);
    } else {
      this.#kill(this.#all, signal);
    }
  }

  addSubprocess(proc: Subprocess, type: WorkerType, name: string) {
    if (type === 'user-facing') {
      this.#userFacing.add(proc);
    }
    this.#all.add(proc);

    let isOpen = true;
    proc.on('close', (code, signal) => {
      isOpen = false;
      this.#onExit(code, signal, null, type, name, proc);
    });

    // As per https://nodejs.org/api/child_process.html#event-error
    // 'error' events can happen when sending a message to a child process
    // fails. This is not really an error when the server is shutting down,
    // so log any post-close errors at 'warn'.
    proc.on('error', err =>
      this.#lc[!isOpen || this.#drainStart > 0 ? 'warn' : 'error']?.(
        `error from ${name} ${proc.pid}`,
        err,
      ),
    );
  }

  readonly #initializing = new Map<number, string>();
  #nextID = 0;

  addWorker(worker: Worker, type: WorkerType, name: string): Worker {
    this.addSubprocess(worker, type, name);

    const id = ++this.#nextID;
    this.#initializing.set(id, name);
    const {promise, resolve} = resolver();
    this.#ready.push(promise);

    worker.onceMessageType('ready', () => {
      this.#lc.debug?.(`${name} ready (${Date.now() - this.#start} ms)`);
      this.#initializing.delete(id);
      resolve();
    });

    return worker;
  }

  initializing(): string[] {
    return [...this.#initializing.values()];
  }

  async allWorkersReady() {
    await Promise.all(this.#ready);
  }

  logErrorAndExit(err: unknown, name: string) {
    // Only accessible by the main (i.e. user-facing) process.
    this.#onExit(-1, null, err, 'user-facing', name, undefined);
  }

  #onExit(
    code: number,
    sig: NodeJS.Signals | null,
    err: unknown,
    type: WorkerType,
    name: string,
    worker: Subprocess | undefined,
  ) {
    // Remove the worker from maps to avoid attempting to send more signals to it.
    if (worker) {
      this.#userFacing.delete(worker);
      this.#all.delete(worker);
    }

    const pid = worker?.pid ?? process.pid;

    if (type === 'supporting') {
      // Supporting workers like the replication-manager shut down without a
      // drain signal when receiving protocol-specific instructions (like auto
      // reset). In this case, a special error code is used to signal that the
      // server should be shut down without draining, but it is otherwise not
      // considered an unexpected/alertable error.
      if (code === 0 && (this.#drainStart === 0 || this.#userFacing.size > 0)) {
        code = INTENTIONAL_SHUTDOWN_ERROR_CODE;
      }
      const log =
        code === 0 || code === INTENTIONAL_SHUTDOWN_ERROR_CODE
          ? 'info'
          : 'warn';
      this.#lc[log]?.(`${name} (${pid}) exited with code (${code})`, err ?? '');
      return this.#exit(code);
    }

    const log =
      code === 0 || code === INTENTIONAL_SHUTDOWN_ERROR_CODE
        ? 'info'
        : this.#drainStart > 0 || code === UNHANDLED_EXCEPTION_ERROR_CODE
          ? 'warn'
          : 'error';
    this.#lc[log]?.(
      sig
        ? `${name} (${pid}) killed with (${sig})`
        : `${name} (${pid}) exited with code (${code})`,
      err ?? '',
    );

    // All user-facing workers have exited or finished draining.
    if (this.#userFacing.size === 0) {
      this.#lc.info?.(
        this.#drainStart
          ? `all user-facing workers drained (${
              Date.now() - this.#drainStart
            } ms)`
          : `all user-facing workers exited`,
      );
      return this.#exit(0);
    }

    if (this.#drainStart === 0) {
      // If a user-facing worker exits without receiving a drain signal,
      // shut down the server.
      return this.#exit(code || -1);
    }

    return undefined;
  }

  #kill(workers: Iterable<Subprocess>, signal: NodeJS.Signals) {
    for (const worker of workers) {
      try {
        worker.kill(signal);
      } catch (e) {
        this.#lc.error?.(e);
      }
    }
  }
}

/**
 * Runs the specified services, stopping them on `SIGTERM` or `SIGINT` with
 * an optional {@link SingletonService.drain drain()}, or stopping them
 * without draining for `SIGQUIT`.
 *
 * @returns a Promise that resolves/rejects when any of the services stops/throws.
 */
export async function runUntilKilled(
  lc: LogContext,
  parent: EventEmitter,
  ...services: SingletonService[]
): Promise<void> {
  if (services.length === 0) {
    return;
  }
  for (const signal of [...GRACEFUL_SHUTDOWN, ...FORCEFUL_SHUTDOWN]) {
    parent.once(signal, () => {
      const GRACEFUL_SIGNALS = GRACEFUL_SHUTDOWN as readonly NodeJS.Signals[];

      services.forEach(async svc => {
        if (GRACEFUL_SIGNALS.includes(signal) && svc.drain) {
          lc.info?.(`draining ${svc.constructor.name} ${svc.id} (${signal})`);
          await svc.drain();
        }
        lc.info?.(`stopping ${svc.constructor.name} ${svc.id} (${signal})`);
        await svc.stop();
      });
    });
  }

  try {
    // Run all services and resolve when any of them stops.
    const svc = await Promise.race(
      services.map(svc => svc.run().then(() => svc)),
    );
    lc.info?.(`${svc.constructor.name} (${svc.id}) stopped`);
  } catch (e) {
    lc.error?.(`exiting on error`, e);
    throw e;
  }
}

export async function exitAfter(run: () => Promise<void>) {
  try {
    await run();
    // oxlint-disable-next-line no-console
    console.info(`pid ${pid} exiting normally`);
    process.exit(0);
  } catch (e) {
    // oxlint-disable-next-line no-console
    console.error(`pid ${pid} exiting with error`, e);
    process.exit(-1);
  }
}

const DEFAULT_STOP_INTERVAL_MS = 20_000;

/**
 * The HeartbeatMonitor monitors the cadence of heartbeats (e.g. "/keepalive"
 * health checks made to HttpServices) that signal that the server
 * should continue processing requests. When a configurable `stopInterval`
 * elapses without receiving these heartbeats, the monitor initiates a
 * graceful shutdown of the server. This works with common load balancing
 * frameworks such as AWS Elastic Load Balancing.
 *
 * The HeartbeatMonitor is **opt-in** in that it only kicks in after it
 * starts receiving keepalives.
 */
export class HeartbeatMonitor {
  readonly #stopInterval: number;

  #lc: LogContext;
  #checkIntervalTimer: NodeJS.Timeout | undefined;
  #checkImmediateTimer: NodeJS.Immediate | undefined;
  #lastHeartbeat = 0;

  constructor(lc: LogContext, stopInterval = DEFAULT_STOP_INTERVAL_MS) {
    this.#lc = lc;
    this.#stopInterval = stopInterval;
  }

  onHeartbeat(reqHeaders: IncomingHttpHeaders) {
    this.#lastHeartbeat = Date.now();
    if (this.#checkIntervalTimer === undefined) {
      this.#lc.info?.(
        `starting heartbeat monitor at ${
          this.#stopInterval / 1000
        } second interval`,
        reqHeaders,
      );
      // e.g. check every 5 seconds to see if it's been over 20 seconds
      // since the last heartbeat.
      this.#checkIntervalTimer = setInterval(
        this.#checkStopInterval,
        this.#stopInterval / 4,
      );
    }
  }

  #checkStopInterval = () => {
    // In the Node.js event loop, timers like setInterval and setTimeout
    // run *before* I/O events coming from network sockets or file reads/writes.
    // When this process gets starved of CPU resources for long periods of time,
    // for example when other processes are monopolizing all available cores,
    // pathological behavior can emerge:
    // - a keepalive network request comes in, but is queued in Node internals
    //   waiting for time on the event loop
    // - the CPU is starved/monopolized by other processes for longer than the
    //   time configured via this.#stopInterval
    // - when the CPU becomes available and the event loop wakes up, this stop
    //   interval check runs *before* the keepalive request is processed. The
    //   value of this.#lastHeartbeat is now very stale, and erroneously
    //   triggers a shutdown even though keepalive requests were about to be
    //   processed and update this.#lastHeartbeat. Downtime ensues.
    //
    // To avoid this, we push the check out to a phase of the event loop *after*
    // I/O events are processed, using setImmediate():
    // https://nodejs.org/en/learn/asynchronous-work/event-loop-timers-and-nexttick#setimmediate-vs-settimeout
    //
    // This ensures we see a value for this.#lastHeartbeat that reflects
    // any keepalive requests that came in during the current event loop turn.
    this.#checkImmediateTimer = setImmediate(() => {
      this.#checkImmediateTimer = undefined;
      const timeSinceLastHeartbeat = Date.now() - this.#lastHeartbeat;
      if (timeSinceLastHeartbeat >= this.#stopInterval) {
        this.#lc.info?.(
          `last heartbeat received ${
            timeSinceLastHeartbeat / 1000
          } seconds ago. draining.`,
        );
        process.kill(process.pid, GRACEFUL_SHUTDOWN[0]);
      }
    });
  };

  stop() {
    clearTimeout(this.#checkIntervalTimer);
    if (this.#checkImmediateTimer) {
      clearImmediate(this.#checkImmediateTimer);
    }
  }
}
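To make the lifecycle concrete, here is a minimal, hypothetical entry point composing `exitAfter()` and `runUntilKilled()` from the module above (assumed to be in scope). `EchoService` and its internals are illustrative assumptions, not part of this package; any object with the `id`/`run()`/`stop()`/optional-`drain()` shape of `SingletonService` would do. The `new LogContext('info')` constructor call is also an assumption about @rocicorp/logger.

import {LogContext} from '@rocicorp/logger';

// Hypothetical service conforming to the SingletonService shape used by
// runUntilKilled(): an id, run(), stop(), and an optional drain().
class EchoService {
  readonly id = 'echo';
  #stopped: (() => void) | undefined;

  // run() resolves when the service stops; this is what settles the
  // Promise.race() inside runUntilKilled().
  run(): Promise<void> {
    return new Promise(resolve => {
      this.#stopped = resolve;
    });
  }

  // Invoked for SIGTERM / SIGINT before stop(); finish in-flight work here.
  drain(): Promise<void> {
    return Promise.resolve();
  }

  stop(): Promise<void> {
    this.#stopped?.();
    return Promise.resolve();
  }
}

// `process` is an EventEmitter, so it can act as the signal-emitting parent.
// exitAfter() then maps the settled promise to a process exit code.
const lc = new LogContext('info');
void exitAfter(() => runUntilKilled(lc, process, new EchoService()));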
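Similarly, a sketch of how the opt-in `HeartbeatMonitor` might be fed: the `node:http` server and `/keepalive` route below are stand-in assumptions for the package's actual HttpService plumbing; only `HeartbeatMonitor` itself comes from the module above.

import {createServer} from 'node:http';
import {LogContext} from '@rocicorp/logger';

const monitor = new HeartbeatMonitor(new LogContext('info')); // 20s default stopInterval

const server = createServer((req, res) => {
  if (req.url === '/keepalive') {
    // The first heartbeat arms the monitor; it then polls at
    // stopInterval / 4, and each staleness check is deferred via
    // setImmediate() so keepalives arriving in the same event-loop turn
    // are observed before the check runs.
    monitor.onHeartbeat(req.headers);
    res.writeHead(200).end('ok');
    return;
  }
  res.writeHead(404).end();
});

server.listen(3000);
// If the load balancer stops probing /keepalive for stopInterval ms, the
// monitor sends GRACEFUL_SHUTDOWN[0] to this process to start a drain.
// On shutdown: monitor.stop(); server.close();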
@@ -22,7 +22,7 @@ function createAPI() {
   }
   return {
     init(dbPath, cpMode, pragmas, logConfig) {
-      lc = createLogContext({ log: logConfig }, { worker: "write-worker" });
+      lc = createLogContext({ log: logConfig }, "write-worker");
       db = new Database(lc, dbPath);
       applyPragmas(db, pragmas);
       runner = new StatementRunner(db);
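The only change in this hunk is the second argument to `createLogContext`: the worker name is now passed as a bare string rather than wrapped in a `{worker: ...}` object. A minimal sketch of the implied signature change; the parameter types are assumptions inferred from the two call shapes in the diff, not the package's actual declaration:

// Before (canary.2): createLogContext({ log: logConfig }, { worker: "write-worker" });
// After  (canary.3): createLogContext({ log: logConfig }, "write-worker");
declare function createLogContext(
  config: {log: unknown}, // log config; exact type not shown in this diff
  worker: string,         // worker name, previously {worker: string}
): unknown;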