@rocicorp/zero 0.26.0 → 0.26.1-canary.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/out/analyze-query/src/run-ast.d.ts.map +1 -1
- package/out/analyze-query/src/run-ast.js +4 -1
- package/out/analyze-query/src/run-ast.js.map +1 -1
- package/out/replicache/src/btree/node.js +4 -4
- package/out/replicache/src/btree/node.js.map +1 -1
- package/out/replicache/src/btree/write.js +2 -2
- package/out/replicache/src/btree/write.js.map +1 -1
- package/out/replicache/src/dag/gc.js +5 -2
- package/out/replicache/src/dag/gc.js.map +1 -1
- package/out/replicache/src/db/write.d.ts.map +1 -1
- package/out/replicache/src/db/write.js +21 -6
- package/out/replicache/src/db/write.js.map +1 -1
- package/out/replicache/src/error-responses.d.ts.map +1 -1
- package/out/replicache/src/error-responses.js +4 -1
- package/out/replicache/src/error-responses.js.map +1 -1
- package/out/replicache/src/persist/clients.d.ts.map +1 -1
- package/out/replicache/src/persist/clients.js +4 -1
- package/out/replicache/src/persist/clients.js.map +1 -1
- package/out/replicache/src/persist/collect-idb-databases.d.ts.map +1 -1
- package/out/replicache/src/persist/collect-idb-databases.js +2 -1
- package/out/replicache/src/persist/collect-idb-databases.js.map +1 -1
- package/out/replicache/src/persist/idb-databases-store.d.ts.map +1 -1
- package/out/replicache/src/persist/idb-databases-store.js +4 -1
- package/out/replicache/src/persist/idb-databases-store.js.map +1 -1
- package/out/replicache/src/process-scheduler.js +4 -1
- package/out/replicache/src/process-scheduler.js.map +1 -1
- package/out/replicache/src/replicache-impl.js +2 -2
- package/out/replicache/src/replicache-impl.js.map +1 -1
- package/out/replicache/src/subscriptions.d.ts.map +1 -1
- package/out/replicache/src/subscriptions.js +5 -2
- package/out/replicache/src/subscriptions.js.map +1 -1
- package/out/replicache/src/sync/diff.d.ts.map +1 -1
- package/out/replicache/src/sync/diff.js +4 -1
- package/out/replicache/src/sync/diff.js.map +1 -1
- package/out/replicache/src/sync/pull.d.ts.map +1 -1
- package/out/replicache/src/sync/pull.js +4 -1
- package/out/replicache/src/sync/pull.js.map +1 -1
- package/out/replicache/src/sync/push.d.ts.map +1 -1
- package/out/replicache/src/sync/push.js +5 -2
- package/out/replicache/src/sync/push.js.map +1 -1
- package/out/shared/src/asserts.d.ts +1 -1
- package/out/shared/src/asserts.d.ts.map +1 -1
- package/out/shared/src/asserts.js +1 -1
- package/out/shared/src/asserts.js.map +1 -1
- package/out/z2s/src/compiler.d.ts.map +1 -1
- package/out/z2s/src/compiler.js +8 -2
- package/out/z2s/src/compiler.js.map +1 -1
- package/out/zero/package.json.js +1 -1
- package/out/zero-cache/src/config/zero-config.d.ts +4 -0
- package/out/zero-cache/src/config/zero-config.d.ts.map +1 -1
- package/out/zero-cache/src/config/zero-config.js +17 -0
- package/out/zero-cache/src/config/zero-config.js.map +1 -1
- package/out/zero-cache/src/db/transaction-pool.d.ts.map +1 -1
- package/out/zero-cache/src/db/transaction-pool.js +17 -11
- package/out/zero-cache/src/db/transaction-pool.js.map +1 -1
- package/out/zero-cache/src/observability/events.d.ts.map +1 -1
- package/out/zero-cache/src/observability/events.js +28 -9
- package/out/zero-cache/src/observability/events.js.map +1 -1
- package/out/zero-cache/src/server/change-streamer.d.ts.map +1 -1
- package/out/zero-cache/src/server/change-streamer.js +3 -1
- package/out/zero-cache/src/server/change-streamer.js.map +1 -1
- package/out/zero-cache/src/services/analyze.js +1 -0
- package/out/zero-cache/src/services/analyze.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/backfill-stream.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/backfill-stream.js +29 -14
- package/out/zero-cache/src/services/change-source/pg/backfill-stream.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts +6 -1
- package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/initial-sync.js +69 -25
- package/out/zero-cache/src/services/change-source/pg/initial-sync.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/ddl.js +6 -1
- package/out/zero-cache/src/services/change-source/pg/schema/ddl.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/init.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/init.js +12 -8
- package/out/zero-cache/src/services/change-source/pg/schema/init.js.map +1 -1
- package/out/zero-cache/src/services/change-source/protocol/current/data.d.ts +26 -0
- package/out/zero-cache/src/services/change-source/protocol/current/data.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/protocol/current/data.js +15 -3
- package/out/zero-cache/src/services/change-source/protocol/current/data.js.map +1 -1
- package/out/zero-cache/src/services/change-source/protocol/current/downstream.d.ts +30 -0
- package/out/zero-cache/src/services/change-source/protocol/current/downstream.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/protocol/current.js +2 -1
- package/out/zero-cache/src/services/change-streamer/broadcast.d.ts +100 -0
- package/out/zero-cache/src/services/change-streamer/broadcast.d.ts.map +1 -0
- package/out/zero-cache/src/services/change-streamer/broadcast.js +171 -0
- package/out/zero-cache/src/services/change-streamer/broadcast.js.map +1 -0
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.js +22 -9
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer.d.ts +10 -0
- package/out/zero-cache/src/services/change-streamer/change-streamer.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/forwarder.d.ts +17 -1
- package/out/zero-cache/src/services/change-streamer/forwarder.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/forwarder.js +52 -4
- package/out/zero-cache/src/services/change-streamer/forwarder.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/subscriber.d.ts +18 -0
- package/out/zero-cache/src/services/change-streamer/subscriber.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/subscriber.js +68 -12
- package/out/zero-cache/src/services/change-streamer/subscriber.js.map +1 -1
- package/out/zero-cache/src/services/replicator/change-processor.d.ts +2 -0
- package/out/zero-cache/src/services/replicator/change-processor.d.ts.map +1 -1
- package/out/zero-cache/src/services/replicator/change-processor.js +8 -6
- package/out/zero-cache/src/services/replicator/change-processor.js.map +1 -1
- package/out/zero-cache/src/services/replicator/incremental-sync.d.ts.map +1 -1
- package/out/zero-cache/src/services/replicator/incremental-sync.js +39 -1
- package/out/zero-cache/src/services/replicator/incremental-sync.js.map +1 -1
- package/out/zero-cache/src/services/replicator/replication-status.d.ts +4 -3
- package/out/zero-cache/src/services/replicator/replication-status.d.ts.map +1 -1
- package/out/zero-cache/src/services/replicator/replication-status.js +25 -10
- package/out/zero-cache/src/services/replicator/replication-status.js.map +1 -1
- package/out/zero-cache/src/services/run-ast.d.ts.map +1 -1
- package/out/zero-cache/src/services/run-ast.js +22 -2
- package/out/zero-cache/src/services/run-ast.js.map +1 -1
- package/out/zero-cache/src/services/running-state.d.ts +1 -0
- package/out/zero-cache/src/services/running-state.d.ts.map +1 -1
- package/out/zero-cache/src/services/running-state.js +4 -0
- package/out/zero-cache/src/services/running-state.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr.js +8 -2
- package/out/zero-cache/src/services/view-syncer/cvr.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.js +10 -1
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/snapshotter.d.ts +1 -1
- package/out/zero-cache/src/services/view-syncer/snapshotter.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/snapshotter.js +15 -7
- package/out/zero-cache/src/services/view-syncer/snapshotter.js.map +1 -1
- package/out/zero-cache/src/types/subscription.d.ts +3 -1
- package/out/zero-cache/src/types/subscription.d.ts.map +1 -1
- package/out/zero-cache/src/types/subscription.js +21 -9
- package/out/zero-cache/src/types/subscription.js.map +1 -1
- package/out/zero-client/src/client/http-string.js.map +1 -1
- package/out/zero-client/src/client/version.js +1 -1
- package/out/zero-client/src/client/zero.js.map +1 -1
- package/out/zero-events/src/status.d.ts +8 -0
- package/out/zero-events/src/status.d.ts.map +1 -1
- package/out/zero-schema/src/permissions.d.ts.map +1 -1
- package/out/zero-schema/src/permissions.js +4 -1
- package/out/zero-schema/src/permissions.js.map +1 -1
- package/out/zero-server/src/process-mutations.d.ts.map +1 -1
- package/out/zero-server/src/process-mutations.js +13 -19
- package/out/zero-server/src/process-mutations.js.map +1 -1
- package/out/zql/src/builder/filter.d.ts.map +1 -1
- package/out/zql/src/builder/filter.js +5 -2
- package/out/zql/src/builder/filter.js.map +1 -1
- package/out/zql/src/ivm/constraint.js.map +1 -1
- package/package.json +1 -1
package/out/zero-cache/src/server/change-streamer.js

@@ -31,7 +31,8 @@ async function runWorker(parent, env, ...args) {
       address,
       protocol,
       startupDelayMs,
-      backPressureLimitHeapProportion
+      backPressureLimitHeapProportion,
+      flowControlConsensusPaddingSeconds
     },
     upstream,
     change,
@@ -83,6 +84,7 @@ async function runWorker(parent, env, ...args) {
         subscriptionState,
        autoReset ?? false,
        backPressureLimitHeapProportion,
+        flowControlConsensusPaddingSeconds,
        setTimeout
      );
      break;
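Net effect of these two hunks: the change-streamer worker now reads a new flowControlConsensusPaddingSeconds option from config.changeStreamer and forwards it to initializeStreamer, just before the setTimeout argument. This presumably corresponds to the +4/+17 additions to zero-config.d.ts/.js in the file list above. A minimal sketch of the config shape this implies — only the field names come from the diff; the types are assumptions, not the package's actual declarations:

// Hypothetical shape inferred from the destructuring in the hunk above.
type ChangeStreamerOptions = {
  port: number;
  address: string;
  protocol: string;
  startupDelayMs: number;
  backPressureLimitHeapProportion: number;
  // new in 0.26.1-canary.11; presumably seconds of padding applied to
  // flow-control consensus timing
  flowControlConsensusPaddingSeconds: number;
};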
package/out/zero-cache/src/server/change-streamer.js.map

@@ -1 +1 @@
[single-line source map regenerated for the changes above; minified contents omitted]
package/out/zero-cache/src/services/analyze.js.map

@@ -1 +1 @@
[single-line source map regenerated; minified contents omitted]
package/out/zero-cache/src/services/change-source/pg/backfill-stream.d.ts.map

@@ -1 +1 @@
[single-line declaration map regenerated; minified contents omitted]
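The regenerated declaration map reflects backfill-stream.d.ts now importing the new DownloadStatements type from initial-sync.ts. Based on how it is destructured in the hunks below, the type plausibly looks like the following — an inference from this diff, not the published declaration:

// Inferred from the {select, getTotalRows, getTotalBytes} destructuring
// in the stream() signature below.
type DownloadStatements = {
  select: string;        // SELECT ... statement fed to COPY (...) TO STDOUT
  getTotalRows: string;  // query returning the estimated total row count
  getTotalBytes: string; // query returning the estimated total byte size
};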
package/out/zero-cache/src/services/change-source/pg/backfill-stream.js

@@ -9,7 +9,7 @@ import { TransactionPool } from "../../../db/transaction-pool.js";
 import { pgClient } from "../../../types/pg.js";
 import { SchemaIncompatibilityError } from "../common/backfill-manager.js";
 import { tableMetadataSchema, columnMetadataSchema } from "./backfill-metadata.js";
-import { makeSelectPublishedStmt, createReplicationSlot } from "./initial-sync.js";
+import { makeDownloadStatements, createReplicationSlot } from "./initial-sync.js";
 import { toStateVersionString } from "./lsn.js";
 import { getPublicationInfo } from "./schema/published.js";
 const POSTGRES_COPY_CHUNK_SIZE = 64 * 1024;
@@ -17,7 +17,9 @@ async function* streamBackfill(lc, upstreamURI, { slot, publications }, bf, opts) {
   lc = lc.withContext("component", "backfill").withContext("table", bf.table.name);
   const { flushThresholdBytes = POSTGRES_COPY_CHUNK_SIZE } = opts;
   const db = pgClient(lc, upstreamURI, {
-    connection: { ["application_name"]: "backfill-stream" }
+    connection: { ["application_name"]: "backfill-stream" },
+    ["max_lifetime"]: 120 * 60
+    // set a long (2h) limit for COPY streaming
   });
   const tx = new TransactionPool(lc, READONLY).run(db);
   try {
@@ -35,7 +37,7 @@ async function* streamBackfill(lc, upstreamURI, { slot, publications }, bf, opts) {
       lc,
       tx,
       backfill,
-      makeSelectPublishedStmt(tableSpec, cols),
+      makeDownloadStatements(tableSpec, cols),
       cols.map((col) => types.getTypeParser(tableSpec.columns[col].typeOID)),
       flushThresholdBytes
     );
@@ -51,21 +53,34 @@ async function* streamBackfill(lc, upstreamURI, { slot, publications }, bf, opts) {
     void db.end().catch((e) => lc.warn?.(`error closing backfill connection`, e));
   }
 }
-async function* stream(lc, tx, backfill, selectStmt, colParsers, flushThresholdBytes) {
+async function* stream(lc, tx, backfill, { select, getTotalRows, getTotalBytes }, colParsers, flushThresholdBytes) {
   const start = performance.now();
-  lc.info?.(`Starting backfill copy stream:`, selectStmt);
+  const [rows, bytes] = await tx.processReadTask(
+    (sql) => Promise.all([
+      sql.unsafe(getTotalRows),
+      sql.unsafe(getTotalBytes)
+    ])
+  );
+  const status = {
+    rows: 0,
+    totalRows: Number(rows[0].totalRows),
+    totalBytes: Number(bytes[0].totalBytes)
+  };
+  let elapsed = (performance.now() - start).toFixed(3);
+  lc.info?.(`Computed total rows and bytes for: ${select} (${elapsed} ms)`, {
+    status
+  });
   const copyStream = await tx.processReadTask(
-    (sql) => sql.unsafe(`COPY (${selectStmt}) TO STDOUT`).readable()
+    (sql) => sql.unsafe(`COPY (${select}) TO STDOUT`).readable()
   );
   const tsvParser = new TsvParser();
-  let totalRows = 0;
   let totalBytes = 0;
   let totalMsgs = 0;
   let rowValues = [];
   let bufferedBytes = 0;
   const logFlushed = () => {
     lc.debug?.(
-      `Flushed ${rowValues.length} rows, ${bufferedBytes} bytes (total: rows=${totalRows}, msgs=${totalMsgs}, bytes=${totalBytes})`
+      `Flushed ${rowValues.length} rows, ${bufferedBytes} bytes (total: rows=${status.rows}, msgs=${totalMsgs}, bytes=${totalBytes})`
     );
   };
   let row = Array.from({ length: colParsers.length });
@@ -76,7 +91,7 @@ async function* stream(lc, tx, backfill, selectStmt, colParsers, flushThresholdBytes) {
       row[col] = text === null ? null : colParsers[col](text);
       if (++col === colParsers.length) {
         rowValues.push(row);
-        totalRows++;
+        status.rows++;
         row = Array.from({ length: colParsers.length });
         col = 0;
       }
@@ -84,7 +99,7 @@ async function* stream(lc, tx, backfill, selectStmt, colParsers, flushThresholdBytes) {
     bufferedBytes += chunk.byteLength;
     totalBytes += chunk.byteLength;
     if (bufferedBytes >= flushThresholdBytes) {
-      yield { tag: "backfill", ...backfill, rowValues };
+      yield { tag: "backfill", ...backfill, rowValues, status };
       totalMsgs++;
       logFlushed();
       rowValues = [];
@@ -92,14 +107,14 @@ async function* stream(lc, tx, backfill, selectStmt, colParsers, flushThresholdBytes) {
     }
   }
   if (rowValues.length > 0) {
-    yield { tag: "backfill", ...backfill, rowValues };
+    yield { tag: "backfill", ...backfill, rowValues, status };
     totalMsgs++;
     logFlushed();
   }
-  yield { tag: "backfill-completed", ...backfill };
-  const elapsed = performance.now() - start;
+  yield { tag: "backfill-completed", ...backfill, status };
+  elapsed = (performance.now() - start).toFixed(3);
   lc.info?.(
-    `Finished streaming ${totalRows} rows, ${totalMsgs} msgs, ${totalBytes} bytes (${elapsed.toFixed(3)} ms)`
+    `Finished streaming ${status.rows} rows, ${totalMsgs} msgs, ${totalBytes} bytes (${elapsed} ms)`
   );
 }
 async function setSnapshot(lc, upstreamURI, tx, slotNamePrefix) {
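The hunks above replace the bare SELECT statement with the DownloadStatements bundle and attach a running status object ({ rows, totalRows, totalBytes }) to every backfill and backfill-completed message. A sketch of how a downstream consumer could use that status for progress reporting — the status shape comes from this diff, but the consumer function is hypothetical:

// Status shape attached to backfill messages in this diff.
type DownloadStatus = {
  rows: number;       // rows streamed so far
  totalRows: number;  // from the getTotalRows statement
  totalBytes: number; // from the getTotalBytes statement
};

type BackfillMessage = {tag: string; status?: DownloadStatus};

// Hypothetical consumer: log percentage progress as messages arrive.
async function logBackfillProgress(messages: AsyncIterable<BackfillMessage>) {
  for await (const {tag, status} of messages) {
    if (tag === 'backfill' && status && status.totalRows > 0) {
      const pct = ((100 * status.rows) / status.totalRows).toFixed(1);
      console.log(`backfill: ${status.rows}/${status.totalRows} rows (${pct}%)`);
    }
  }
}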
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"backfill-stream.js","sources":["../../../../../../../zero-cache/src/services/change-source/pg/backfill-stream.ts"],"sourcesContent":["import {\n PG_UNDEFINED_COLUMN,\n PG_UNDEFINED_TABLE,\n} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport postgres from 'postgres';\nimport {equals} from '../../../../../shared/src/set-utils.ts';\nimport * as v from '../../../../../shared/src/valita.ts';\nimport {READONLY} from '../../../db/mode-enum.ts';\nimport {TsvParser} from '../../../db/pg-copy.ts';\nimport {getTypeParsers, type TypeParser} from '../../../db/pg-type-parser.ts';\nimport type {PublishedTableSpec} from '../../../db/specs.ts';\nimport {TransactionPool} from '../../../db/transaction-pool.ts';\nimport {pgClient} from '../../../types/pg.ts';\nimport {SchemaIncompatibilityError} from '../common/backfill-manager.ts';\nimport type {\n BackfillCompleted,\n BackfillRequest,\n JSONValue,\n MessageBackfill,\n} from '../protocol/current.ts';\nimport {\n columnMetadataSchema,\n tableMetadataSchema,\n} from './backfill-metadata.ts';\nimport {\n createReplicationSlot,\n makeSelectPublishedStmt,\n} from './initial-sync.ts';\nimport {toStateVersionString} from './lsn.ts';\nimport {getPublicationInfo} from './schema/published.ts';\nimport type {Replica} from './schema/shard.ts';\n\ntype BackfillParams = Omit<BackfillCompleted, 'tag'>;\n\ntype StreamOptions = {\n /**\n * The number of bytes at which to flush a batch of rows in a\n * backfill message. Defaults to Node's getDefaultHighWatermark().\n */\n flushThresholdBytes?: number;\n};\n\n// The size of chunks that Postgres sends on COPY stream.\n// This happens to match NodeJS's getDefaultHighWatermark()\n// (for Node v20+).\nconst POSTGRES_COPY_CHUNK_SIZE = 64 * 1024;\n\n/**\n * Streams a series of `backfill` messages (ending with `backfill-complete`)\n * at a set watermark (i.e. LSN). 
The data is retrieved via a COPY stream\n * made at a transaction snapshot corresponding to specific LSN, obtained by\n * creating a short-lived replication slot.\n */\nexport async function* streamBackfill(\n lc: LogContext,\n upstreamURI: string,\n {slot, publications}: Pick<Replica, 'slot' | 'publications'>,\n bf: BackfillRequest,\n opts: StreamOptions = {},\n): AsyncGenerator<MessageBackfill | BackfillCompleted> {\n lc = lc\n .withContext('component', 'backfill')\n .withContext('table', bf.table.name);\n\n const {flushThresholdBytes = POSTGRES_COPY_CHUNK_SIZE} = opts;\n const db = pgClient(lc, upstreamURI, {\n connection: {['application_name']: 'backfill-stream'},\n });\n const tx = new TransactionPool(lc, READONLY).run(db);\n try {\n const watermark = await setSnapshot(lc, upstreamURI, tx, slot);\n const {tableSpec, backfill} = await validateSchema(\n tx,\n publications,\n bf,\n watermark,\n );\n const types = await getTypeParsers(db, {returnJsonAsString: true});\n\n // Note: validateSchema ensures that the rowKey and columns are disjoint\n const {relation, columns} = backfill;\n const cols = [...relation.rowKey.columns, ...columns];\n\n yield* stream(\n lc,\n tx,\n backfill,\n makeSelectPublishedStmt(tableSpec, cols),\n cols.map(col => types.getTypeParser(tableSpec.columns[col].typeOID)),\n flushThresholdBytes,\n );\n } catch (e) {\n // Although we make the best effort to validate the schema at the\n // transaction snapshot, certain forms of `ALTER TABLE` are not\n // MVCC safe and not \"frozen\" in the snapshot:\n //\n // https://www.postgresql.org/docs/current/mvcc-caveats.html\n //\n // Handle these errors as schema incompatibility errors rather than\n // unknown runtime errors.\n if (\n e instanceof postgres.PostgresError &&\n (e.code === PG_UNDEFINED_TABLE || e.code === PG_UNDEFINED_COLUMN)\n ) {\n throw new SchemaIncompatibilityError(bf, String(e), {cause: e});\n }\n throw e;\n } finally {\n tx.setDone();\n // errors are already thrown and handled from processReadTask()\n void tx.done().catch(() => {});\n // Workaround postgres.js hanging at the end of some COPY commands:\n // https://github.com/porsager/postgres/issues/499\n void db.end().catch(e => lc.warn?.(`error closing backfill connection`, e));\n }\n}\n\nasync function* stream(\n lc: LogContext,\n tx: TransactionPool,\n backfill: BackfillParams,\n selectStmt: string,\n colParsers: TypeParser[],\n flushThresholdBytes: number,\n): AsyncGenerator<MessageBackfill | BackfillCompleted> {\n const start = performance.now();\n lc.info?.(`Starting backfill copy stream:`, selectStmt);\n const copyStream = await tx.processReadTask(sql =>\n sql.unsafe(`COPY (${selectStmt}) TO STDOUT`).readable(),\n );\n\n const tsvParser = new TsvParser();\n let totalRows = 0;\n let totalBytes = 0;\n let totalMsgs = 0;\n let rowValues: JSONValue[][] = [];\n let bufferedBytes = 0;\n\n const logFlushed = () => {\n lc.debug?.(\n `Flushed ${rowValues.length} rows, ${bufferedBytes} bytes ` +\n `(total: rows=${totalRows}, msgs=${totalMsgs}, bytes=${totalBytes})`,\n );\n };\n\n // Tracks the row being parsed.\n let row: JSONValue[] = Array.from({length: colParsers.length});\n let col = 0;\n\n for await (const data of copyStream) {\n const chunk = data as Buffer;\n for (const text of tsvParser.parse(chunk)) {\n row[col] = text === null ? 
null : (colParsers[col](text) as JSONValue);\n\n if (++col === colParsers.length) {\n rowValues.push(row);\n totalRows++;\n row = Array.from({length: colParsers.length});\n col = 0;\n }\n }\n bufferedBytes += chunk.byteLength;\n totalBytes += chunk.byteLength;\n\n if (bufferedBytes >= flushThresholdBytes) {\n yield {tag: 'backfill', ...backfill, rowValues};\n totalMsgs++;\n logFlushed();\n rowValues = [];\n bufferedBytes = 0;\n }\n }\n\n // Flush the last batch of rows.\n if (rowValues.length > 0) {\n yield {tag: 'backfill', ...backfill, rowValues};\n totalMsgs++;\n logFlushed();\n }\n\n yield {tag: 'backfill-completed', ...backfill};\n const elapsed = performance.now() - start;\n lc.info?.(\n `Finished streaming ${totalRows} rows, ${totalMsgs} msgs, ${totalBytes} bytes ` +\n `(${elapsed.toFixed(3)} ms)`,\n );\n}\n\n/**\n * Creates (and drops) a replication slot in order to obtain a snapshot\n * that corresponds with a specific LSN. Sets the snapshot on the\n * TransactionPool and returns the watermark corresponding to the LSN.\n *\n * (Note that PG's other LSN-related functions are not scoped to a\n * transaction; this is the only way to get set a transaction at a specific\n * LSN.)\n */\nasync function setSnapshot(\n lc: LogContext,\n upstreamURI: string,\n tx: TransactionPool,\n slotNamePrefix: string,\n) {\n const replicationSession = pgClient(lc, upstreamURI, {\n ['fetch_types']: false, // Necessary for the streaming protocol\n connection: {replication: 'database'}, // https://www.postgresql.org/docs/current/protocol-replication.html\n });\n const tempSlot = `${slotNamePrefix}_bf_${Date.now()}`;\n try {\n const {snapshot_name: snapshot, consistent_point: lsn} =\n await createReplicationSlot(lc, replicationSession, tempSlot);\n\n await tx.processReadTask(sql =>\n sql.unsafe(`SET TRANSACTION SNAPSHOT '${snapshot}'`),\n );\n // Once the snapshot has been set, the replication session and slot can\n // be closed / dropped.\n await replicationSession.unsafe(`DROP_REPLICATION_SLOT \"${tempSlot}\"`);\n\n const watermark = toStateVersionString(lsn);\n lc.info?.(`Opened snapshot transaction at LSN ${lsn} (${watermark})`);\n return watermark;\n } catch (e) {\n // In the event of a failure, clean up the replication slot if created.\n await replicationSession.unsafe(\n /*sql*/\n `SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots\n WHERE slot_name = '${tempSlot}'`,\n );\n lc.error?.(`Failed to create backfill snapshot`, e);\n throw e;\n } finally {\n await replicationSession.end();\n }\n}\n\nfunction validateSchema(\n tx: TransactionPool,\n publications: string[],\n bf: BackfillRequest,\n watermark: string,\n): Promise<{\n tableSpec: PublishedTableSpec;\n backfill: BackfillParams;\n}> {\n return tx.processReadTask(async sql => {\n const {tables} = await getPublicationInfo(sql, publications);\n const spec = tables.find(\n spec => spec.schema === bf.table.schema && spec.name === bf.table.name,\n );\n if (!spec) {\n throw new SchemaIncompatibilityError(\n bf,\n `Table has been renamed or dropped`,\n );\n }\n const tableMeta = v.parse(bf.table.metadata, tableMetadataSchema);\n if (spec.schemaOID !== tableMeta.schemaOID) {\n throw new SchemaIncompatibilityError(\n bf,\n `Schema no longer corresponds to the original schema`,\n );\n }\n if (spec.oid !== tableMeta.relationOID) {\n throw new SchemaIncompatibilityError(\n bf,\n `Table no longer corresponds to the original table`,\n );\n }\n if (\n !equals(\n new Set(Object.keys(tableMeta.rowKey)),\n new Set(spec.replicaIdentityColumns),\n 
)\n ) {\n throw new SchemaIncompatibilityError(\n bf,\n 'Row key (e.g. PRIMARY KEY or INDEX) has changed',\n );\n }\n const allCols = [\n ...Object.entries(tableMeta.rowKey),\n ...Object.entries(bf.columns),\n ];\n for (const [col, val] of allCols) {\n const colSpec = spec.columns[col];\n if (!colSpec) {\n throw new SchemaIncompatibilityError(\n bf,\n `Column ${col} has been renamed or dropped`,\n );\n }\n const colMeta = v.parse(val, columnMetadataSchema);\n if (colMeta.attNum !== colSpec.pos) {\n throw new SchemaIncompatibilityError(\n bf,\n `Column ${col} no longer corresponds to the original column`,\n );\n }\n }\n const backfill: BackfillParams = {\n relation: {\n schema: bf.table.schema,\n name: bf.table.name,\n rowKey: {columns: Object.keys(tableMeta.rowKey)},\n },\n columns: Object.keys(bf.columns).filter(\n col => !(col in tableMeta.rowKey),\n ),\n watermark,\n };\n return {tableSpec: spec, backfill};\n });\n}\n"],"names":["spec","v.parse"],"mappings":";;;;;;;;;;;;;;AA8CA,MAAM,2BAA2B,KAAK;AAQtC,gBAAuB,eACrB,IACA,aACA,EAAC,MAAM,gBACP,IACA,OAAsB,IAC+B;AACrD,OAAK,GACF,YAAY,aAAa,UAAU,EACnC,YAAY,SAAS,GAAG,MAAM,IAAI;AAErC,QAAM,EAAC,sBAAsB,yBAAA,IAA4B;AACzD,QAAM,KAAK,SAAS,IAAI,aAAa;AAAA,IACnC,YAAY,EAAC,CAAC,kBAAkB,GAAG,kBAAA;AAAA,EAAiB,CACrD;AACD,QAAM,KAAK,IAAI,gBAAgB,IAAI,QAAQ,EAAE,IAAI,EAAE;AACnD,MAAI;AACF,UAAM,YAAY,MAAM,YAAY,IAAI,aAAa,IAAI,IAAI;AAC7D,UAAM,EAAC,WAAW,SAAA,IAAY,MAAM;AAAA,MAClC;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAEF,UAAM,QAAQ,MAAM,eAAe,IAAI,EAAC,oBAAoB,MAAK;AAGjE,UAAM,EAAC,UAAU,QAAA,IAAW;AAC5B,UAAM,OAAO,CAAC,GAAG,SAAS,OAAO,SAAS,GAAG,OAAO;AAEpD,WAAO;AAAA,MACL;AAAA,MACA;AAAA,MACA;AAAA,MACA,wBAAwB,WAAW,IAAI;AAAA,MACvC,KAAK,IAAI,CAAA,QAAO,MAAM,cAAc,UAAU,QAAQ,GAAG,EAAE,OAAO,CAAC;AAAA,MACnE;AAAA,IAAA;AAAA,EAEJ,SAAS,GAAG;AASV,QACE,aAAa,SAAS,kBACrB,EAAE,SAAS,sBAAsB,EAAE,SAAS,sBAC7C;AACA,YAAM,IAAI,2BAA2B,IAAI,OAAO,CAAC,GAAG,EAAC,OAAO,GAAE;AAAA,IAChE;AACA,UAAM;AAAA,EACR,UAAA;AACE,OAAG,QAAA;AAEH,SAAK,GAAG,OAAO,MAAM,MAAM;AAAA,IAAC,CAAC;AAG7B,SAAK,GAAG,MAAM,MAAM,OAAK,GAAG,OAAO,qCAAqC,CAAC,CAAC;AAAA,EAC5E;AACF;AAEA,gBAAgB,OACd,IACA,IACA,UACA,YACA,YACA,qBACqD;AACrD,QAAM,QAAQ,YAAY,IAAA;AAC1B,KAAG,OAAO,kCAAkC,UAAU;AACtD,QAAM,aAAa,MAAM,GAAG;AAAA,IAAgB,SAC1C,IAAI,OAAO,SAAS,UAAU,aAAa,EAAE,SAAA;AAAA,EAAS;AAGxD,QAAM,YAAY,IAAI,UAAA;AACtB,MAAI,YAAY;AAChB,MAAI,aAAa;AACjB,MAAI,YAAY;AAChB,MAAI,YAA2B,CAAA;AAC/B,MAAI,gBAAgB;AAEpB,QAAM,aAAa,MAAM;AACvB,OAAG;AAAA,MACD,WAAW,UAAU,MAAM,UAAU,aAAa,uBAChC,SAAS,UAAU,SAAS,WAAW,UAAU;AAAA,IAAA;AAAA,EAEvE;AAGA,MAAI,MAAmB,MAAM,KAAK,EAAC,QAAQ,WAAW,QAAO;AAC7D,MAAI,MAAM;AAEV,mBAAiB,QAAQ,YAAY;AACnC,UAAM,QAAQ;AACd,eAAW,QAAQ,UAAU,MAAM,KAAK,GAAG;AACzC,UAAI,GAAG,IAAI,SAAS,OAAO,OAAQ,WAAW,GAAG,EAAE,IAAI;AAEvD,UAAI,EAAE,QAAQ,WAAW,QAAQ;AAC/B,kBAAU,KAAK,GAAG;AAClB;AACA,cAAM,MAAM,KAAK,EAAC,QAAQ,WAAW,QAAO;AAC5C,cAAM;AAAA,MACR;AAAA,IACF;AACA,qBAAiB,MAAM;AACvB,kBAAc,MAAM;AAEpB,QAAI,iBAAiB,qBAAqB;AACxC,YAAM,EAAC,KAAK,YAAY,GAAG,UAAU,UAAA;AACrC;AACA,iBAAA;AACA,kBAAY,CAAA;AACZ,sBAAgB;AAAA,IAClB;AAAA,EACF;AAGA,MAAI,UAAU,SAAS,GAAG;AACxB,UAAM,EAAC,KAAK,YAAY,GAAG,UAAU,UAAA;AACrC;AACA,eAAA;AAAA,EACF;AAEA,QAAM,EAAC,KAAK,sBAAsB,GAAG,SAAA;AACrC,QAAM,UAAU,YAAY,IAAA,IAAQ;AACpC,KAAG;AAAA,IACD,sBAAsB,SAAS,UAAU,SAAS,UAAU,UAAU,WAChE,QAAQ,QAAQ,CAAC,CAAC;AAAA,EAAA;AAE5B;AAWA,eAAe,YACb,IACA,aACA,IACA,gBACA;AACA,QAAM,qBAAqB,SAAS,IAAI,aAAa;AAAA,IACnD,CAAC,aAAa,GAAG;AAAA;AAAA,IACjB,YAAY,EAAC,aAAa,WAAA;AAAA;AAAA,EAAU,CACrC;AACD,QAAM,WAAW,GAAG,cAAc,OAAO,KAAK,KAAK;AACnD,MAAI;AACF,UAAM,EAAC,eAAe,UAAU,kBAAkB,QAChD,MAAM,sBAAsB,IAAI,oBAAoB,QAAQ;AAE9D,UAAM,GAAG;AAAA,MAAgB,CAAA,QACvB,IAAI,OAAO,6BAA6B,QAAQ,GAA
G;AAAA,IAAA;AAIrD,UAAM,mBAAmB,OAAO,0BAA0B,QAAQ,GAAG;AAErE,UAAM,YAAY,qBAAqB,GAAG;AAC1C,OAAG,OAAO,sCAAsC,GAAG,KAAK,SAAS,GAAG;AACpE,WAAO;AAAA,EACT,SAAS,GAAG;AAEV,UAAM,mBAAmB;AAAA;AAAA,MAEvB;AAAA,8BACwB,QAAQ;AAAA,IAAA;AAElC,OAAG,QAAQ,sCAAsC,CAAC;AAClD,UAAM;AAAA,EACR,UAAA;AACE,UAAM,mBAAmB,IAAA;AAAA,EAC3B;AACF;AAEA,SAAS,eACP,IACA,cACA,IACA,WAIC;AACD,SAAO,GAAG,gBAAgB,OAAM,QAAO;AACrC,UAAM,EAAC,OAAA,IAAU,MAAM,mBAAmB,KAAK,YAAY;AAC3D,UAAM,OAAO,OAAO;AAAA,MAClB,CAAAA,UAAQA,MAAK,WAAW,GAAG,MAAM,UAAUA,MAAK,SAAS,GAAG,MAAM;AAAA,IAAA;AAEpE,QAAI,CAAC,MAAM;AACT,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,MAAA;AAAA,IAEJ;AACA,UAAM,YAAYC,MAAQ,GAAG,MAAM,UAAU,mBAAmB;AAChE,QAAI,KAAK,cAAc,UAAU,WAAW;AAC1C,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,MAAA;AAAA,IAEJ;AACA,QAAI,KAAK,QAAQ,UAAU,aAAa;AACtC,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,MAAA;AAAA,IAEJ;AACA,QACE,CAAC;AAAA,MACC,IAAI,IAAI,OAAO,KAAK,UAAU,MAAM,CAAC;AAAA,MACrC,IAAI,IAAI,KAAK,sBAAsB;AAAA,IAAA,GAErC;AACA,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,MAAA;AAAA,IAEJ;AACA,UAAM,UAAU;AAAA,MACd,GAAG,OAAO,QAAQ,UAAU,MAAM;AAAA,MAClC,GAAG,OAAO,QAAQ,GAAG,OAAO;AAAA,IAAA;AAE9B,eAAW,CAAC,KAAK,GAAG,KAAK,SAAS;AAChC,YAAM,UAAU,KAAK,QAAQ,GAAG;AAChC,UAAI,CAAC,SAAS;AACZ,cAAM,IAAI;AAAA,UACR;AAAA,UACA,UAAU,GAAG;AAAA,QAAA;AAAA,MAEjB;AACA,YAAM,UAAUA,MAAQ,KAAK,oBAAoB;AACjD,UAAI,QAAQ,WAAW,QAAQ,KAAK;AAClC,cAAM,IAAI;AAAA,UACR;AAAA,UACA,UAAU,GAAG;AAAA,QAAA;AAAA,MAEjB;AAAA,IACF;AACA,UAAM,WAA2B;AAAA,MAC/B,UAAU;AAAA,QACR,QAAQ,GAAG,MAAM;AAAA,QACjB,MAAM,GAAG,MAAM;AAAA,QACf,QAAQ,EAAC,SAAS,OAAO,KAAK,UAAU,MAAM,EAAA;AAAA,MAAC;AAAA,MAEjD,SAAS,OAAO,KAAK,GAAG,OAAO,EAAE;AAAA,QAC/B,CAAA,QAAO,EAAE,OAAO,UAAU;AAAA,MAAA;AAAA,MAE5B;AAAA,IAAA;AAEF,WAAO,EAAC,WAAW,MAAM,SAAA;AAAA,EAC3B,CAAC;AACH;"}
+{"version":3,"file":"backfill-stream.js","sources":["../../../../../../../zero-cache/src/services/change-source/pg/backfill-stream.ts"],"sourcesContent":["import {\n  PG_UNDEFINED_COLUMN,\n  PG_UNDEFINED_TABLE,\n} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport postgres from 'postgres';\nimport {equals} from '../../../../../shared/src/set-utils.ts';\nimport * as v from '../../../../../shared/src/valita.ts';\nimport {READONLY} from '../../../db/mode-enum.ts';\nimport {TsvParser} from '../../../db/pg-copy.ts';\nimport {getTypeParsers, type TypeParser} from '../../../db/pg-type-parser.ts';\nimport type {PublishedTableSpec} from '../../../db/specs.ts';\nimport {TransactionPool} from '../../../db/transaction-pool.ts';\nimport {pgClient} from '../../../types/pg.ts';\nimport {SchemaIncompatibilityError} from '../common/backfill-manager.ts';\nimport type {\n  BackfillCompleted,\n  BackfillRequest,\n  DownloadStatus,\n  JSONValue,\n  MessageBackfill,\n} from '../protocol/current.ts';\nimport {\n  columnMetadataSchema,\n  tableMetadataSchema,\n} from './backfill-metadata.ts';\nimport {\n  createReplicationSlot,\n  makeDownloadStatements,\n  type DownloadStatements,\n} from './initial-sync.ts';\nimport {toStateVersionString} from './lsn.ts';\nimport {getPublicationInfo} from './schema/published.ts';\nimport type {Replica} from './schema/shard.ts';\n\ntype BackfillParams = Omit<BackfillCompleted, 'tag'>;\n\ntype StreamOptions = {\n  /**\n   * The number of bytes at which to flush a batch of rows in a\n   * backfill message. Defaults to Node's getDefaultHighWatermark().\n   */\n  flushThresholdBytes?: number;\n};\n\n// The size of chunks that Postgres sends on COPY stream.\n// This happens to match NodeJS's getDefaultHighWatermark()\n// (for Node v20+).\nconst POSTGRES_COPY_CHUNK_SIZE = 64 * 1024;\n\n/**\n * Streams a series of `backfill` messages (ending with `backfill-complete`)\n * at a set watermark (i.e. LSN). The data is retrieved via a COPY stream\n * made at a transaction snapshot corresponding to specific LSN, obtained by\n * creating a short-lived replication slot.\n */\nexport async function* streamBackfill(\n  lc: LogContext,\n  upstreamURI: string,\n  {slot, publications}: Pick<Replica, 'slot' | 'publications'>,\n  bf: BackfillRequest,\n  opts: StreamOptions = {},\n): AsyncGenerator<MessageBackfill | BackfillCompleted> {\n  lc = lc\n    .withContext('component', 'backfill')\n    .withContext('table', bf.table.name);\n\n  const {flushThresholdBytes = POSTGRES_COPY_CHUNK_SIZE} = opts;\n  const db = pgClient(lc, upstreamURI, {\n    connection: {['application_name']: 'backfill-stream'},\n    ['max_lifetime']: 120 * 60, // set a long (2h) limit for COPY streaming\n  });\n  const tx = new TransactionPool(lc, READONLY).run(db);\n  try {\n    const watermark = await setSnapshot(lc, upstreamURI, tx, slot);\n    const {tableSpec, backfill} = await validateSchema(\n      tx,\n      publications,\n      bf,\n      watermark,\n    );\n    const types = await getTypeParsers(db, {returnJsonAsString: true});\n\n    // Note: validateSchema ensures that the rowKey and columns are disjoint\n    const {relation, columns} = backfill;\n    const cols = [...relation.rowKey.columns, ...columns];\n\n    yield* stream(\n      lc,\n      tx,\n      backfill,\n      makeDownloadStatements(tableSpec, cols),\n      cols.map(col => types.getTypeParser(tableSpec.columns[col].typeOID)),\n      flushThresholdBytes,\n    );\n  } catch (e) {\n    // Although we make the best effort to validate the schema at the\n    // transaction snapshot, certain forms of `ALTER TABLE` are not\n    // MVCC safe and not \"frozen\" in the snapshot:\n    //\n    // https://www.postgresql.org/docs/current/mvcc-caveats.html\n    //\n    // Handle these errors as schema incompatibility errors rather than\n    // unknown runtime errors.\n    if (\n      e instanceof postgres.PostgresError &&\n      (e.code === PG_UNDEFINED_TABLE || e.code === PG_UNDEFINED_COLUMN)\n    ) {\n      throw new SchemaIncompatibilityError(bf, String(e), {cause: e});\n    }\n    throw e;\n  } finally {\n    tx.setDone();\n    // errors are already thrown and handled from processReadTask()\n    void tx.done().catch(() => {});\n    // Workaround postgres.js hanging at the end of some COPY commands:\n    // https://github.com/porsager/postgres/issues/499\n    void db.end().catch(e => lc.warn?.(`error closing backfill connection`, e));\n  }\n}\n\nasync function* stream(\n  lc: LogContext,\n  tx: TransactionPool,\n  backfill: BackfillParams,\n  {select, getTotalRows, getTotalBytes}: DownloadStatements,\n  colParsers: TypeParser[],\n  flushThresholdBytes: number,\n): AsyncGenerator<MessageBackfill | BackfillCompleted> {\n  const start = performance.now();\n  const [rows, bytes] = await tx.processReadTask(sql =>\n    Promise.all([\n      sql.unsafe<{totalRows: bigint}[]>(getTotalRows),\n      sql.unsafe<{totalBytes: bigint}[]>(getTotalBytes),\n    ]),\n  );\n  const status: DownloadStatus = {\n    rows: 0,\n    totalRows: Number(rows[0].totalRows),\n    totalBytes: Number(bytes[0].totalBytes),\n  };\n\n  let elapsed = (performance.now() - start).toFixed(3);\n  lc.info?.(`Computed total rows and bytes for: ${select} (${elapsed} ms)`, {\n    status,\n  });\n  const copyStream = await tx.processReadTask(sql =>\n    sql.unsafe(`COPY (${select}) TO STDOUT`).readable(),\n  );\n\n  const tsvParser = new TsvParser();\n  let totalBytes = 0;\n  let totalMsgs = 0;\n  let rowValues: JSONValue[][] = [];\n  let bufferedBytes = 0;\n\n  const logFlushed = () => {\n    lc.debug?.(\n      `Flushed ${rowValues.length} rows, ${bufferedBytes} bytes ` +\n        `(total: rows=${status.rows}, msgs=${totalMsgs}, bytes=${totalBytes})`,\n    );\n  };\n\n  // Tracks the row being parsed.\n  let row: JSONValue[] = Array.from({length: colParsers.length});\n  let col = 0;\n\n  for await (const data of copyStream) {\n    const chunk = data as Buffer;\n    for (const text of tsvParser.parse(chunk)) {\n      row[col] = text === null ? null : (colParsers[col](text) as JSONValue);\n\n      if (++col === colParsers.length) {\n        rowValues.push(row);\n        status.rows++;\n        row = Array.from({length: colParsers.length});\n        col = 0;\n      }\n    }\n    bufferedBytes += chunk.byteLength;\n    totalBytes += chunk.byteLength;\n\n    if (bufferedBytes >= flushThresholdBytes) {\n      yield {tag: 'backfill', ...backfill, rowValues, status};\n      totalMsgs++;\n      logFlushed();\n      rowValues = [];\n      bufferedBytes = 0;\n    }\n  }\n\n  // Flush the last batch of rows.\n  if (rowValues.length > 0) {\n    yield {tag: 'backfill', ...backfill, rowValues, status};\n    totalMsgs++;\n    logFlushed();\n  }\n\n  yield {tag: 'backfill-completed', ...backfill, status};\n  elapsed = (performance.now() - start).toFixed(3);\n  lc.info?.(\n    `Finished streaming ${status.rows} rows, ${totalMsgs} msgs, ${totalBytes} bytes ` +\n      `(${elapsed} ms)`,\n  );\n}\n\n/**\n * Creates (and drops) a replication slot in order to obtain a snapshot\n * that corresponds with a specific LSN. Sets the snapshot on the\n * TransactionPool and returns the watermark corresponding to the LSN.\n *\n * (Note that PG's other LSN-related functions are not scoped to a\n * transaction; this is the only way to get set a transaction at a specific\n * LSN.)\n */\nasync function setSnapshot(\n  lc: LogContext,\n  upstreamURI: string,\n  tx: TransactionPool,\n  slotNamePrefix: string,\n) {\n  const replicationSession = pgClient(lc, upstreamURI, {\n    ['fetch_types']: false, // Necessary for the streaming protocol\n    connection: {replication: 'database'}, // https://www.postgresql.org/docs/current/protocol-replication.html\n  });\n  const tempSlot = `${slotNamePrefix}_bf_${Date.now()}`;\n  try {\n    const {snapshot_name: snapshot, consistent_point: lsn} =\n      await createReplicationSlot(lc, replicationSession, tempSlot);\n\n    await tx.processReadTask(sql =>\n      sql.unsafe(`SET TRANSACTION SNAPSHOT '${snapshot}'`),\n    );\n    // Once the snapshot has been set, the replication session and slot can\n    // be closed / dropped.\n    await replicationSession.unsafe(`DROP_REPLICATION_SLOT \"${tempSlot}\"`);\n\n    const watermark = toStateVersionString(lsn);\n    lc.info?.(`Opened snapshot transaction at LSN ${lsn} (${watermark})`);\n    return watermark;\n  } catch (e) {\n    // In the event of a failure, clean up the replication slot if created.\n    await replicationSession.unsafe(\n      /*sql*/\n      `SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots\n        WHERE slot_name = '${tempSlot}'`,\n    );\n    lc.error?.(`Failed to create backfill snapshot`, e);\n    throw e;\n  } finally {\n    await replicationSession.end();\n  }\n}\n\nfunction validateSchema(\n  tx: TransactionPool,\n  publications: string[],\n  bf: BackfillRequest,\n  watermark: string,\n): Promise<{\n  tableSpec: PublishedTableSpec;\n  backfill: BackfillParams;\n}> {\n  return tx.processReadTask(async sql => {\n    const {tables} = await getPublicationInfo(sql, publications);\n    const spec = tables.find(\n      spec => spec.schema === bf.table.schema && spec.name === bf.table.name,\n    );\n    if (!spec) {\n      throw new SchemaIncompatibilityError(\n        bf,\n        `Table has been renamed or dropped`,\n      );\n    }\n    const tableMeta = v.parse(bf.table.metadata, tableMetadataSchema);\n    if (spec.schemaOID !== tableMeta.schemaOID) {\n      throw new SchemaIncompatibilityError(\n        bf,\n        `Schema no longer corresponds to the original schema`,\n      );\n    }\n    if (spec.oid !== tableMeta.relationOID) {\n      throw new SchemaIncompatibilityError(\n        bf,\n        `Table no longer corresponds to the original table`,\n      );\n    }\n    if (\n      !equals(\n        new Set(Object.keys(tableMeta.rowKey)),\n        new Set(spec.replicaIdentityColumns),\n      )\n    ) {\n      throw new SchemaIncompatibilityError(\n        bf,\n        'Row key (e.g. PRIMARY KEY or INDEX) has changed',\n      );\n    }\n    const allCols = [\n      ...Object.entries(tableMeta.rowKey),\n      ...Object.entries(bf.columns),\n    ];\n    for (const [col, val] of allCols) {\n      const colSpec = spec.columns[col];\n      if (!colSpec) {\n        throw new SchemaIncompatibilityError(\n          bf,\n          `Column ${col} has been renamed or dropped`,\n        );\n      }\n      const colMeta = v.parse(val, columnMetadataSchema);\n      if (colMeta.attNum !== colSpec.pos) {\n        throw new SchemaIncompatibilityError(\n          bf,\n          `Column ${col} no longer corresponds to the original column`,\n        );\n      }\n    }\n    const backfill: BackfillParams = {\n      relation: {\n        schema: bf.table.schema,\n        name: bf.table.name,\n        rowKey: {columns: Object.keys(tableMeta.rowKey)},\n      },\n      columns: Object.keys(bf.columns).filter(\n        col => !(col in tableMeta.rowKey),\n      ),\n      watermark,\n    };\n    return {tableSpec: spec, backfill};\n  });\n}\n"],"names":["spec","v.parse"],"mappings":";;;;;;;;;;;;;;AAgDA,MAAM,2BAA2B,KAAK;AAQtC,gBAAuB,eACrB,IACA,aACA,EAAC,MAAM,gBACP,IACA,OAAsB,IAC+B;AACrD,OAAK,GACF,YAAY,aAAa,UAAU,EACnC,YAAY,SAAS,GAAG,MAAM,IAAI;AAErC,QAAM,EAAC,sBAAsB,yBAAA,IAA4B;AACzD,QAAM,KAAK,SAAS,IAAI,aAAa;AAAA,IACnC,YAAY,EAAC,CAAC,kBAAkB,GAAG,kBAAA;AAAA,IACnC,CAAC,cAAc,GAAG,MAAM;AAAA;AAAA,EAAA,CACzB;AACD,QAAM,KAAK,IAAI,gBAAgB,IAAI,QAAQ,EAAE,IAAI,EAAE;AACnD,MAAI;AACF,UAAM,YAAY,MAAM,YAAY,IAAI,aAAa,IAAI,IAAI;AAC7D,UAAM,EAAC,WAAW,SAAA,IAAY,MAAM;AAAA,MAClC;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAEF,UAAM,QAAQ,MAAM,eAAe,IAAI,EAAC,oBAAoB,MAAK;AAGjE,UAAM,EAAC,UAAU,QAAA,IAAW;AAC5B,UAAM,OAAO,CAAC,GAAG,SAAS,OAAO,SAAS,GAAG,OAAO;AAEpD,WAAO;AAAA,MACL;AAAA,MACA;AAAA,MACA;AAAA,MACA,uBAAuB,WAAW,IAAI;AAAA,MACtC,KAAK,IAAI,CAAA,QAAO,MAAM,cAAc,UAAU,QAAQ,GAAG,EAAE,OAAO,CAAC;AAAA,MACnE;AAAA,IAAA;AAAA,EAEJ,SAAS,GAAG;AASV,QACE,aAAa,SAAS,kBACrB,EAAE,SAAS,sBAAsB,EAAE,SAAS,sBAC7C;AACA,YAAM,IAAI,2BAA2B,IAAI,OAAO,CAAC,GAAG,EAAC,OAAO,GAAE;AAAA,IAChE;AACA,UAAM;AAAA,EACR,UAAA;AACE,OAAG,QAAA;AAEH,SAAK,GAAG,OAAO,MAAM,MAAM;AAAA,IAAC,CAAC;AAG7B,SAAK,GAAG,MAAM,MAAM,OAAK,GAAG,OAAO,qCAAqC,CAAC,CAAC;AAAA,EAC5E;AACF;AAEA,gBAAgB,OACd,IACA,IACA,UACA,EAAC,QAAQ,cAAc,cAAA,GACvB,YACA,qBACqD;AACrD,QAAM,QAAQ,YAAY,IAAA;AAC1B,QAAM,CAAC,MAAM,KAAK,IAAI,MAAM,GAAG;AAAA,IAAgB,CAAA,QAC7C,QAAQ,IAAI;AAAA,MACV,IAAI,OAA8B,YAAY;AAAA,MAC9C,IAAI,OAA+B,aAAa;AAAA,IAAA,CACjD;AAAA,EAAA;AAEH,QAAM,SAAyB;AAAA,IAC7B,MAAM;AAAA,IACN,WAAW,OAAO,KAAK,CAAC,EAAE,SAAS;AAAA,IACnC,YAAY,OAAO,MAAM,CAAC,EAAE,UAAU;AAAA,EAAA;AAGxC,MAAI,WAAW,YAAY,IAAA,IAAQ,OAAO,QAAQ,CAAC;AACnD,KAAG,OAAO,sCAAsC,MAAM,KAAK,OAAO,QAAQ;AAAA,IACxE;AAAA,EAAA,CACD;AACD,QAAM,aAAa,MAAM,GAAG;AAAA,IAAgB,SAC1C,IAAI,OAAO,SAAS,MAAM,aAAa,EAAE,SAAA;AAAA,EAAS;AAGpD,QAAM,YAAY,IAAI,UAAA;AACtB,MAAI,aAAa;AACjB,MAAI,YAAY;AAChB,MAAI,YAA2B,CAAA;AAC/B,MAAI,gBAAgB;AAEpB,QAAM,aAAa,MAAM;AACvB,OAAG;AAAA,MACD,WAAW,UAAU,MAAM,UAAU,aAAa,uBAChC,OAAO,IAAI,UAAU,SAAS,WAAW,UAAU;AAAA,IAAA;AAAA,EAEzE;AAGA,MAAI,MAAmB,MAAM,KAAK,EAAC,QAAQ,WAAW,QAAO;AAC7D,MAAI,MAAM;AAEV,mBAAiB,QAAQ,YAAY;AACnC,UAAM,QAAQ;AACd,eAAW,QAAQ,UAAU,MAAM,KAAK,GAAG;AACzC,UAAI,GAAG,IAAI,SAAS,OAAO,OAAQ,WAAW,GAAG,EAAE,IAAI;AAEvD,UAAI,EAAE,QAAQ,WAAW,QAAQ;AAC/B,kBAAU,KAAK,GAAG;AAClB,eAAO;AACP,cAAM,MAAM,KAAK,EAAC,QAAQ,WAAW,QAAO;AAC5C,cAAM;AAAA,MACR;AAAA,IACF;AACA,qBAAiB,MAAM;AACvB,kBAAc,MAAM;AAEpB,QAAI,iBAAiB,qBAAqB;AACxC,YAAM,EAAC,KAAK,YAAY,GAAG,UAAU,WAAW,OAAA;AAChD;AACA,iBAAA;AACA,kBAAY,CAAA;AACZ,sBAAgB;AAAA,IAClB;AAAA,EACF;AAGA,MAAI,UAAU,SAAS,GAAG;AACxB,UAAM,EAAC,KAAK,YAAY,GAAG,UAAU,WAAW,OAAA;AAChD;AACA,eAAA;AAAA,EACF;AAEA,QAAM,EAAC,KAAK,sBAAsB,GAAG,UAAU,OAAA;AAC/C,aAAW,YAAY,IAAA,IAAQ,OAAO,QAAQ,CAAC;AAC/C,KAAG;AAAA,IACD,sBAAsB,OAAO,IAAI,UAAU,SAAS,UAAU,UAAU,WAClE,OAAO;AAAA,EAAA;AAEjB;AAWA,eAAe,YACb,IACA,aACA,IACA,gBACA;AACA,QAAM,qBAAqB,SAAS,IAAI,aAAa;AAAA,IACnD,CAAC,aAAa,GAAG;AAAA;AAAA,IACjB,YAAY,EAAC,aAAa,WAAA;AAAA;AAAA,EAAU,CACrC;AACD,QAAM,WAAW,GAAG,cAAc,OAAO,KAAK,KAAK;AACnD,MAAI;AACF,UAAM,EAAC,eAAe,UAAU,kBAAkB,QAChD,MAAM,sBAAsB,IAAI,oBAAoB,QAAQ;AAE9D,UAAM,GAAG;AAAA,MAAgB,CAAA,QACvB,IAAI,OAAO,6BAA6B,QAAQ,GAAG;AAAA,IAAA;AAIrD,UAAM,mBAAmB,OAAO,0BAA0B,QAAQ,GAAG;AAErE,UAAM,YAAY,qBAAqB,GAAG;AAC1C,OAAG,OAAO,sCAAsC,GAAG,KAAK,SAAS,GAAG;AACpE,WAAO;AAAA,EACT,SAAS,GAAG;AAEV,UAAM,mBAAmB;AAAA;AAAA,MAEvB;AAAA,8BACwB,QAAQ;AAAA,IAAA;AAElC,OAAG,QAAQ,sCAAsC,CAAC;AAClD,UAAM;AAAA,EACR,UAAA;AACE,UAAM,mBAAmB,IAAA;AAAA,EAC3B;AACF;AAEA,SAAS,eACP,IACA,cACA,IACA,WAIC;AACD,SAAO,GAAG,gBAAgB,OAAM,QAAO;AACrC,UAAM,EAAC,OAAA,IAAU,MAAM,mBAAmB,KAAK,YAAY;AAC3D,UAAM,OAAO,OAAO;AAAA,MAClB,CAAAA,UAAQA,MAAK,WAAW,GAAG,MAAM,UAAUA,MAAK,SAAS,GAAG,MAAM;AAAA,IAAA;AAEpE,QAAI,CAAC,MAAM;AACT,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,MAAA;AAAA,IAEJ;AACA,UAAM,YAAYC,MAAQ,GAAG,MAAM,UAAU,mBAAmB;AAChE,QAAI,KAAK,cAAc,UAAU,WAAW;AAC1C,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,MAAA;AAAA,IAEJ;AACA,QAAI,KAAK,QAAQ,UAAU,aAAa;AACtC,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,MAAA;AAAA,IAEJ;AACA,QACE,CAAC;AAAA,MACC,IAAI,IAAI,OAAO,KAAK,UAAU,MAAM,CAAC;AAAA,MACrC,IAAI,IAAI,KAAK,sBAAsB;AAAA,IAAA,GAErC;AACA,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,MAAA;AAAA,IAEJ;AACA,UAAM,UAAU;AAAA,MACd,GAAG,OAAO,QAAQ,UAAU,MAAM;AAAA,MAClC,GAAG,OAAO,QAAQ,GAAG,OAAO;AAAA,IAAA;AAE9B,eAAW,CAAC,KAAK,GAAG,KAAK,SAAS;AAChC,YAAM,UAAU,KAAK,QAAQ,GAAG;AAChC,UAAI,CAAC,SAAS;AACZ,cAAM,IAAI;AAAA,UACR;AAAA,UACA,UAAU,GAAG;AAAA,QAAA;AAAA,MAEjB;AACA,YAAM,UAAUA,MAAQ,KAAK,oBAAoB;AACjD,UAAI,QAAQ,WAAW,QAAQ,KAAK;AAClC,cAAM,IAAI;AAAA,UACR;AAAA,UACA,UAAU,GAAG;AAAA,QAAA;AAAA,MAEjB;AAAA,IACF;AACA,UAAM,WAA2B;AAAA,MAC/B,UAAU;AAAA,QACR,QAAQ,GAAG,MAAM;AAAA,QACjB,MAAM,GAAG,MAAM;AAAA,QACf,QAAQ,EAAC,SAAS,OAAO,KAAK,UAAU,MAAM,EAAA;AAAA,MAAC;AAAA,MAEjD,SAAS,OAAO,KAAK,GAAG,OAAO,EAAE;AAAA,QAC/B,CAAA,QAAO,EAAE,OAAO,UAAU;AAAA,MAAA;AAAA,MAE5B;AAAA,IAAA;AAEF,WAAO,EAAC,WAAW,MAAM,SAAA;AAAA,EAC3B,CAAC;AACH;"}
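The `sourcesContent` embedded above is the new `backfill-stream.ts`. Its core loop buffers parsed rows until `flushThresholdBytes` is reached (default 64 KiB, chosen to match the size of Postgres COPY chunks) and then yields one `backfill` message per batch, plus a final flush for any leftover rows. A minimal sketch of that batching pattern, where `Row` and `parseRows` are hypothetical stand-ins for the real `JSONValue[]` rows and `TsvParser`:

```ts
// Minimal sketch of the byte-threshold batching used by stream() above.
// Row and parseRows are hypothetical stand-ins for JSONValue[] and TsvParser.
type Row = unknown[];

async function* batchByBytes(
  chunks: AsyncIterable<Buffer>,
  parseRows: (chunk: Buffer) => Row[],
  flushThresholdBytes = 64 * 1024, // POSTGRES_COPY_CHUNK_SIZE in the source
): AsyncGenerator<Row[]> {
  let buffered: Row[] = [];
  let bufferedBytes = 0;
  for await (const chunk of chunks) {
    buffered.push(...parseRows(chunk));
    bufferedBytes += chunk.byteLength;
    if (bufferedBytes >= flushThresholdBytes) {
      yield buffered; // one `backfill` message worth of rows
      buffered = [];
      bufferedBytes = 0;
    }
  }
  if (buffered.length > 0) {
    yield buffered; // mirrors the "Flush the last batch of rows" step above
  }
}
```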
package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts
@@ -19,6 +19,11 @@ type ReplicationSlot = {
 };
 export declare function createReplicationSlot(lc: LogContext, session: postgres.Sql, slotName: string): Promise<ReplicationSlot>;
 export declare const INSERT_BATCH_SIZE = 50;
-export
+export type DownloadStatements = {
+    select: string;
+    getTotalRows: string;
+    getTotalBytes: string;
+};
+export declare function makeDownloadStatements(table: PublishedTableSpec, cols: string[]): DownloadStatements;
 export {};
 //# sourceMappingURL=initial-sync.d.ts.map
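The new `DownloadStatements` type bundles three SQL strings built over one shared `FROM`/`WHERE` clause. A sketch of a consumer, assuming only a client with a `sql.unsafe(...)`-style method like the postgres.js wrapper used elsewhere in this package (the helper name `estimateDownload` is hypothetical):

```ts
// Shape introduced above, reproduced so the sketch is self-contained.
type DownloadStatements = {
  select: string;
  getTotalRows: string;
  getTotalBytes: string;
};

// Hypothetical helper: size up a table before streaming its rows.
async function estimateDownload(
  sql: {unsafe(query: string): Promise<Record<string, unknown>[]>},
  stmts: DownloadStatements,
) {
  const [rows] = await sql.unsafe(stmts.getTotalRows);
  const [bytes] = await sql.unsafe(stmts.getTotalBytes);
  return {
    totalRows: Number(rows.totalRows), // COUNT(*) over the same WHERE clause
    totalBytes: Number(bytes.totalBytes), // SUM of pg_column_size() per column
  };
}
```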
package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts.map
@@ -1 +1 @@
-{"version":3,"file":"initial-sync.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/change-source/pg/initial-sync.ts"],"names":[],"mappings":"AAIA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAIjD,OAAO,QAAQ,MAAM,UAAU,CAAC;AAChC,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,0CAA0C,CAAC;
+{"version":3,"file":"initial-sync.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/change-source/pg/initial-sync.ts"],"names":[],"mappings":"AAIA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAIjD,OAAO,QAAQ,MAAM,UAAU,CAAC;AAChC,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,0CAA0C,CAAC;AAIzE,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,iCAAiC,CAAC;AAa9D,OAAO,KAAK,EAAY,kBAAkB,EAAC,MAAM,sBAAsB,CAAC;AAexE,OAAO,KAAK,EAAC,WAAW,EAAC,MAAM,0BAA0B,CAAC;AAkB1D,MAAM,MAAM,kBAAkB,GAAG;IAC/B,gBAAgB,EAAE,MAAM,CAAC;IACzB,WAAW,CAAC,EAAE,OAAO,GAAG,SAAS,CAAC;CACnC,CAAC;AAEF,4EAA4E;AAC5E,MAAM,MAAM,aAAa,GAAG,UAAU,CAAC;AAEvC,wBAAsB,WAAW,CAC/B,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,WAAW,EAClB,EAAE,EAAE,QAAQ,EACZ,WAAW,EAAE,MAAM,EACnB,WAAW,EAAE,kBAAkB,EAC/B,OAAO,EAAE,aAAa,iBA6LvB;AAkGD,KAAK,eAAe,GAAG;IACrB,SAAS,EAAE,MAAM,CAAC;IAClB,gBAAgB,EAAE,MAAM,CAAC;IACzB,aAAa,EAAE,MAAM,CAAC;IACtB,aAAa,EAAE,MAAM,CAAC;CACvB,CAAC;AAKF,wBAAsB,qBAAqB,CACzC,EAAE,EAAE,UAAU,EACd,OAAO,EAAE,QAAQ,CAAC,GAAG,EACrB,QAAQ,EAAE,MAAM,GACf,OAAO,CAAC,eAAe,CAAC,CAQ1B;AA6BD,eAAO,MAAM,iBAAiB,KAAK,CAAC;AAMpC,MAAM,MAAM,kBAAkB,GAAG;IAC/B,MAAM,EAAE,MAAM,CAAC;IACf,YAAY,EAAE,MAAM,CAAC;IACrB,aAAa,EAAE,MAAM,CAAC;CACvB,CAAC;AAEF,wBAAgB,sBAAsB,CACpC,KAAK,EAAE,kBAAkB,EACzB,IAAI,EAAE,MAAM,EAAE,GACb,kBAAkB,CAgBpB"}
package/out/zero-cache/src/services/change-source/pg/initial-sync.js
@@ -108,7 +108,9 @@ async function initialSync(lc, shard, tx, upstreamURI, syncOptions, context) {
     const numWorkers = platform() === "win32" ? numTables : Math.min(tableCopyWorkers, numTables);
     const copyPool = pgClient(lc, upstreamURI, {
       max: numWorkers,
-      connection: { ["application_name"]: "initial-sync-copy-worker" }
+      connection: { ["application_name"]: "initial-sync-copy-worker" },
+      ["max_lifetime"]: 120 * 60
+      // set a long (2h) limit for COPY streaming
     });
     const copiers = startTableCopyWorkers(
       lc,
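`max_lifetime` is the postgres.js connection option, expressed in seconds; `120 * 60` lets a pooled connection live for up to two hours so a long COPY stream is not cut off by connection recycling. In isolation, the same configuration looks like this (the URI and pool size are placeholders; the real code builds this via `pgClient(lc, upstreamURI, ...)`):

```ts
import postgres from 'postgres';

// Placeholder URI; initialSync() threads upstreamURI through pgClient().
const copyPool = postgres('postgres://user:pass@host:5432/db', {
  max: 4, // one connection per table-copy worker
  connection: {['application_name']: 'initial-sync-copy-worker'},
  ['max_lifetime']: 120 * 60, // seconds: allow up to 2h for COPY streaming
});
```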
@@ -119,21 +121,30 @@ async function initialSync(lc, shard, tx, upstreamURI, syncOptions, context) {
     );
     try {
       createLiteTables(tx, tables, initialVersion);
+      const downloads = await Promise.all(
+        tables.map(
+          (spec) => copiers.processReadTask(
+            (db, lc2) => getInitialDownloadState(lc2, db, spec)
+          )
+        )
+      );
       statusPublisher.publish(
         lc,
         "Initializing",
         `Copying ${numTables} upstream tables at version ${initialVersion}`,
-        5e3
+        5e3,
+        () => ({ downloadStatus: downloads.map(({ status }) => status) })
       );
       void copyProfiler?.start();
       const rowCounts = await Promise.all(
-
+        downloads.map(
           (table) => copiers.processReadTask(
             (db, lc2) => copy(lc2, table, copyPool, db, tx)
           )
         )
       );
       void copyProfiler?.stopAndDispose(lc, "initial-copy");
+      copiers.setDone();
       const total = rowCounts.reduce(
         (acc, curr) => ({
           rows: acc.rows + curr.rows,
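The added block computes every table's download state up front, then hands `statusPublisher.publish` a callback so each status snapshot reflects the live `status.rows` counters that the copy workers mutate (the `5e3` argument was already present; it appears to be a publish interval in milliseconds). Condensed, the pattern is the following, with all names hypothetical except the shape of the status object:

```ts
type DownloadStatus = {table: string; rows: number; totalRows: number};

// Hypothetical condensation of the flow above: totals first, then live progress.
async function copyAllWithProgress(
  tables: string[],
  getTotalRows: (table: string) => Promise<number>,
  copyTable: (table: string, status: DownloadStatus) => Promise<void>,
  publish: (snapshot: () => DownloadStatus[]) => void,
) {
  // 1. Precompute totals so progress percentages are meaningful immediately.
  const statuses = await Promise.all(
    tables.map(async table => ({
      table,
      rows: 0,
      totalRows: await getTotalRows(table),
    })),
  );
  // 2. Publish a closure over the shared status objects; workers bump `rows`.
  publish(() => statuses.map(s => ({...s})));
  await Promise.all(statuses.map(s => copyTable(s.table, s)));
}
```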
@@ -164,12 +175,7 @@ async function initialSync(lc, shard, tx, upstreamURI, syncOptions, context) {
         `Synced ${total.rows.toLocaleString()} rows of ${numTables} tables in ${publications} up to ${lsn} (flush: ${total.flushTime.toFixed(3)}, index: ${index.toFixed(3)}, total: ${elapsed.toFixed(3)} ms)`
       );
     } finally {
-
-      if (platform() === "win32") {
-        void copyPool.end().catch((e) => lc.warn?.(`Error closing copyPool`, e));
-      } else {
-        await copyPool.end();
-      }
+      void copyPool.end().catch((e) => lc.warn?.(`Error closing copyPool`, e));
     }
   } catch (e) {
     lc.warn?.(`dropping replication slot ${slotName}`, e);
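The finally block now closes the pool the same way on every platform: fire-and-forget, with failures routed to the logger instead of rejecting out of the cleanup path. The pattern, extracted (names mirror the code above):

```ts
// Close the pool without blocking teardown; surface failures via the logger.
function endQuietly(
  copyPool: {end(): Promise<void>},
  warn: (msg: string, e: unknown) => void,
) {
  void copyPool.end().catch(e => warn('Error closing copyPool', e));
}
```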
@@ -285,20 +291,58 @@ const INSERT_BATCH_SIZE = 50;
 const MB = 1024 * 1024;
 const MAX_BUFFERED_ROWS = 1e4;
 const BUFFERED_SIZE_THRESHOLD = 8 * MB;
-function
+function makeDownloadStatements(table, cols) {
   const filterConditions = Object.values(table.publications).map(({ rowFilter }) => rowFilter).filter((f) => !!f);
-
+  const where = filterConditions.length === 0 ? "" : (
     /*sql*/
-    `
-
-
-
-  ))
+    `WHERE ${filterConditions.join(" OR ")}`
+  );
+  const fromTable = (
+    /*sql*/
+    `FROM ${id(table.schema)}.${id(table.name)} ${where}`
   );
+  const totalBytes = `(${cols.map((col) => `SUM(COALESCE(pg_column_size(${id(col)}), 0))`).join(" + ")})`;
+  const stmts = {
+    select: (
+      /*sql*/
+      `SELECT ${cols.map(id).join(",")} ${fromTable}`
+    ),
+    getTotalRows: (
+      /*sql*/
+      `SELECT COUNT(*) AS "totalRows" ${fromTable}`
+    ),
+    getTotalBytes: (
+      /*sql*/
+      `SELECT ${totalBytes} AS "totalBytes" ${fromTable}`
+    )
+  };
+  return stmts;
+}
+async function getInitialDownloadState(lc, sql, spec) {
+  const start = performance.now();
+  const table = liteTableName(spec);
+  const columns = Object.keys(spec.columns);
+  const stmts = makeDownloadStatements(spec, columns);
+  const rowsResult = sql.unsafe(stmts.getTotalRows).execute();
+  const bytesResult = sql.unsafe(stmts.getTotalBytes).execute();
+  const state = {
+    spec,
+    status: {
+      table,
+      columns,
+      rows: 0,
+      totalRows: Number((await rowsResult)[0].totalRows),
+      totalBytes: Number((await bytesResult)[0].totalBytes)
+    }
+  };
+  const elapsed = (performance.now() - start).toFixed(3);
+  lc.info?.(`Computed initial download state for ${table} (${elapsed} ms)`, {
+    state: state.status
+  });
+  return state;
 }
-async function copy(lc, table, dbClient, from, to) {
+async function copy(lc, { spec: table, status }, dbClient, from, to) {
   const start = performance.now();
-  let rows = 0;
   let flushTime = 0;
   const tableName = liteTableName(table);
   const orderedColumns = Object.entries(table.columns);
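For a concrete feel of `makeDownloadStatements`, assume a hypothetical published table `public.users` with columns `id` and `name`, no row filters, and an `id()` helper that double-quotes identifiers. The generated statements would then be:

```ts
// Hypothetical output of makeDownloadStatements for public.users (id, name).
// The trailing space in each FROM clause comes from the empty WHERE string.
const stmts = {
  select: `SELECT "id","name" FROM "public"."users" `,
  getTotalRows: `SELECT COUNT(*) AS "totalRows" FROM "public"."users" `,
  getTotalBytes:
    `SELECT (SUM(COALESCE(pg_column_size("id"), 0)) + ` +
    `SUM(COALESCE(pg_column_size("name"), 0))) AS "totalBytes" ` +
    `FROM "public"."users" `,
};
```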
@@ -315,7 +359,7 @@ async function copy(lc, table, dbClient, from, to) {
   const insertBatchStmt = to.prepare(
     insertSql + `,${valuesSql}`.repeat(INSERT_BATCH_SIZE - 1)
   );
-  const
+  const { select } = makeDownloadStatements(table, columnNames);
   const valuesPerRow = columnSpecs.length;
   const valuesPerBatch = valuesPerRow * INSERT_BATCH_SIZE;
   const pendingValues = Array.from({
@@ -338,14 +382,14 @@ async function copy(lc, table, dbClient, from, to) {
       pendingValues[i] = void 0;
     }
     pendingSize = 0;
-    rows += flushedRows;
+    status.rows += flushedRows;
     const elapsed2 = performance.now() - start2;
     flushTime += elapsed2;
     lc.debug?.(
       `flushed ${flushedRows} ${tableName} rows (${flushedSize} bytes) in ${elapsed2.toFixed(3)} ms`
     );
   }
-  lc.info?.(`Starting copy stream of ${tableName}:`,
+  lc.info?.(`Starting copy stream of ${tableName}:`, select);
   const pgParsers = await getTypeParsers(dbClient, { returnJsonAsString: true });
   const parsers = columnSpecs.map((c) => {
     const pgParse = pgParsers.getTypeParser(c.typeOID);
@@ -358,7 +402,7 @@ async function copy(lc, table, dbClient, from, to) {
   const tsvParser = new TsvParser();
   let col = 0;
   await pipeline(
-    await from.unsafe(`COPY (${
+    await from.unsafe(`COPY (${select}) TO STDOUT`).readable(),
     new Writable({
       highWaterMark: BUFFERED_SIZE_THRESHOLD,
       write(chunk, _encoding, callback) {
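`from.unsafe(...).readable()` is the postgres.js way of consuming `COPY ... TO STDOUT` as a Node Readable, which the code then pipes into a Writable with an 8 MB high-water mark. A self-contained sketch of the same consumption pattern, with a placeholder URI and query:

```ts
import {Writable} from 'node:stream';
import {pipeline} from 'node:stream/promises';
import postgres from 'postgres';

const sql = postgres('postgres://user:pass@host:5432/db'); // placeholder URI

// Stream COPY output chunks (raw TSV bytes) into a custom sink.
const readable = await sql
  .unsafe(`COPY (SELECT id, name FROM users) TO STDOUT`)
  .readable();
await pipeline(
  readable,
  new Writable({
    write(chunk, _encoding, callback) {
      process.stdout.write(chunk); // a real sink would parse TSV rows here
      callback();
    },
  }),
);
await sql.end();
```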
@@ -390,14 +434,14 @@ async function copy(lc, table, dbClient, from, to) {
   );
   const elapsed = performance.now() - start;
   lc.info?.(
-    `Finished copying ${rows} rows into ${tableName} (flush: ${flushTime.toFixed(3)} ms) (total: ${elapsed.toFixed(3)} ms) `
+    `Finished copying ${status.rows} rows into ${tableName} (flush: ${flushTime.toFixed(3)} ms) (total: ${elapsed.toFixed(3)} ms) `
   );
-  return { rows, flushTime };
+  return { rows: status.rows, flushTime };
 }
 export {
   INSERT_BATCH_SIZE,
   createReplicationSlot,
   initialSync,
-
+  makeDownloadStatements
 };
 //# sourceMappingURL=initial-sync.js.map
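The thread running through these hunks: `copy()` no longer keeps a private `rows` counter; it increments the `status.rows` field of the state object created by `getInitialDownloadState`, so the status publisher's snapshot callback can report progress while the COPY is still streaming. For example, a reporter closed over the same mutable object stays current without extra plumbing (names below are hypothetical):

```ts
type DownloadStatus = {rows: number; totalRows: number};

// Hypothetical progress formatter sharing the mutable status object.
function progressOf(status: DownloadStatus): string {
  const pct = (100 * status.rows) / Math.max(1, status.totalRows);
  return `${status.rows}/${status.totalRows} rows (${pct.toFixed(1)}%)`;
}

const status: DownloadStatus = {rows: 0, totalRows: 1000};
status.rows += 250; // copy() mutates this as batches flush
console.log(progressOf(status)); // "250/1000 rows (25.0%)"
```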