@rocicorp/zero 1.3.0 → 1.4.0-canary.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/out/analyze-query/src/analyze-cli.d.ts +24 -0
- package/out/analyze-query/src/analyze-cli.d.ts.map +1 -0
- package/out/analyze-query/src/analyze-cli.js +289 -0
- package/out/analyze-query/src/analyze-cli.js.map +1 -0
- package/out/analyze-query/src/bin-analyze.js +6 -6
- package/out/analyze-query/src/bin-transform.js +2 -2
- package/out/ast-to-zql/src/bin.js +1 -1
- package/out/shared/src/logging.d.ts.map +1 -1
- package/out/shared/src/logging.js +1 -1
- package/out/shared/src/logging.js.map +1 -1
- package/out/shared/src/options.d.ts.map +1 -1
- package/out/shared/src/options.js +1 -1
- package/out/shared/src/options.js.map +1 -1
- package/out/z2s/src/compiler.d.ts.map +1 -1
- package/out/z2s/src/compiler.js +4 -1
- package/out/z2s/src/compiler.js.map +1 -1
- package/out/z2s/src/sql.d.ts.map +1 -1
- package/out/z2s/src/sql.js +1 -0
- package/out/z2s/src/sql.js.map +1 -1
- package/out/zero/package.js +95 -89
- package/out/zero/package.js.map +1 -1
- package/out/zero/src/analyze.d.ts +2 -0
- package/out/zero/src/analyze.d.ts.map +1 -0
- package/out/zero/src/analyze.js +2 -0
- package/out/zero/src/bindings.js +1 -1
- package/out/zero/src/zero-cache-dev.js +1 -1
- package/out/zero/src/zero-cache-dev.js.map +1 -1
- package/out/zero/src/zero-out.js +1 -1
- package/out/zero-cache/src/auth/auth.d.ts.map +1 -1
- package/out/zero-cache/src/auth/auth.js.map +1 -1
- package/out/zero-cache/src/auth/load-permissions.js +2 -2
- package/out/zero-cache/src/auth/write-authorizer.d.ts.map +1 -1
- package/out/zero-cache/src/auth/write-authorizer.js +5 -14
- package/out/zero-cache/src/auth/write-authorizer.js.map +1 -1
- package/out/zero-cache/src/config/network.d.ts +1 -1
- package/out/zero-cache/src/config/network.d.ts.map +1 -1
- package/out/zero-cache/src/config/network.js +1 -1
- package/out/zero-cache/src/config/network.js.map +1 -1
- package/out/zero-cache/src/config/normalize.d.ts.map +1 -1
- package/out/zero-cache/src/config/normalize.js.map +1 -1
- package/out/zero-cache/src/config/zero-config.d.ts +5 -0
- package/out/zero-cache/src/config/zero-config.d.ts.map +1 -1
- package/out/zero-cache/src/config/zero-config.js +16 -3
- package/out/zero-cache/src/config/zero-config.js.map +1 -1
- package/out/zero-cache/src/db/lite-tables.d.ts.map +1 -1
- package/out/zero-cache/src/db/lite-tables.js +3 -3
- package/out/zero-cache/src/db/lite-tables.js.map +1 -1
- package/out/zero-cache/src/db/transaction-pool.d.ts +43 -40
- package/out/zero-cache/src/db/transaction-pool.d.ts.map +1 -1
- package/out/zero-cache/src/db/transaction-pool.js +76 -56
- package/out/zero-cache/src/db/transaction-pool.js.map +1 -1
- package/out/zero-cache/src/observability/events.d.ts.map +1 -1
- package/out/zero-cache/src/observability/events.js +1 -1
- package/out/zero-cache/src/observability/events.js.map +1 -1
- package/out/zero-cache/src/scripts/decommission.js +1 -1
- package/out/zero-cache/src/scripts/deploy-permissions.js +2 -2
- package/out/zero-cache/src/scripts/permissions.js +1 -1
- package/out/zero-cache/src/server/anonymous-otel-start.d.ts.map +1 -1
- package/out/zero-cache/src/server/anonymous-otel-start.js +4 -4
- package/out/zero-cache/src/server/anonymous-otel-start.js.map +1 -1
- package/out/zero-cache/src/server/change-streamer.d.ts +1 -1
- package/out/zero-cache/src/server/change-streamer.d.ts.map +1 -1
- package/out/zero-cache/src/server/change-streamer.js +27 -12
- package/out/zero-cache/src/server/change-streamer.js.map +1 -1
- package/out/zero-cache/src/server/logging.d.ts +1 -3
- package/out/zero-cache/src/server/logging.d.ts.map +1 -1
- package/out/zero-cache/src/server/logging.js +6 -3
- package/out/zero-cache/src/server/logging.js.map +1 -1
- package/out/zero-cache/src/server/main.d.ts.map +1 -1
- package/out/zero-cache/src/server/main.js +26 -26
- package/out/zero-cache/src/server/main.js.map +1 -1
- package/out/zero-cache/src/server/mutator.js +4 -2
- package/out/zero-cache/src/server/mutator.js.map +1 -1
- package/out/zero-cache/src/server/otel-log-sink.d.ts.map +1 -1
- package/out/zero-cache/src/server/otel-log-sink.js +0 -2
- package/out/zero-cache/src/server/otel-log-sink.js.map +1 -1
- package/out/zero-cache/src/server/otel-start.d.ts +1 -1
- package/out/zero-cache/src/server/otel-start.d.ts.map +1 -1
- package/out/zero-cache/src/server/otel-start.js +7 -3
- package/out/zero-cache/src/server/otel-start.js.map +1 -1
- package/out/zero-cache/src/server/reaper.js +6 -6
- package/out/zero-cache/src/server/reaper.js.map +1 -1
- package/out/zero-cache/src/server/replicator.d.ts.map +1 -1
- package/out/zero-cache/src/server/replicator.js +5 -3
- package/out/zero-cache/src/server/replicator.js.map +1 -1
- package/out/zero-cache/src/server/runner/run-worker.js +2 -2
- package/out/zero-cache/src/server/runner/run-worker.js.map +1 -1
- package/out/zero-cache/src/server/syncer.d.ts.map +1 -1
- package/out/zero-cache/src/server/syncer.js +13 -12
- package/out/zero-cache/src/server/syncer.js.map +1 -1
- package/out/zero-cache/src/server/worker-dispatcher.js +1 -1
- package/out/zero-cache/src/services/analyze.js +1 -1
- package/out/zero-cache/src/services/change-source/common/backfill-manager.js +1 -1
- package/out/zero-cache/src/services/change-source/common/replica-schema.js +1 -1
- package/out/zero-cache/src/services/change-source/custom/change-source.js +2 -2
- package/out/zero-cache/src/services/change-source/pg/backfill-stream.js +4 -1
- package/out/zero-cache/src/services/change-source/pg/backfill-stream.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/change-source.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/change-source.js +19 -23
- package/out/zero-cache/src/services/change-source/pg/change-source.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts +58 -3
- package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/initial-sync.js +209 -52
- package/out/zero-cache/src/services/change-source/pg/initial-sync.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/logical-replication/stream.js +2 -2
- package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts +24 -15
- package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/ddl.js +35 -58
- package/out/zero-cache/src/services/change-source/pg/schema/ddl.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/init.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/init.js +2 -2
- package/out/zero-cache/src/services/change-source/pg/schema/init.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/published.d.ts +1 -2
- package/out/zero-cache/src/services/change-source/pg/schema/published.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/published.js +15 -18
- package/out/zero-cache/src/services/change-source/pg/schema/published.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/shard.js +1 -1
- package/out/zero-cache/src/services/change-source/protocol/current/data.js +1 -1
- package/out/zero-cache/src/services/change-streamer/backup-monitor.js +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-http.d.ts +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-http.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-http.js +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-http.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts +5 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.js +10 -7
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/replica-monitor.js +2 -2
- package/out/zero-cache/src/services/change-streamer/storer.d.ts +19 -2
- package/out/zero-cache/src/services/change-streamer/storer.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/storer.js +70 -6
- package/out/zero-cache/src/services/change-streamer/storer.js.map +1 -1
- package/out/zero-cache/src/services/heapz.d.ts.map +1 -1
- package/out/zero-cache/src/services/heapz.js +1 -1
- package/out/zero-cache/src/services/heapz.js.map +1 -1
- package/out/zero-cache/src/services/life-cycle.d.ts +2 -1
- package/out/zero-cache/src/services/life-cycle.d.ts.map +1 -1
- package/out/zero-cache/src/services/life-cycle.js +10 -7
- package/out/zero-cache/src/services/life-cycle.js.map +1 -1
- package/out/zero-cache/src/services/litestream/commands.d.ts +15 -4
- package/out/zero-cache/src/services/litestream/commands.d.ts.map +1 -1
- package/out/zero-cache/src/services/litestream/commands.js +40 -34
- package/out/zero-cache/src/services/litestream/commands.js.map +1 -1
- package/out/zero-cache/src/services/mutagen/mutagen.js +3 -3
- package/out/zero-cache/src/services/mutagen/pusher.d.ts +28 -28
- package/out/zero-cache/src/services/replicator/change-processor.js +2 -2
- package/out/zero-cache/src/services/replicator/incremental-sync.js +1 -1
- package/out/zero-cache/src/services/replicator/schema/replication-state.js +1 -1
- package/out/zero-cache/src/services/replicator/write-worker-client.js.map +1 -1
- package/out/zero-cache/src/services/replicator/write-worker.js +3 -3
- package/out/zero-cache/src/services/replicator/write-worker.js.map +1 -1
- package/out/zero-cache/src/services/run-ast.d.ts.map +1 -1
- package/out/zero-cache/src/services/run-ast.js +3 -3
- package/out/zero-cache/src/services/run-ast.js.map +1 -1
- package/out/zero-cache/src/services/statz.d.ts.map +1 -1
- package/out/zero-cache/src/services/statz.js +3 -3
- package/out/zero-cache/src/services/statz.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/active-users-gauge.js +1 -1
- package/out/zero-cache/src/services/view-syncer/connection-context-manager.d.ts +2 -2
- package/out/zero-cache/src/services/view-syncer/connection-context-manager.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/connection-context-manager.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr-purger.js +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr-store.js +3 -3
- package/out/zero-cache/src/services/view-syncer/cvr-store.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr.js +1 -1
- package/out/zero-cache/src/services/view-syncer/inspect-handler.js +2 -2
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts +6 -16
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.js +31 -39
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/row-record-cache.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/row-record-cache.js +4 -4
- package/out/zero-cache/src/services/view-syncer/row-record-cache.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/snapshotter.js +2 -2
- package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/view-syncer.js +6 -6
- package/out/zero-cache/src/services/view-syncer/view-syncer.js.map +1 -1
- package/out/zero-cache/src/types/profiler.d.ts.map +1 -1
- package/out/zero-cache/src/types/profiler.js.map +1 -1
- package/out/zero-cache/src/types/row-key.d.ts.map +1 -1
- package/out/zero-cache/src/types/row-key.js.map +1 -1
- package/out/zero-cache/src/types/streams.d.ts +1 -1
- package/out/zero-cache/src/types/streams.d.ts.map +1 -1
- package/out/zero-cache/src/types/streams.js.map +1 -1
- package/out/zero-cache/src/types/websocket-handoff.d.ts +1 -1
- package/out/zero-cache/src/types/websocket-handoff.d.ts.map +1 -1
- package/out/zero-cache/src/types/websocket-handoff.js +1 -1
- package/out/zero-cache/src/types/websocket-handoff.js.map +1 -1
- package/out/zero-cache/src/workers/connection.d.ts +1 -1
- package/out/zero-cache/src/workers/connection.d.ts.map +1 -1
- package/out/zero-cache/src/workers/connection.js.map +1 -1
- package/out/zero-cache/src/workers/mutator.js.map +1 -1
- package/out/zero-cache/src/workers/syncer.d.ts +1 -1
- package/out/zero-cache/src/workers/syncer.d.ts.map +1 -1
- package/out/zero-cache/src/workers/syncer.js +3 -3
- package/out/zero-cache/src/workers/syncer.js.map +1 -1
- package/out/zero-client/src/client/bindings.js +1 -1
- package/out/zero-client/src/client/crud-impl.d.ts.map +1 -1
- package/out/zero-client/src/client/crud-impl.js +4 -13
- package/out/zero-client/src/client/crud-impl.js.map +1 -1
- package/out/zero-client/src/client/inspector/inspector.d.ts +24 -0
- package/out/zero-client/src/client/inspector/inspector.d.ts.map +1 -1
- package/out/zero-client/src/client/inspector/inspector.js +28 -0
- package/out/zero-client/src/client/inspector/inspector.js.map +1 -1
- package/out/zero-client/src/client/inspector/lazy-inspector.d.ts +9 -0
- package/out/zero-client/src/client/inspector/lazy-inspector.d.ts.map +1 -1
- package/out/zero-client/src/client/inspector/lazy-inspector.js +28 -1
- package/out/zero-client/src/client/inspector/lazy-inspector.js.map +1 -1
- package/out/zero-client/src/client/ivm-branch.d.ts.map +1 -1
- package/out/zero-client/src/client/ivm-branch.js +4 -13
- package/out/zero-client/src/client/ivm-branch.js.map +1 -1
- package/out/zero-client/src/client/log-options.d.ts +1 -0
- package/out/zero-client/src/client/log-options.d.ts.map +1 -1
- package/out/zero-client/src/client/log-options.js +3 -2
- package/out/zero-client/src/client/log-options.js.map +1 -1
- package/out/zero-client/src/client/options.d.ts +13 -1
- package/out/zero-client/src/client/options.d.ts.map +1 -1
- package/out/zero-client/src/client/options.js.map +1 -1
- package/out/zero-client/src/client/version.js +1 -1
- package/out/zero-client/src/client/zero.d.ts.map +1 -1
- package/out/zero-client/src/client/zero.js +2 -1
- package/out/zero-client/src/client/zero.js.map +1 -1
- package/out/zero-protocol/src/error.d.ts.map +1 -1
- package/out/zero-protocol/src/error.js +1 -1
- package/out/zero-protocol/src/error.js.map +1 -1
- package/out/zero-react/src/bindings.js +1 -1
- package/out/zero-solid/src/bindings.js +1 -1
- package/out/zero-solid/src/solid-view.d.ts.map +1 -1
- package/out/zero-solid/src/solid-view.js +14 -14
- package/out/zero-solid/src/solid-view.js.map +1 -1
- package/out/zql/src/builder/builder.d.ts.map +1 -1
- package/out/zql/src/builder/builder.js.map +1 -1
- package/out/zql/src/ivm/array-view.d.ts.map +1 -1
- package/out/zql/src/ivm/array-view.js +27 -2
- package/out/zql/src/ivm/array-view.js.map +1 -1
- package/out/zql/src/ivm/change-index-enum.d.ts +9 -0
- package/out/zql/src/ivm/change-index-enum.d.ts.map +1 -0
- package/out/zql/src/ivm/change-index.d.ts +5 -0
- package/out/zql/src/ivm/change-index.d.ts.map +1 -0
- package/out/zql/src/ivm/change-type-enum.d.ts +9 -0
- package/out/zql/src/ivm/change-type-enum.d.ts.map +1 -0
- package/out/zql/src/ivm/change-type.d.ts +5 -0
- package/out/zql/src/ivm/change-type.d.ts.map +1 -0
- package/out/zql/src/ivm/change.d.ts +20 -22
- package/out/zql/src/ivm/change.d.ts.map +1 -1
- package/out/zql/src/ivm/change.js +33 -0
- package/out/zql/src/ivm/change.js.map +1 -0
- package/out/zql/src/ivm/exists.d.ts.map +1 -1
- package/out/zql/src/ivm/exists.js +27 -38
- package/out/zql/src/ivm/exists.js.map +1 -1
- package/out/zql/src/ivm/fan-in.d.ts +3 -2
- package/out/zql/src/ivm/fan-in.d.ts.map +1 -1
- package/out/zql/src/ivm/fan-in.js.map +1 -1
- package/out/zql/src/ivm/fan-out.d.ts +1 -1
- package/out/zql/src/ivm/fan-out.d.ts.map +1 -1
- package/out/zql/src/ivm/fan-out.js +1 -1
- package/out/zql/src/ivm/fan-out.js.map +1 -1
- package/out/zql/src/ivm/filter-operators.d.ts +3 -3
- package/out/zql/src/ivm/filter-operators.d.ts.map +1 -1
- package/out/zql/src/ivm/filter-operators.js.map +1 -1
- package/out/zql/src/ivm/filter-push.d.ts.map +1 -1
- package/out/zql/src/ivm/filter-push.js +7 -7
- package/out/zql/src/ivm/filter-push.js.map +1 -1
- package/out/zql/src/ivm/filter.d.ts +1 -1
- package/out/zql/src/ivm/filter.d.ts.map +1 -1
- package/out/zql/src/ivm/filter.js.map +1 -1
- package/out/zql/src/ivm/flipped-join.d.ts.map +1 -1
- package/out/zql/src/ivm/flipped-join.js +49 -58
- package/out/zql/src/ivm/flipped-join.js.map +1 -1
- package/out/zql/src/ivm/join-utils.d.ts +2 -6
- package/out/zql/src/ivm/join-utils.d.ts.map +1 -1
- package/out/zql/src/ivm/join-utils.js +25 -25
- package/out/zql/src/ivm/join-utils.js.map +1 -1
- package/out/zql/src/ivm/join.d.ts.map +1 -1
- package/out/zql/src/ivm/join.js +32 -51
- package/out/zql/src/ivm/join.js.map +1 -1
- package/out/zql/src/ivm/maybe-split-and-push-edit-change.d.ts +1 -1
- package/out/zql/src/ivm/maybe-split-and-push-edit-change.d.ts.map +1 -1
- package/out/zql/src/ivm/maybe-split-and-push-edit-change.js +5 -10
- package/out/zql/src/ivm/maybe-split-and-push-edit-change.js.map +1 -1
- package/out/zql/src/ivm/memory-source.d.ts.map +1 -1
- package/out/zql/src/ivm/memory-source.js +52 -60
- package/out/zql/src/ivm/memory-source.js.map +1 -1
- package/out/zql/src/ivm/operator.d.ts +1 -1
- package/out/zql/src/ivm/operator.d.ts.map +1 -1
- package/out/zql/src/ivm/operator.js +2 -4
- package/out/zql/src/ivm/operator.js.map +1 -1
- package/out/zql/src/ivm/push-accumulated.d.ts +3 -2
- package/out/zql/src/ivm/push-accumulated.d.ts.map +1 -1
- package/out/zql/src/ivm/push-accumulated.js +98 -122
- package/out/zql/src/ivm/push-accumulated.js.map +1 -1
- package/out/zql/src/ivm/skip-yields.d.ts +4 -0
- package/out/zql/src/ivm/skip-yields.d.ts.map +1 -0
- package/out/zql/src/ivm/skip-yields.js +33 -0
- package/out/zql/src/ivm/skip-yields.js.map +1 -0
- package/out/zql/src/ivm/skip.d.ts +1 -1
- package/out/zql/src/ivm/skip.d.ts.map +1 -1
- package/out/zql/src/ivm/skip.js +2 -2
- package/out/zql/src/ivm/skip.js.map +1 -1
- package/out/zql/src/ivm/source-change-index-enum.d.ts +7 -0
- package/out/zql/src/ivm/source-change-index-enum.d.ts.map +1 -0
- package/out/zql/src/ivm/source-change-index.d.ts +5 -0
- package/out/zql/src/ivm/source-change-index.d.ts.map +1 -0
- package/out/zql/src/ivm/source.d.ts +11 -13
- package/out/zql/src/ivm/source.d.ts.map +1 -1
- package/out/zql/src/ivm/source.js +26 -0
- package/out/zql/src/ivm/source.js.map +1 -0
- package/out/zql/src/ivm/take.d.ts.map +1 -1
- package/out/zql/src/ivm/take.js +27 -50
- package/out/zql/src/ivm/take.js.map +1 -1
- package/out/zql/src/ivm/union-fan-in.d.ts +2 -1
- package/out/zql/src/ivm/union-fan-in.d.ts.map +1 -1
- package/out/zql/src/ivm/union-fan-in.js +3 -3
- package/out/zql/src/ivm/union-fan-in.js.map +1 -1
- package/out/zql/src/ivm/union-fan-out.d.ts.map +1 -1
- package/out/zql/src/ivm/union-fan-out.js +1 -1
- package/out/zql/src/ivm/union-fan-out.js.map +1 -1
- package/out/zql/src/ivm/view-apply-change.js +1 -1
- package/out/zql/src/planner/planner-debug.d.ts +2 -2
- package/out/zql/src/planner/planner-debug.d.ts.map +1 -1
- package/out/zql/src/planner/planner-debug.js.map +1 -1
- package/out/zql/src/planner/planner-graph.d.ts +1 -1
- package/out/zql/src/planner/planner-graph.d.ts.map +1 -1
- package/out/zql/src/planner/planner-graph.js.map +1 -1
- package/out/zqlite/src/internal/sql-inline.d.ts.map +1 -1
- package/out/zqlite/src/internal/sql-inline.js.map +1 -1
- package/out/zqlite/src/query-builder.d.ts.map +1 -1
- package/out/zqlite/src/query-builder.js.map +1 -1
- package/out/zqlite/src/table-source.d.ts.map +1 -1
- package/out/zqlite/src/table-source.js +11 -11
- package/out/zqlite/src/table-source.js.map +1 -1
- package/package.json +99 -93
package/out/zero-cache/src/services/change-source/pg/initial-sync.js:

@@ -1,17 +1,19 @@
 import { must } from "../../../../../shared/src/must.js";
 import { equals } from "../../../../../shared/src/set-utils.js";
 import { ALLOWED_APP_ID_CHARACTERS } from "../../../types/shards.js";
-import
-import { toStateVersionString } from "./lsn.js";
+import "../../../../../zqlite/src/db.js";
 import { liteValue } from "../../../types/lite.js";
 import { liteTableName } from "../../../types/names.js";
 import { mapPostgresToLite, mapPostgresToLiteIndex } from "../../../db/pg-to-lite.js";
 import { ColumnMetadataStore } from "../../replicator/schema/column-metadata.js";
+import { computeZqlSpecs, listIndexes, listTables } from "../../../db/lite-tables.js";
 import { initReplicationState } from "../../replicator/schema/replication-state.js";
+import { id } from "../../../types/sql.js";
+import { pgClient } from "../../../types/pg.js";
+import { toStateVersionString } from "./lsn.js";
 import { READONLY } from "../../../db/mode-enum.js";
 import { runTx } from "../../../db/run-transaction.js";
 import { getPublicationInfo } from "./schema/published.js";
-import { id } from "../../../types/sql.js";
 import { addReplica, dropShard, getInternalShardConfig, newReplicationSlot, replicationSlotExpression, validatePublications } from "./schema/shard.js";
 import { createLiteIndexStatement, createLiteTableStatement } from "../../../db/create.js";
 import { ReplicationStatusPublisher } from "../../replicator/replication-status.js";
@@ -22,55 +24,68 @@ import { TransactionPool, importSnapshot } from "../../../db/transaction-pool.js
 import { BinaryCopyParser, hasBinaryDecoder, makeBinaryDecoder, textCastDecoder } from "../../../db/pg-copy-binary.js";
 import { CpuProfiler } from "../../../types/profiler.js";
 import { ensureShardSchema } from "./schema/init.js";
-import
+import { resolver } from "@rocicorp/resolver";
 import { platform } from "node:os";
+import postgres from "postgres";
+import "node:path";
 import { PG_CONFIGURATION_LIMIT_EXCEEDED, PG_INSUFFICIENT_PRIVILEGE } from "@drdgvhbh/postgres-error-codes";
 import { Writable } from "node:stream";
 import { pipeline as pipeline$1 } from "node:stream/promises";
 //#region ../zero-cache/src/services/change-source/pg/initial-sync.ts
 async function initialSync(lc, shard, tx, upstreamURI, syncOptions, context) {
   if (!ALLOWED_APP_ID_CHARACTERS.test(shard.appID)) throw new Error("The App ID may only consist of lower-case letters, numbers, and the underscore character");
-  const { tableCopyWorkers, profileCopy, textCopy = false, replicationSlotFailover = false } = syncOptions;
+  const { tableCopyWorkers, profileCopy, textCopy = false, replicationSlotFailover = false, shadow } = syncOptions;
   const copyProfiler = profileCopy ? await CpuProfiler.connect() : null;
   const sql = pgClient(lc, upstreamURI);
-  const replicationSession = pgClient(lc, upstreamURI, {
+  const replicationSession = shadow ? void 0 : pgClient(lc, upstreamURI, {
     ["fetch_types"]: false,
     connection: { replication: "database" }
   });
   const slotName = newReplicationSlot(shard);
-  const statusPublisher = ReplicationStatusPublisher.forRunningTransaction(tx).publish(lc, "Initializing");
+  const statusPublisher = ReplicationStatusPublisher.forRunningTransaction(tx, shadow ? async () => {} : void 0).publish(lc, "Initializing");
+  let releaseShadowSnapshot;
   try {
     const pgVersion = await checkUpstreamConfig(sql);
-    const { publications } = await ensurePublishedTables(lc, sql, shard);
+    const { publications } = shadow ? await getInternalShardConfig(sql, shard) : await ensurePublishedTables(lc, sql, shard);
     lc.info?.(`Upstream is setup with publications [${publications}]`);
     const { database, host } = sql.options;
-    lc.info?.(`opening replication session to ${database}@${host}`);
-    let
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    lc.warn?.(`Dropped inactive replication slots: ${dropped.map(({ slot }) => slot)}`, e);
+    lc.info?.(shadow ? `acquiring exported snapshot on ${database}@${host} (shadow mode)` : `opening replication session to ${database}@${host}`);
+    let snapshot;
+    let lsn;
+    if (shadow) {
+      const acquired = await acquireExportedSnapshotForShadowSync(lc, upstreamURI);
+      snapshot = acquired.snapshot;
+      lsn = acquired.lsn;
+      releaseShadowSnapshot = acquired.release;
+    } else {
+      let slot;
+      for (let first = true;; first = false) try {
+        slot = await createReplicationSlot(lc, must(replicationSession), slotName, replicationSlotFailover && pgVersion >= 17e4);
+        break;
+      } catch (e) {
+        if (first && e instanceof postgres.PostgresError) {
+          if (e.code === PG_INSUFFICIENT_PRIVILEGE) {
+            await sql`ALTER ROLE current_user WITH REPLICATION`;
+            lc.info?.(`Added the REPLICATION role to database user`);
             continue;
           }
-
+          if (e.code === PG_CONFIGURATION_LIMIT_EXCEEDED) {
+            const dropped = await sql`
+              SELECT slot_name as slot, pg_drop_replication_slot(slot_name)
+              FROM pg_replication_slots
+              WHERE slot_name LIKE ${replicationSlotExpression(shard)} AND NOT active`;
+            if (dropped.length) {
+              lc.warn?.(`Dropped inactive replication slots: ${dropped.map(({ slot }) => slot)}`, e);
+              continue;
+            }
+            lc.error?.(`Unable to drop replication slots`, e);
+          }
         }
+        throw e;
       }
-
+      snapshot = slot.snapshot_name;
+      lsn = slot.consistent_point;
     }
-    const { snapshot_name: snapshot, consistent_point: lsn } = slot;
     const initialVersion = toStateVersionString(lsn);
     initReplicationState(tx, publications, initialVersion, context);
     const start = performance.now();
@@ -91,10 +106,12 @@ async function initialSync(lc, shard, tx, upstreamURI, syncOptions, context) {
     const copiers = startTableCopyWorkers(lc, copyPool, snapshot, numWorkers, numTables);
     try {
       createLiteTables(tx, tables, initialVersion);
-      const
+      const sampleRate = shadow?.sampleRate;
+      const maxRowsPerTable = shadow?.maxRowsPerTable;
+      const downloads = await Promise.all(tables.map((spec) => copiers.processReadTask((db, lc) => getInitialDownloadState(lc, db, spec, shadow !== void 0))));
       statusPublisher.publish(lc, "Initializing", `Copying ${numTables} upstream tables at version ${initialVersion}`, 5e3, () => ({ downloadStatus: downloads.map(({ status }) => status) }));
       copyProfiler?.start();
-      const rowCounts = await Promise.all(downloads.map((table) => copiers.processReadTask((db, lc) => copy(lc, table, copyPool, db, tx, textCopy))));
+      const rowCounts = await Promise.all(downloads.map((table) => copiers.processReadTask((db, lc) => copy(lc, table, copyPool, db, tx, textCopy, sampleRate, maxRowsPerTable))));
       copyProfiler?.stopAndDispose(lc, "initial-copy");
       copiers.setDone();
       const total = rowCounts.reduce((acc, curr) => ({
@@ -109,22 +126,29 @@ async function initialSync(lc, shard, tx, upstreamURI, syncOptions, context) {
       createLiteIndices(tx, indexes);
       const index = performance.now() - indexStart;
      lc.info?.(`Created indexes (${index.toFixed(3)} ms)`);
-
+      if (shadow) {
+        const rowsByTable = /* @__PURE__ */ new Map();
+        for (let i = 0; i < downloads.length; i++) rowsByTable.set(downloads[i].status.table, rowCounts[i].rows);
+        verifyShadowReplica(lc, tx, published, rowsByTable);
+      } else await addReplica(sql, shard, slotName, initialVersion, published, context);
       const elapsed = performance.now() - start;
       lc.info?.(`Synced ${total.rows.toLocaleString()} rows of ${numTables} tables in ${publications} up to ${lsn} (flush: ${total.flushTime.toFixed(3)}, index: ${index.toFixed(3)}, total: ${elapsed.toFixed(3)} ms)`);
     } finally {
       copyPool.end().catch((e) => lc.warn?.(`Error closing copyPool`, e));
     }
   } catch (e) {
-
-
-
-
-
+    if (!shadow) {
+      lc.warn?.(`dropping replication slot ${slotName}`, e);
+      await sql`
+        SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots
+        WHERE slot_name = ${slotName};
+      `.catch((e) => lc.warn?.(`Unable to drop replication slot ${slotName}`, e));
+    }
     await statusPublisher.publishAndThrowError(lc, "Initializing", e);
   } finally {
     statusPublisher.stop();
-    await
+    if (releaseShadowSnapshot) await releaseShadowSnapshot().catch((e) => lc.warn?.(`Error releasing shadow snapshot`, e));
+    if (replicationSession) await replicationSession.end();
     await sql.end();
   }
 }
@@ -160,12 +184,64 @@ async function ensurePublishedTables(lc, sql, shard, validate = true) {
 }
 function startTableCopyWorkers(lc, db, snapshot, numWorkers, numTables) {
   const { init } = importSnapshot(snapshot);
-  const tableCopiers = new TransactionPool(lc,
+  const tableCopiers = new TransactionPool(lc, {
+    mode: READONLY,
+    init,
+    initialWorkers: numWorkers
+  });
   tableCopiers.run(db);
   lc.info?.(`Started ${numWorkers} workers to copy ${numTables} tables`);
   if (parseInt(process.versions.node) < 22) lc.warn?.("\n\n\nOlder versions of Node have a bug that results in an unresponsive\nPostgres connection after running certain combinations of COPY commands.\nIf initial sync hangs, run zero-cache with Node v22+. This has the additional\nbenefit of being consistent with the Node version run in the production container image.\n\n\n");
   return tableCopiers;
 }
+/**
+ * Shadow-mode alternative to `createReplicationSlot`: opens a dedicated
+ * READ ONLY REPEATABLE READ transaction on a normal connection, exports the
+ * snapshot and captures the current WAL LSN, then holds the transaction
+ * open until `release()` is called. The held transaction keeps the snapshot
+ * importable by the table-copy workers for the duration of the COPY phase.
+ *
+ * Idle-in-transaction timeout is disabled locally so the exporter doesn't
+ * get killed while workers are still importing.
+ */
+async function acquireExportedSnapshotForShadowSync(lc, upstreamURI) {
+  const holder = pgClient(lc, upstreamURI, {
+    max: 1,
+    connection: { ["application_name"]: "shadow-initial-sync-snapshot" }
+  });
+  const ready = resolver();
+  const release = resolver();
+  const held = holder.begin(READONLY, async (tx) => {
+    await tx`SET LOCAL idle_in_transaction_session_timeout = 0`.execute();
+    const [row] = await tx`
+      SELECT pg_export_snapshot() AS snapshot,
+             pg_current_wal_lsn()::text AS lsn`;
+    ready.resolve(row);
+    await release.promise;
+  }).catch((e) => ready.reject(e));
+  let snapshot;
+  let lsn;
+  try {
+    ({snapshot, lsn} = await ready.promise);
+  } catch (e) {
+    await holder.end().catch((err) => lc.warn?.(`Error ending shadow snapshot holder after failure`, err));
+    throw e;
+  }
+  lc.info?.(`Exported snapshot ${snapshot} at LSN ${lsn} (shadow initial sync)`);
+  return {
+    snapshot,
+    lsn,
+    release: async () => {
+      release.resolve();
+      try {
+        await held;
+      } catch (e) {
+        lc.warn?.(`snapshot holder transaction ended with error`, e);
+      }
+      await holder.end();
+    }
+  };
+}
 async function createReplicationSlot(lc, session, slotName, failover = false) {
   const [slot] = failover ? await session.unsafe(`CREATE_REPLICATION_SLOT "${slotName}" LOGICAL pgoutput (FAILOVER)`) : await session.unsafe(`CREATE_REPLICATION_SLOT "${slotName}" LOGICAL pgoutput`);
   lc.info?.(`Created replication slot ${slotName}`, slot);
@@ -182,24 +258,103 @@ function createLiteTables(tx, tables, initialVersion) {
 function createLiteIndices(tx, indices) {
   for (const index of indices) tx.exec(createLiteIndexStatement(mapPostgresToLiteIndex(index)));
 }
+/**
+ * Runs structural assertions over a just-synced replica and throws if any
+ * fail. Only called in shadow mode — a successful return means the replica
+ * is schema-complete, row-count consistent, ZQL-queryable, and its column
+ * metadata is in sync with its lite schema.
+ *
+ * Exported for testing.
+ */
+function verifyShadowReplica(lc, db, published, rowsByTable) {
+  const issues = [];
+  const liteTables = listTables(db);
+  const liteTableByName = new Map(liteTables.map((t) => [t.name, t]));
+  for (const pt of published.tables) {
+    const name = liteTableName(pt);
+    const lite = liteTableByName.get(name);
+    if (!lite) {
+      issues.push(`missing table in replica: ${name}`);
+      continue;
+    }
+    for (const col of Object.keys(pt.columns)) if (!(col in lite.columns)) issues.push(`column missing in replica table ${name}: ${col}`);
+  }
+  const liteIndexNames = new Set(listIndexes(db).map((i) => i.name));
+  for (const ix of published.indexes) {
+    const mapped = mapPostgresToLiteIndex(ix);
+    if (!liteIndexNames.has(mapped.name)) issues.push(`missing index in replica: ${mapped.name} on ${mapped.tableName}`);
+  }
+  for (const [table, expected] of rowsByTable) try {
+    const [row] = db.prepare(`SELECT COUNT(*) as count FROM "${table}"`).all();
+    if (row.count !== expected) issues.push(`row count mismatch for table ${table}: copy counter reported ${expected}, replica has ${row.count}`);
+  } catch (e) {
+    issues.push(`could not count rows in table ${table}: ${String(e)}`);
+  }
+  const tableSpecs = computeZqlSpecs(lc, db, { includeBackfillingColumns: false });
+  for (const pt of published.tables) {
+    const name = liteTableName(pt);
+    if (!tableSpecs.has(name)) issues.push(`table not queryable via ZQL (dropped by computeZqlSpecs): ${name}`);
+  }
+  const meta = must(ColumnMetadataStore.getInstance(db));
+  for (const pt of published.tables) {
+    const name = liteTableName(pt);
+    const rows = meta.getTable(name);
+    for (const col of Object.keys(pt.columns)) if (!rows.has(col)) issues.push(`missing column_metadata row for ${name}.${col}`);
+  }
+  if (issues.length) throw new Error(`Shadow replica verification failed (${issues.length} issue(s)):\n` + issues.map((i) => ` - ${i}`).join("\n"));
+}
 var MB = 1024 * 1024;
 var MAX_BUFFERED_ROWS = 1e4;
 var BUFFERED_SIZE_THRESHOLD = 8 * MB;
-
+/**
+ * Produces ` TABLESAMPLE BERNOULLI(n)` when `sampleRate` is < 1, else `''`.
+ * Row-level Bernoulli sampling is used (rather than SYSTEM) because it
+ * produces a more uniform sample and, unlike SYSTEM, still returns rows
+ * for small tables at low rates.
+ */
+function tableSampleClause(sampleRate) {
+  if (sampleRate === void 0 || sampleRate >= 1) return "";
+  return ` TABLESAMPLE BERNOULLI(${parseFloat((sampleRate * 100).toFixed(6))})`;
+}
+function limitClause(maxRowsPerTable) {
+  return maxRowsPerTable !== void 0 ? ` LIMIT ${maxRowsPerTable}` : "";
+}
+function makeDownloadStatements(table, cols, sampleRate, maxRowsPerTable) {
   const filterConditions = Object.values(table.publications).map(({ rowFilter }) => rowFilter).filter((f) => !!f);
   const where = filterConditions.length === 0 ? "" : `WHERE ${filterConditions.join(" OR ")}`;
-  const
+  const sample = tableSampleClause(sampleRate);
+  const limit = limitClause(maxRowsPerTable);
+  const fromTable = `FROM ${id(table.schema)}.${id(table.name)}${sample} ${where}`;
+  const select = `SELECT ${cols.map(id).join(",")} ${fromTable}${limit}`;
+  if (limit) {
+    const bytesExpr = cols.map((col) => `COALESCE(pg_column_size(${id(col)}), 0)`).join(" + ");
+    return {
+      select,
+      getTotalRows: `SELECT COUNT(*)::bigint AS "totalRows" FROM (SELECT 1 AS _ ${fromTable}${limit}) s`,
+      getTotalBytes: `SELECT COALESCE(SUM(b), 0)::bigint AS "totalBytes" FROM (SELECT (${bytesExpr}) AS b ${fromTable}${limit}) s`
+    };
+  }
   const totalBytes = `(${cols.map((col) => `SUM(COALESCE(pg_column_size(${id(col)}), 0))`).join(" + ")})`;
   return {
-    select
+    select,
     getTotalRows: `SELECT COUNT(*) AS "totalRows" ${fromTable}`,
     getTotalBytes: `SELECT ${totalBytes} AS "totalBytes" ${fromTable}`
   };
 }
-async function getInitialDownloadState(lc, sql, spec) {
+async function getInitialDownloadState(lc, sql, spec, skipTotals) {
   const start = performance.now();
   const table = liteTableName(spec);
   const columns = Object.keys(spec.columns);
+  if (skipTotals) return {
+    spec,
+    status: {
+      table,
+      columns,
+      rows: 0,
+      totalRows: 0,
+      totalBytes: 0
+    }
+  };
   const stmts = makeDownloadStatements(spec, columns);
   const rowsResult = sql.unsafe(stmts.getTotalRows).execute();
   const bytesResult = sql.unsafe(stmts.getTotalBytes).execute();
@@ -217,11 +372,11 @@ async function getInitialDownloadState(lc, sql, spec) {
   lc.info?.(`Computed initial download state for ${table} (${elapsed} ms)`, { state: state.status });
   return state;
 }
-function copy(lc, { spec: table, status }, dbClient, from, to, textCopy) {
-  if (textCopy) return copyText(lc, table, status, dbClient, from, to);
-  return copyBinary(lc, table, status, from, to);
+function copy(lc, { spec: table, status }, dbClient, from, to, textCopy, sampleRate, maxRowsPerTable) {
+  if (textCopy) return copyText(lc, table, status, dbClient, from, to, sampleRate, maxRowsPerTable);
+  return copyBinary(lc, table, status, from, to, sampleRate, maxRowsPerTable);
 }
-async function copyBinary(lc, table, status, from, to) {
+async function copyBinary(lc, table, status, from, to, sampleRate, maxRowsPerTable) {
   const start = performance.now();
   let flushTime = 0;
   const tableName = liteTableName(table);
@@ -236,8 +391,10 @@ async function copyBinary(lc, table, status, from, to) {
   const insertBatchStmt = to.prepare(insertSql + `,${valuesSql}`.repeat(49));
   const filterConditions = Object.values(table.publications).map(({ rowFilter }) => rowFilter).filter((f) => !!f);
   const where = filterConditions.length === 0 ? "" : `WHERE ${filterConditions.join(" OR ")}`;
-  const
-  const
+  const sample = tableSampleClause(sampleRate);
+  const limit = limitClause(maxRowsPerTable);
+  const fromTable = `FROM ${id(table.schema)}.${id(table.name)}${sample} ${where}`;
+  const select = `SELECT ${orderedColumns.map(([name, spec]) => hasBinaryDecoder(spec) ? id(name) : `${id(name)}::text`).join(",")} ${fromTable}${limit}`;
   const decoders = orderedColumns.map(([, spec]) => hasBinaryDecoder(spec) ? makeBinaryDecoder(spec) : textCastDecoder);
   const valuesPerRow = columnSpecs.length;
   const valuesPerBatch = valuesPerRow * 50;
@@ -295,7 +452,7 @@ async function copyBinary(lc, table, status, from, to) {
     flushTime
   };
 }
-async function copyText(lc, table, status, dbClient, from, to) {
+async function copyText(lc, table, status, dbClient, from, to, sampleRate, maxRowsPerTable) {
   const start = performance.now();
   let flushTime = 0;
   const tableName = liteTableName(table);
@@ -308,7 +465,7 @@ async function copyText(lc, table, status, dbClient, from, to) {
     INSERT INTO "${tableName}" (${insertColumnList}) VALUES ${valuesSql}`;
   const insertStmt = to.prepare(insertSql);
   const insertBatchStmt = to.prepare(insertSql + `,${valuesSql}`.repeat(49));
-  const { select } = makeDownloadStatements(table, columnNames);
+  const { select } = makeDownloadStatements(table, columnNames, sampleRate, maxRowsPerTable);
   const valuesPerRow = columnSpecs.length;
   const valuesPerBatch = valuesPerRow * 50;
   const pendingValues = Array.from({ length: MAX_BUFFERED_ROWS * valuesPerRow });
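The compiled output above pins down the new sync options even though the diff never shows the updated `InitialSyncOptions` type itself: `initialSync` destructures a new `shadow` field, and the copy path reads `shadow?.sampleRate` and `shadow?.maxRowsPerTable`. A rough TypeScript sketch of that shape, with the base fields taken from the 1.3.0 type inlined in the old sourcemap and a hypothetical name for the new sub-type:

```ts
// Sketch only: inferred from the compiled diff above. `ShadowSyncOptions` is
// a hypothetical name; the field names come straight from the compiled code.
type ShadowSyncOptions = {
  // Fraction of rows to copy; values < 1 become `TABLESAMPLE BERNOULLI(rate * 100)`.
  sampleRate?: number | undefined;
  // Per-table cap, appended to the download SELECT as `LIMIT n`.
  maxRowsPerTable?: number | undefined;
};

type InitialSyncOptions = {
  tableCopyWorkers: number;
  profileCopy?: boolean | undefined;
  textCopy?: boolean | undefined;
  replicationSlotFailover?: boolean | undefined;
  // Presence of `shadow` switches to shadow mode: no replication slot is
  // created, an exported snapshot is used instead, per-table totals are
  // skipped, and the result is checked by verifyShadowReplica() rather than
  // registered upstream via addReplica().
  shadow?: ShadowSyncOptions | undefined;
};
```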
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"initial-sync.js","names":[],"sources":["../../../../../../../zero-cache/src/services/change-source/pg/initial-sync.ts"],"sourcesContent":["import {\n PG_CONFIGURATION_LIMIT_EXCEEDED,\n PG_INSUFFICIENT_PRIVILEGE,\n} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport {platform} from 'node:os';\nimport {Writable} from 'node:stream';\nimport {pipeline} from 'node:stream/promises';\nimport postgres from 'postgres';\nimport type {JSONObject} from '../../../../../shared/src/bigint-json.ts';\nimport {must} from '../../../../../shared/src/must.ts';\nimport {equals} from '../../../../../shared/src/set-utils.ts';\nimport type {DownloadStatus} from '../../../../../zero-events/src/status.ts';\nimport type {Database} from '../../../../../zqlite/src/db.ts';\nimport {\n createLiteIndexStatement,\n createLiteTableStatement,\n} from '../../../db/create.ts';\nimport * as Mode from '../../../db/mode-enum.ts';\nimport {\n BinaryCopyParser,\n hasBinaryDecoder,\n makeBinaryDecoder,\n textCastDecoder,\n} from '../../../db/pg-copy-binary.ts';\nimport {TsvParser} from '../../../db/pg-copy.ts';\nimport {\n mapPostgresToLite,\n mapPostgresToLiteIndex,\n} from '../../../db/pg-to-lite.ts';\nimport {getTypeParsers} from '../../../db/pg-type-parser.ts';\nimport {runTx} from '../../../db/run-transaction.ts';\nimport type {IndexSpec, PublishedTableSpec} from '../../../db/specs.ts';\nimport {importSnapshot, TransactionPool} from '../../../db/transaction-pool.ts';\nimport {\n JSON_STRINGIFIED,\n liteValue,\n type LiteValueType,\n} from '../../../types/lite.ts';\nimport {liteTableName} from '../../../types/names.ts';\nimport {PG_15, PG_17} from '../../../types/pg-versions.ts';\nimport {\n pgClient,\n type PostgresDB,\n type PostgresTransaction,\n type PostgresValueType,\n} from '../../../types/pg.ts';\nimport {CpuProfiler} from '../../../types/profiler.ts';\nimport type {ShardConfig} from '../../../types/shards.ts';\nimport {ALLOWED_APP_ID_CHARACTERS} from '../../../types/shards.ts';\nimport {id} from '../../../types/sql.ts';\nimport {ReplicationStatusPublisher} from '../../replicator/replication-status.ts';\nimport {ColumnMetadataStore} from '../../replicator/schema/column-metadata.ts';\nimport {initReplicationState} from '../../replicator/schema/replication-state.ts';\nimport {toStateVersionString} from './lsn.ts';\nimport {ensureShardSchema} from './schema/init.ts';\nimport {getPublicationInfo} from './schema/published.ts';\nimport {\n addReplica,\n dropShard,\n getInternalShardConfig,\n newReplicationSlot,\n replicationSlotExpression,\n validatePublications,\n} from './schema/shard.ts';\n\nexport type InitialSyncOptions = {\n tableCopyWorkers: number;\n profileCopy?: boolean | undefined;\n textCopy?: boolean | undefined;\n replicationSlotFailover?: boolean | undefined;\n};\n\n/** Server context to store with the initial sync metadata for debugging. */\nexport type ServerContext = JSONObject;\n\nexport async function initialSync(\n lc: LogContext,\n shard: ShardConfig,\n tx: Database,\n upstreamURI: string,\n syncOptions: InitialSyncOptions,\n context: ServerContext,\n) {\n if (!ALLOWED_APP_ID_CHARACTERS.test(shard.appID)) {\n throw new Error(\n 'The App ID may only consist of lower-case letters, numbers, and the underscore character',\n );\n }\n const {\n tableCopyWorkers,\n profileCopy,\n textCopy = false,\n replicationSlotFailover = false,\n } = syncOptions;\n const copyProfiler = profileCopy ? 
await CpuProfiler.connect() : null;\n const sql = pgClient(lc, upstreamURI);\n const replicationSession = pgClient(lc, upstreamURI, {\n ['fetch_types']: false, // Necessary for the streaming protocol\n connection: {replication: 'database'}, // https://www.postgresql.org/docs/current/protocol-replication.html\n });\n const slotName = newReplicationSlot(shard);\n const statusPublisher = ReplicationStatusPublisher.forRunningTransaction(\n tx,\n ).publish(lc, 'Initializing');\n try {\n const pgVersion = await checkUpstreamConfig(sql);\n\n const {publications} = await ensurePublishedTables(lc, sql, shard);\n lc.info?.(`Upstream is setup with publications [${publications}]`);\n\n const {database, host} = sql.options;\n lc.info?.(`opening replication session to ${database}@${host}`);\n\n let slot: ReplicationSlot;\n for (let first = true; ; first = false) {\n try {\n slot = await createReplicationSlot(\n lc,\n replicationSession,\n slotName,\n replicationSlotFailover && pgVersion >= PG_17,\n );\n break;\n } catch (e) {\n if (first && e instanceof postgres.PostgresError) {\n if (e.code === PG_INSUFFICIENT_PRIVILEGE) {\n // Some Postgres variants (e.g. Google Cloud SQL) require that\n // the user have the REPLICATION role in order to create a slot.\n // Note that this must be done by the upstreamDB connection, and\n // does not work in the replicationSession itself.\n await sql`ALTER ROLE current_user WITH REPLICATION`;\n lc.info?.(`Added the REPLICATION role to database user`);\n continue;\n }\n if (e.code === PG_CONFIGURATION_LIMIT_EXCEEDED) {\n const slotExpression = replicationSlotExpression(shard);\n\n const dropped = await sql<{slot: string}[]>`\n SELECT slot_name as slot, pg_drop_replication_slot(slot_name) \n FROM pg_replication_slots\n WHERE slot_name LIKE ${slotExpression} AND NOT active`;\n if (dropped.length) {\n lc.warn?.(\n `Dropped inactive replication slots: ${dropped.map(({slot}) => slot)}`,\n e,\n );\n continue;\n }\n lc.error?.(`Unable to drop replication slots`, e);\n }\n }\n throw e;\n }\n }\n const {snapshot_name: snapshot, consistent_point: lsn} = slot;\n const initialVersion = toStateVersionString(lsn);\n\n initReplicationState(tx, publications, initialVersion, context);\n\n // Run up to MAX_WORKERS to copy of tables at the replication slot's snapshot.\n const start = performance.now();\n // Retrieve the published schema at the consistent_point.\n const published = await runTx(\n sql,\n async tx => {\n await tx.unsafe(/* sql*/ `SET TRANSACTION SNAPSHOT '${snapshot}'`);\n return getPublicationInfo(tx, publications);\n },\n {mode: Mode.READONLY},\n );\n // Note: If this throws, initial-sync is aborted.\n validatePublications(lc, published);\n\n // Now that tables have been validated, kick off the copiers.\n const {tables, indexes} = published;\n const numTables = tables.length;\n if (platform() === 'win32' && tableCopyWorkers < numTables) {\n lc.warn?.(\n `Increasing the number of copy workers from ${tableCopyWorkers} to ` +\n `${numTables} to work around a Node/Postgres connection bug`,\n );\n }\n const numWorkers =\n platform() === 'win32'\n ? 
numTables\n : Math.min(tableCopyWorkers, numTables);\n\n const copyPool = pgClient(lc, upstreamURI, {\n max: numWorkers,\n connection: {['application_name']: 'initial-sync-copy-worker'},\n ['max_lifetime']: 120 * 60, // set a long (2h) limit for COPY streaming\n });\n const copiers = startTableCopyWorkers(\n lc,\n copyPool,\n snapshot,\n numWorkers,\n numTables,\n );\n try {\n createLiteTables(tx, tables, initialVersion);\n const downloads = await Promise.all(\n tables.map(spec =>\n copiers.processReadTask((db, lc) =>\n getInitialDownloadState(lc, db, spec),\n ),\n ),\n );\n statusPublisher.publish(\n lc,\n 'Initializing',\n `Copying ${numTables} upstream tables at version ${initialVersion}`,\n 5000,\n () => ({downloadStatus: downloads.map(({status}) => status)}),\n );\n\n void copyProfiler?.start();\n const rowCounts = await Promise.all(\n downloads.map(table =>\n copiers.processReadTask((db, lc) =>\n copy(lc, table, copyPool, db, tx, textCopy),\n ),\n ),\n );\n void copyProfiler?.stopAndDispose(lc, 'initial-copy');\n copiers.setDone();\n\n const total = rowCounts.reduce(\n (acc, curr) => ({\n rows: acc.rows + curr.rows,\n flushTime: acc.flushTime + curr.flushTime,\n }),\n {rows: 0, flushTime: 0},\n );\n\n statusPublisher.publish(\n lc,\n 'Indexing',\n `Creating ${indexes.length} indexes`,\n 5000,\n );\n const indexStart = performance.now();\n createLiteIndices(tx, indexes);\n const index = performance.now() - indexStart;\n lc.info?.(`Created indexes (${index.toFixed(3)} ms)`);\n\n await addReplica(\n sql,\n shard,\n slotName,\n initialVersion,\n published,\n context,\n );\n\n const elapsed = performance.now() - start;\n lc.info?.(\n `Synced ${total.rows.toLocaleString()} rows of ${numTables} tables in ${publications} up to ${lsn} ` +\n `(flush: ${total.flushTime.toFixed(3)}, index: ${index.toFixed(3)}, total: ${elapsed.toFixed(3)} ms)`,\n );\n } finally {\n // All meaningful errors are handled at the processReadTask() call site.\n void copyPool.end().catch(e => lc.warn?.(`Error closing copyPool`, e));\n }\n } catch (e) {\n // If initial-sync did not succeed, make a best effort to drop the\n // orphaned replication slot to avoid running out of slots in\n // pathological cases that result in repeated failures.\n lc.warn?.(`dropping replication slot ${slotName}`, e);\n await sql`\n SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots\n WHERE slot_name = ${slotName};\n `.catch(e => lc.warn?.(`Unable to drop replication slot ${slotName}`, e));\n await statusPublisher.publishAndThrowError(lc, 'Initializing', e);\n } finally {\n statusPublisher.stop();\n await replicationSession.end();\n await sql.end();\n }\n}\n\nasync function checkUpstreamConfig(sql: PostgresDB) {\n const {walLevel, version} = (\n await sql<{walLevel: string; version: number}[]>`\n SELECT current_setting('wal_level') as \"walLevel\", \n current_setting('server_version_num') as \"version\";\n `\n )[0];\n\n if (walLevel !== 'logical') {\n throw new Error(\n `Postgres must be configured with \"wal_level = logical\" (currently: \"${walLevel})`,\n );\n }\n if (version < PG_15) {\n throw new Error(\n `Must be running Postgres 15 or higher (currently: \"${version}\")`,\n );\n }\n return version;\n}\n\nasync function ensurePublishedTables(\n lc: LogContext,\n sql: PostgresDB,\n shard: ShardConfig,\n validate = true,\n): Promise<{publications: string[]}> {\n const {database, host} = sql.options;\n lc.info?.(`Ensuring upstream PUBLICATION on ${database}@${host}`);\n\n await ensureShardSchema(lc, sql, shard);\n const 
{publications} = await getInternalShardConfig(sql, shard);\n\n if (validate) {\n let valid = false;\n const nonInternalPublications = publications.filter(\n p => !p.startsWith('_'),\n );\n const exists = await sql`\n SELECT pubname FROM pg_publication WHERE pubname IN ${sql(publications)}\n `.values();\n if (exists.length !== publications.length) {\n lc.warn?.(\n `some configured publications [${publications}] are missing: ` +\n `[${exists.flat()}]. resyncing`,\n );\n } else if (\n !equals(new Set(shard.publications), new Set(nonInternalPublications))\n ) {\n lc.warn?.(\n `requested publications [${shard.publications}] differ from previous` +\n `publications [${nonInternalPublications}]. resyncing`,\n );\n } else {\n valid = true;\n }\n if (!valid) {\n await sql.unsafe(dropShard(shard.appID, shard.shardNum));\n return ensurePublishedTables(lc, sql, shard, false);\n }\n }\n return {publications};\n}\n\nfunction startTableCopyWorkers(\n lc: LogContext,\n db: PostgresDB,\n snapshot: string,\n numWorkers: number,\n numTables: number,\n): TransactionPool {\n const {init} = importSnapshot(snapshot);\n const tableCopiers = new TransactionPool(\n lc,\n Mode.READONLY,\n init,\n undefined,\n numWorkers,\n );\n tableCopiers.run(db);\n\n lc.info?.(`Started ${numWorkers} workers to copy ${numTables} tables`);\n\n if (parseInt(process.versions.node) < 22) {\n lc.warn?.(\n `\\n\\n\\n` +\n `Older versions of Node have a bug that results in an unresponsive\\n` +\n `Postgres connection after running certain combinations of COPY commands.\\n` +\n `If initial sync hangs, run zero-cache with Node v22+. This has the additional\\n` +\n `benefit of being consistent with the Node version run in the production container image.` +\n `\\n\\n\\n`,\n );\n }\n return tableCopiers;\n}\n\n// Row returned by `CREATE_REPLICATION_SLOT`\ntype ReplicationSlot = {\n slot_name: string;\n consistent_point: string;\n snapshot_name: string;\n output_plugin: string;\n};\n\n// Note: The replication connection does not support the extended query protocol,\n// so all commands must be sent using sql.unsafe(). This is technically safe\n// because all placeholder values are under our control (i.e. \"slotName\").\nexport async function createReplicationSlot(\n lc: LogContext,\n session: postgres.Sql,\n slotName: string,\n // Note: must be false if pgVersion < PG_17. Caller must verify.\n failover = false,\n): Promise<ReplicationSlot> {\n const [slot] = failover\n ? 
await session.unsafe<ReplicationSlot[]>(\n /*sql*/ `CREATE_REPLICATION_SLOT \"${slotName}\" LOGICAL pgoutput (FAILOVER)`,\n )\n : await session.unsafe<ReplicationSlot[]>(\n /*sql*/ `CREATE_REPLICATION_SLOT \"${slotName}\" LOGICAL pgoutput`,\n );\n lc.info?.(`Created replication slot ${slotName}`, slot);\n return slot;\n}\n\nfunction createLiteTables(\n tx: Database,\n tables: PublishedTableSpec[],\n initialVersion: string,\n) {\n // TODO: Figure out how to reuse the ChangeProcessor here to avoid\n // duplicating the ColumnMetadata logic.\n const columnMetadata = must(ColumnMetadataStore.getInstance(tx));\n for (const t of tables) {\n tx.exec(createLiteTableStatement(mapPostgresToLite(t, initialVersion)));\n const tableName = liteTableName(t);\n for (const [colName, colSpec] of Object.entries(t.columns)) {\n columnMetadata.insert(tableName, colName, colSpec);\n }\n }\n}\n\nfunction createLiteIndices(tx: Database, indices: IndexSpec[]) {\n for (const index of indices) {\n tx.exec(createLiteIndexStatement(mapPostgresToLiteIndex(index)));\n }\n}\n\n// Verified empirically that batches of 50 seem to be the sweet spot,\n// similar to the report in https://sqlite.org/forum/forumpost/8878a512d3652655\n//\n// Exported for testing.\nexport const INSERT_BATCH_SIZE = 50;\n\nconst MB = 1024 * 1024;\nconst MAX_BUFFERED_ROWS = 10_000;\nconst BUFFERED_SIZE_THRESHOLD = 8 * MB;\n\nexport type DownloadStatements = {\n select: string;\n getTotalRows: string;\n getTotalBytes: string;\n};\n\nexport function makeDownloadStatements(\n table: PublishedTableSpec,\n cols: string[],\n): DownloadStatements {\n const filterConditions = Object.values(table.publications)\n .map(({rowFilter}) => rowFilter)\n .filter(f => !!f); // remove nulls\n const where =\n filterConditions.length === 0\n ? 
''\n : /*sql*/ `WHERE ${filterConditions.join(' OR ')}`;\n const fromTable = /*sql*/ `FROM ${id(table.schema)}.${id(table.name)} ${where}`;\n const totalBytes = `(${cols.map(col => `SUM(COALESCE(pg_column_size(${id(col)}), 0))`).join(' + ')})`;\n const stmts = {\n select: /*sql*/ `SELECT ${cols.map(id).join(',')} ${fromTable}`,\n getTotalRows: /*sql*/ `SELECT COUNT(*) AS \"totalRows\" ${fromTable}`,\n getTotalBytes: /*sql*/ `SELECT ${totalBytes} AS \"totalBytes\" ${fromTable}`,\n };\n return stmts;\n}\n\ntype DownloadState = {\n spec: PublishedTableSpec;\n status: DownloadStatus;\n};\n\nasync function getInitialDownloadState(\n lc: LogContext,\n sql: PostgresDB,\n spec: PublishedTableSpec,\n): Promise<DownloadState> {\n const start = performance.now();\n const table = liteTableName(spec);\n const columns = Object.keys(spec.columns);\n const stmts = makeDownloadStatements(spec, columns);\n const rowsResult = sql\n .unsafe<{totalRows: bigint}[]>(stmts.getTotalRows)\n .execute();\n const bytesResult = sql\n .unsafe<{totalBytes: bigint}[]>(stmts.getTotalBytes)\n .execute();\n\n const state: DownloadState = {\n spec,\n status: {\n table,\n columns,\n rows: 0,\n totalRows: Number((await rowsResult)[0].totalRows),\n totalBytes: Number((await bytesResult)[0].totalBytes),\n },\n };\n const elapsed = (performance.now() - start).toFixed(3);\n lc.info?.(`Computed initial download state for ${table} (${elapsed} ms)`, {\n state: state.status,\n });\n return state;\n}\n\nfunction copy(\n lc: LogContext,\n {spec: table, status}: DownloadState,\n dbClient: PostgresDB,\n from: PostgresTransaction,\n to: Database,\n textCopy: boolean,\n) {\n if (textCopy) {\n return copyText(lc, table, status, dbClient, from, to);\n }\n return copyBinary(lc, table, status, from, to);\n}\n\nasync function copyBinary(\n lc: LogContext,\n table: PublishedTableSpec,\n status: DownloadStatus,\n from: PostgresTransaction,\n to: Database,\n) {\n const start = performance.now();\n let flushTime = 0;\n\n const tableName = liteTableName(table);\n const orderedColumns = Object.entries(table.columns);\n\n const columnNames = orderedColumns.map(([c]) => c);\n const columnSpecs = orderedColumns.map(([_name, spec]) => spec);\n const insertColumnList = columnNames.map(c => id(c)).join(',');\n\n const valuesSql =\n columnNames.length > 0 ? `(${'?,'.repeat(columnNames.length - 1)}?)` : '()';\n const insertSql = /*sql*/ `\n INSERT INTO \"${tableName}\" (${insertColumnList}) VALUES ${valuesSql}`;\n const insertStmt = to.prepare(insertSql);\n const insertBatchStmt = to.prepare(\n insertSql + `,${valuesSql}`.repeat(INSERT_BATCH_SIZE - 1),\n );\n\n // Build SELECT with ::text casts for columns without a known binary decoder.\n const filterConditions = Object.values(table.publications)\n .map(({rowFilter}) => rowFilter)\n .filter(f => !!f);\n const where =\n filterConditions.length === 0\n ? ''\n : /*sql*/ `WHERE ${filterConditions.join(' OR ')}`;\n const fromTable = /*sql*/ `FROM ${id(table.schema)}.${id(table.name)} ${where}`;\n const selectColumns = orderedColumns.map(([name, spec]) =>\n hasBinaryDecoder(spec) ? id(name) : `${id(name)}::text`,\n );\n const select = /*sql*/ `SELECT ${selectColumns.join(',')} ${fromTable}`;\n\n const decoders = orderedColumns.map(([, spec]) =>\n hasBinaryDecoder(spec) ? 
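copyBinary requests binary COPY output only for columns it knows how to decode; every other column is cast to `::text` in the SELECT so the same stream carries both kinds of fields. A sketch of that column-list construction, with `hasBinaryDecoder` and the identifier quoter stubbed for illustration:

```ts
// Sketch of copyBinary's SELECT list: columns with a known binary decoder
// are selected as-is; the rest are cast to text. Stubs are illustrative.
type ColumnSpec = {dataType: string};

declare function hasBinaryDecoder(spec: ColumnSpec): boolean;
declare function id(name: string): string; // SQL identifier quoting

function buildSelectColumns(
  orderedColumns: Array<[name: string, spec: ColumnSpec]>,
): string[] {
  return orderedColumns.map(([name, spec]) =>
    hasBinaryDecoder(spec) ? id(name) : `${id(name)}::text`,
  );
}

// e.g. ['"id"', '"payload"::text'] when "payload" has no binary decoder;
// the matching decoder array pairs each position with either
// makeBinaryDecoder(spec) or textCastDecoder.
```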
makeBinaryDecoder(spec) : textCastDecoder,\n );\n\n const valuesPerRow = columnSpecs.length;\n const valuesPerBatch = valuesPerRow * INSERT_BATCH_SIZE;\n\n const pendingValues: LiteValueType[] = Array.from({\n length: MAX_BUFFERED_ROWS * valuesPerRow,\n });\n let pendingRows = 0;\n let pendingSize = 0;\n\n function flush() {\n const start = performance.now();\n const flushedRows = pendingRows;\n const flushedSize = pendingSize;\n\n let l = 0;\n for (; pendingRows > INSERT_BATCH_SIZE; pendingRows -= INSERT_BATCH_SIZE) {\n insertBatchStmt.run(pendingValues.slice(l, (l += valuesPerBatch)));\n }\n for (; pendingRows > 0; pendingRows--) {\n insertStmt.run(pendingValues.slice(l, (l += valuesPerRow)));\n }\n const flushedValues = flushedRows * valuesPerRow;\n for (let i = 0; i < flushedValues; i++) {\n pendingValues[i] = undefined as unknown as LiteValueType;\n }\n pendingSize = 0;\n status.rows += flushedRows;\n\n const elapsed = performance.now() - start;\n flushTime += elapsed;\n lc.debug?.(\n `flushed ${flushedRows} ${tableName} rows (${flushedSize} bytes) in ${elapsed.toFixed(3)} ms`,\n );\n }\n\n const binaryParser = new BinaryCopyParser();\n let col = 0;\n\n lc.info?.(`Starting binary copy stream of ${tableName}:`, select);\n\n await pipeline(\n await from\n .unsafe(`COPY (${select}) TO STDOUT WITH (FORMAT binary)`)\n .readable(),\n new Writable({\n highWaterMark: BUFFERED_SIZE_THRESHOLD,\n\n write(\n chunk: Buffer,\n _encoding: string,\n callback: (error?: Error) => void,\n ) {\n try {\n for (const fieldBuf of binaryParser.parse(chunk)) {\n pendingSize += fieldBuf === null ? 4 : fieldBuf.length;\n pendingValues[pendingRows * valuesPerRow + col] =\n fieldBuf === null ? null : decoders[col](fieldBuf);\n\n if (++col === decoders.length) {\n col = 0;\n if (\n ++pendingRows >= MAX_BUFFERED_ROWS - valuesPerRow ||\n pendingSize >= BUFFERED_SIZE_THRESHOLD\n ) {\n flush();\n }\n }\n }\n callback();\n } catch (e) {\n callback(e instanceof Error ? e : new Error(String(e)));\n }\n },\n\n final: (callback: (error?: Error) => void) => {\n try {\n flush();\n callback();\n } catch (e) {\n callback(e instanceof Error ? e : new Error(String(e)));\n }\n },\n }),\n );\n\n const elapsed = performance.now() - start;\n lc.info?.(\n `Finished copying ${status.rows} rows into ${tableName} ` +\n `(flush: ${flushTime.toFixed(3)} ms) (total: ${elapsed.toFixed(3)} ms) `,\n );\n return {rows: status.rows, flushTime};\n}\n\nasync function copyText(\n lc: LogContext,\n table: PublishedTableSpec,\n status: DownloadStatus,\n dbClient: PostgresDB,\n from: PostgresTransaction,\n to: Database,\n) {\n const start = performance.now();\n let flushTime = 0;\n\n const tableName = liteTableName(table);\n const orderedColumns = Object.entries(table.columns);\n\n const columnNames = orderedColumns.map(([c]) => c);\n const columnSpecs = orderedColumns.map(([_name, spec]) => spec);\n const insertColumnList = columnNames.map(c => id(c)).join(',');\n\n const valuesSql =\n columnNames.length > 0 ? 
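The `flush()` in both copy paths drains the flat `pendingValues` buffer in two phases: full batches of `INSERT_BATCH_SIZE` rows through the batch statement, then the remainder row by row. The arithmetic distilled, with the prepared statements mocked as callbacks:

```ts
// Distilled drain loop from flush(). Note the strict `>`: a buffer of
// exactly 50 rows takes the single-row path 50 times rather than issuing
// one batch statement.
const INSERT_BATCH_SIZE = 50;

function drain(
  pendingValues: unknown[],
  pendingRows: number,
  valuesPerRow: number,
  runBatch: (params: unknown[]) => void, // stands in for insertBatchStmt.run
  runSingle: (params: unknown[]) => void, // stands in for insertStmt.run
): void {
  const valuesPerBatch = valuesPerRow * INSERT_BATCH_SIZE;
  let l = 0;
  for (; pendingRows > INSERT_BATCH_SIZE; pendingRows -= INSERT_BATCH_SIZE) {
    runBatch(pendingValues.slice(l, (l += valuesPerBatch)));
  }
  for (; pendingRows > 0; pendingRows--) {
    runSingle(pendingValues.slice(l, (l += valuesPerRow)));
  }
}
```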
`(${'?,'.repeat(columnNames.length - 1)}?)` : '()';\n const insertSql = /*sql*/ `\n INSERT INTO \"${tableName}\" (${insertColumnList}) VALUES ${valuesSql}`;\n const insertStmt = to.prepare(insertSql);\n const insertBatchStmt = to.prepare(\n insertSql + `,${valuesSql}`.repeat(INSERT_BATCH_SIZE - 1),\n );\n\n const {select} = makeDownloadStatements(table, columnNames);\n const valuesPerRow = columnSpecs.length;\n const valuesPerBatch = valuesPerRow * INSERT_BATCH_SIZE;\n\n const pendingValues: LiteValueType[] = Array.from({\n length: MAX_BUFFERED_ROWS * valuesPerRow,\n });\n let pendingRows = 0;\n let pendingSize = 0;\n\n function flush() {\n const start = performance.now();\n const flushedRows = pendingRows;\n const flushedSize = pendingSize;\n\n let l = 0;\n for (; pendingRows > INSERT_BATCH_SIZE; pendingRows -= INSERT_BATCH_SIZE) {\n insertBatchStmt.run(pendingValues.slice(l, (l += valuesPerBatch)));\n }\n for (; pendingRows > 0; pendingRows--) {\n insertStmt.run(pendingValues.slice(l, (l += valuesPerRow)));\n }\n const flushedValues = flushedRows * valuesPerRow;\n for (let i = 0; i < flushedValues; i++) {\n pendingValues[i] = undefined as unknown as LiteValueType;\n }\n pendingSize = 0;\n status.rows += flushedRows;\n\n const elapsed = performance.now() - start;\n flushTime += elapsed;\n lc.debug?.(\n `flushed ${flushedRows} ${tableName} rows (${flushedSize} bytes) in ${elapsed.toFixed(3)} ms`,\n );\n }\n\n lc.info?.(`Starting text copy stream of ${tableName}:`, select);\n const pgParsers = await getTypeParsers(dbClient, {returnJsonAsString: true});\n const parsers = columnSpecs.map(c => {\n const pgParse = pgParsers.getTypeParser(c.typeOID);\n return (val: string) =>\n liteValue(\n pgParse(val) as PostgresValueType,\n c.dataType,\n JSON_STRINGIFIED,\n );\n });\n\n const tsvParser = new TsvParser();\n let col = 0;\n\n await pipeline(\n await from.unsafe(`COPY (${select}) TO STDOUT`).readable(),\n new Writable({\n highWaterMark: BUFFERED_SIZE_THRESHOLD,\n\n write(\n chunk: Buffer,\n _encoding: string,\n callback: (error?: Error) => void,\n ) {\n try {\n for (const text of tsvParser.parse(chunk)) {\n pendingSize += text === null ? 4 : text.length;\n pendingValues[pendingRows * valuesPerRow + col] =\n text === null ? null : parsers[col](text);\n\n if (++col === parsers.length) {\n col = 0;\n if (\n ++pendingRows >= MAX_BUFFERED_ROWS - valuesPerRow ||\n pendingSize >= BUFFERED_SIZE_THRESHOLD\n ) {\n flush();\n }\n }\n }\n callback();\n } catch (e) {\n callback(e instanceof Error ? e : new Error(String(e)));\n }\n },\n\n final: (callback: (error?: Error) => void) => {\n try {\n flush();\n callback();\n } catch (e) {\n callback(e instanceof Error ? 
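The text path converts each TSV field through a parser chosen once per column: a pg text parser looked up by the column's type OID, composed with the lite-value conversion. A reduced sketch of that dispatch; `getTypeParser` and `toLite` stand in for the pg-type-parser and `liteValue` machinery:

```ts
// Reduced sketch of copyText's per-column conversion: one parser per
// column position, keyed by Postgres type OID. Stand-ins are illustrative.
type ColumnSpec = {typeOID: number; dataType: string};

function makeColumnParsers(
  specs: ColumnSpec[],
  getTypeParser: (oid: number) => (text: string) => unknown,
  toLite: (value: unknown, dataType: string) => unknown,
): Array<(text: string) => unknown> {
  return specs.map(spec => {
    const pgParse = getTypeParser(spec.typeOID);
    return (text: string) => toLite(pgParse(text), spec.dataType);
  });
}

// During streaming, `col` cycles through column positions so each TSV
// field is routed to the parser for its column:
//   pendingValues[pendingRows * valuesPerRow + col] = parsers[col](text);
```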
e : new Error(String(e)));\n }\n },\n }),\n );\n\n const elapsed = performance.now() - start;\n lc.info?.(\n `Finished copying ${status.rows} rows into ${tableName} ` +\n `(flush: ${flushTime.toFixed(3)} ms) (total: ${elapsed.toFixed(3)} ms) `,\n );\n return {rows: status.rows, flushTime};\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA4EA,eAAsB,YACpB,IACA,OACA,IACA,aACA,aACA,SACA;AACA,KAAI,CAAC,0BAA0B,KAAK,MAAM,MAAM,CAC9C,OAAM,IAAI,MACR,2FACD;CAEH,MAAM,EACJ,kBACA,aACA,WAAW,OACX,0BAA0B,UACxB;CACJ,MAAM,eAAe,cAAc,MAAM,YAAY,SAAS,GAAG;CACjE,MAAM,MAAM,SAAS,IAAI,YAAY;CACrC,MAAM,qBAAqB,SAAS,IAAI,aAAa;GAClD,gBAAgB;EACjB,YAAY,EAAC,aAAa,YAAW;EACtC,CAAC;CACF,MAAM,WAAW,mBAAmB,MAAM;CAC1C,MAAM,kBAAkB,2BAA2B,sBACjD,GACD,CAAC,QAAQ,IAAI,eAAe;AAC7B,KAAI;EACF,MAAM,YAAY,MAAM,oBAAoB,IAAI;EAEhD,MAAM,EAAC,iBAAgB,MAAM,sBAAsB,IAAI,KAAK,MAAM;AAClE,KAAG,OAAO,wCAAwC,aAAa,GAAG;EAElE,MAAM,EAAC,UAAU,SAAQ,IAAI;AAC7B,KAAG,OAAO,kCAAkC,SAAS,GAAG,OAAO;EAE/D,IAAI;AACJ,OAAK,IAAI,QAAQ,OAAQ,QAAQ,MAC/B,KAAI;AACF,UAAO,MAAM,sBACX,IACA,oBACA,UACA,2BAA2B,aAAA,KAC5B;AACD;WACO,GAAG;AACV,OAAI,SAAS,aAAa,SAAS,eAAe;AAChD,QAAI,EAAE,SAAS,2BAA2B;AAKxC,WAAM,GAAG;AACT,QAAG,OAAO,8CAA8C;AACxD;;AAEF,QAAI,EAAE,SAAS,iCAAiC;KAG9C,MAAM,UAAU,MAAM,GAAqB;;;uCAFpB,0BAA0B,MAAM,CAKb;AAC1C,SAAI,QAAQ,QAAQ;AAClB,SAAG,OACD,uCAAuC,QAAQ,KAAK,EAAC,WAAU,KAAK,IACpE,EACD;AACD;;AAEF,QAAG,QAAQ,oCAAoC,EAAE;;;AAGrD,SAAM;;EAGV,MAAM,EAAC,eAAe,UAAU,kBAAkB,QAAO;EACzD,MAAM,iBAAiB,qBAAqB,IAAI;AAEhD,uBAAqB,IAAI,cAAc,gBAAgB,QAAQ;EAG/D,MAAM,QAAQ,YAAY,KAAK;EAE/B,MAAM,YAAY,MAAM,MACtB,KACA,OAAM,OAAM;AACV,SAAM,GAAG,OAAgB,6BAA6B,SAAS,GAAG;AAClE,UAAO,mBAAmB,IAAI,aAAa;KAE7C,EAAC,MAAM,UAAc,CACtB;AAED,uBAAqB,IAAI,UAAU;EAGnC,MAAM,EAAC,QAAQ,YAAW;EAC1B,MAAM,YAAY,OAAO;AACzB,MAAI,UAAU,KAAK,WAAW,mBAAmB,UAC/C,IAAG,OACD,8CAA8C,iBAAiB,MAC1D,UAAU,gDAChB;EAEH,MAAM,aACJ,UAAU,KAAK,UACX,YACA,KAAK,IAAI,kBAAkB,UAAU;EAE3C,MAAM,WAAW,SAAS,IAAI,aAAa;GACzC,KAAK;GACL,YAAY,GAAE,qBAAqB,4BAA2B;IAC7D,iBAAiB;GACnB,CAAC;EACF,MAAM,UAAU,sBACd,IACA,UACA,UACA,YACA,UACD;AACD,MAAI;AACF,oBAAiB,IAAI,QAAQ,eAAe;GAC5C,MAAM,YAAY,MAAM,QAAQ,IAC9B,OAAO,KAAI,SACT,QAAQ,iBAAiB,IAAI,OAC3B,wBAAwB,IAAI,IAAI,KAAK,CACtC,CACF,CACF;AACD,mBAAgB,QACd,IACA,gBACA,WAAW,UAAU,8BAA8B,kBACnD,YACO,EAAC,gBAAgB,UAAU,KAAK,EAAC,aAAY,OAAO,EAAC,EAC7D;AAEI,iBAAc,OAAO;GAC1B,MAAM,YAAY,MAAM,QAAQ,IAC9B,UAAU,KAAI,UACZ,QAAQ,iBAAiB,IAAI,OAC3B,KAAK,IAAI,OAAO,UAAU,IAAI,IAAI,SAAS,CAC5C,CACF,CACF;AACI,iBAAc,eAAe,IAAI,eAAe;AACrD,WAAQ,SAAS;GAEjB,MAAM,QAAQ,UAAU,QACrB,KAAK,UAAU;IACd,MAAM,IAAI,OAAO,KAAK;IACtB,WAAW,IAAI,YAAY,KAAK;IACjC,GACD;IAAC,MAAM;IAAG,WAAW;IAAE,CACxB;AAED,mBAAgB,QACd,IACA,YACA,YAAY,QAAQ,OAAO,WAC3B,IACD;GACD,MAAM,aAAa,YAAY,KAAK;AACpC,qBAAkB,IAAI,QAAQ;GAC9B,MAAM,QAAQ,YAAY,KAAK,GAAG;AAClC,MAAG,OAAO,oBAAoB,MAAM,QAAQ,EAAE,CAAC,MAAM;AAErD,SAAM,WACJ,KACA,OACA,UACA,gBACA,WACA,QACD;GAED,MAAM,UAAU,YAAY,KAAK,GAAG;AACpC,MAAG,OACD,UAAU,MAAM,KAAK,gBAAgB,CAAC,WAAW,UAAU,aAAa,aAAa,SAAS,IAAI,WACrF,MAAM,UAAU,QAAQ,EAAE,CAAC,WAAW,MAAM,QAAQ,EAAE,CAAC,WAAW,QAAQ,QAAQ,EAAE,CAAC,MACnG;YACO;AAEH,YAAS,KAAK,CAAC,OAAM,MAAK,GAAG,OAAO,0BAA0B,EAAE,CAAC;;UAEjE,GAAG;AAIV,KAAG,OAAO,6BAA6B,YAAY,EAAE;AACrD,QAAM,GAAG;;4BAEe,SAAS;MAC/B,OAAM,MAAK,GAAG,OAAO,mCAAmC,YAAY,EAAE,CAAC;AACzE,QAAM,gBAAgB,qBAAqB,IAAI,gBAAgB,EAAE;WACzD;AACR,kBAAgB,MAAM;AACtB,QAAM,mBAAmB,KAAK;AAC9B,QAAM,IAAI,KAAK;;;AAInB,eAAe,oBAAoB,KAAiB;CAClD,MAAM,EAAC,UAAU,aACf,MAAM,GAA0C;;;KAIhD;AAEF,KAAI,aAAa,UACf,OAAM,IAAI,MACR,uEAAuE,SAAS,GACjF;AAEH,KAAI,UAAA,KACF,OAAM,IAAI,MACR,sDAAsD,QAAQ,IAC/D;AAEH,QAAO;;AAGT,eAAe,sBACb,IACA,KACA,OACA,WAAW,MACwB;CACnC,MAAM,EAAC,UAAU,SAAQ,IAAI;AAC7B,IAAG,OAAO,oCAAoC,SAAS,GAAG,OAAO;AAEjE,OAAM,k
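One API seam visible between the two embedded sources is the `TransactionPool` constructor: the 1.3.0 code passes positional arguments, while 1.4.0-canary.1 passes an options object (matching the transaction-pool.d.ts changes listed in this diff). A side-by-side sketch, with the surrounding values stubbed and option names taken from the new call site:

```ts
// Constructor change visible in startTableCopyWorkers. Stubs stand in for
// the real imports.
import type {LogContext} from '@rocicorp/logger';
declare const lc: LogContext;
declare const Mode: {READONLY: unknown};
declare const init: unknown;
declare const numWorkers: number;
declare const TransactionPool: any;

// 1.3.0: positional arguments (the fourth is left undefined here).
const poolOld = new TransactionPool(lc, Mode.READONLY, init, undefined, numWorkers);

// 1.4.0-canary.1: options object.
const poolNew = new TransactionPool(lc, {
  mode: Mode.READONLY,
  init,
  initialWorkers: numWorkers,
});
```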
{"version":3,"file":"initial-sync.js","names":[],"sources":["../../../../../../../zero-cache/src/services/change-source/pg/initial-sync.ts"],"sourcesContent":["import {mkdtemp, rm} from 'node:fs/promises';\nimport {platform, tmpdir} from 'node:os';\nimport {join} from 'node:path';\nimport {Writable} from 'node:stream';\nimport {pipeline} from 'node:stream/promises';\nimport {\n PG_CONFIGURATION_LIMIT_EXCEEDED,\n PG_INSUFFICIENT_PRIVILEGE,\n} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport {resolver} from '@rocicorp/resolver';\nimport postgres from 'postgres';\nimport type {JSONObject} from '../../../../../shared/src/bigint-json.ts';\nimport {must} from '../../../../../shared/src/must.ts';\nimport {equals} from '../../../../../shared/src/set-utils.ts';\nimport type {DownloadStatus} from '../../../../../zero-events/src/status.ts';\nimport {Database} from '../../../../../zqlite/src/db.ts';\nimport {\n createLiteIndexStatement,\n createLiteTableStatement,\n} from '../../../db/create.ts';\nimport {\n computeZqlSpecs,\n listIndexes,\n listTables,\n} from '../../../db/lite-tables.ts';\nimport * as Mode from '../../../db/mode-enum.ts';\nimport {\n BinaryCopyParser,\n hasBinaryDecoder,\n makeBinaryDecoder,\n textCastDecoder,\n} from '../../../db/pg-copy-binary.ts';\nimport {TsvParser} from '../../../db/pg-copy.ts';\nimport {\n mapPostgresToLite,\n mapPostgresToLiteIndex,\n} from '../../../db/pg-to-lite.ts';\nimport {getTypeParsers} from '../../../db/pg-type-parser.ts';\nimport {runTx} from '../../../db/run-transaction.ts';\nimport type {IndexSpec, PublishedTableSpec} from '../../../db/specs.ts';\nimport {importSnapshot, TransactionPool} from '../../../db/transaction-pool.ts';\nimport {\n JSON_STRINGIFIED,\n liteValue,\n type LiteValueType,\n} from '../../../types/lite.ts';\nimport {liteTableName} from '../../../types/names.ts';\nimport {PG_15, PG_17} from '../../../types/pg-versions.ts';\nimport {\n pgClient,\n type PostgresDB,\n type PostgresTransaction,\n type PostgresValueType,\n} from '../../../types/pg.ts';\nimport {CpuProfiler} from '../../../types/profiler.ts';\nimport type {ShardConfig} from '../../../types/shards.ts';\nimport {ALLOWED_APP_ID_CHARACTERS} from '../../../types/shards.ts';\nimport {id} from '../../../types/sql.ts';\nimport {ReplicationStatusPublisher} from '../../replicator/replication-status.ts';\nimport {ColumnMetadataStore} from '../../replicator/schema/column-metadata.ts';\nimport {initReplicationState} from '../../replicator/schema/replication-state.ts';\nimport {toStateVersionString} from './lsn.ts';\nimport {ensureShardSchema} from './schema/init.ts';\nimport {getPublicationInfo} from './schema/published.ts';\nimport {\n addReplica,\n dropShard,\n getInternalShardConfig,\n newReplicationSlot,\n replicationSlotExpression,\n validatePublications,\n} from './schema/shard.ts';\n\nexport type InitialSyncOptions = {\n tableCopyWorkers: number;\n profileCopy?: boolean | undefined;\n textCopy?: boolean | undefined;\n replicationSlotFailover?: boolean | undefined;\n /**\n * When set, run initial sync in \"shadow\" mode for verification: skip all\n * upstream mutations (no replication slot, no addReplica, no dropShard, no\n * slot drop on failure), suppress status events, and optionally sample\n * rows from each table via TABLESAMPLE BERNOULLI + LIMIT. The caller is\n * responsible for providing (and discarding) a throwaway SQLite `tx`.\n */\n shadow?:\n | {\n /** 0 < rate <= 1. When 1, no TABLESAMPLE clause is added. 
*/\n sampleRate: number;\n /**\n * LIMIT N cap appended after TABLESAMPLE. Required: shadow sync is\n * for verification only, so every run must commit to a row budget.\n */\n maxRowsPerTable: number;\n }\n | undefined;\n};\n\n/** Server context to store with the initial sync metadata for debugging. */\nexport type ServerContext = JSONObject;\n\nexport async function initialSync(\n lc: LogContext,\n shard: ShardConfig,\n tx: Database,\n upstreamURI: string,\n syncOptions: InitialSyncOptions,\n context: ServerContext,\n) {\n if (!ALLOWED_APP_ID_CHARACTERS.test(shard.appID)) {\n throw new Error(\n 'The App ID may only consist of lower-case letters, numbers, and the underscore character',\n );\n }\n const {\n tableCopyWorkers,\n profileCopy,\n textCopy = false,\n replicationSlotFailover = false,\n shadow,\n } = syncOptions;\n const copyProfiler = profileCopy ? await CpuProfiler.connect() : null;\n const sql = pgClient(lc, upstreamURI);\n // Replication session is only needed to create a replication slot in the\n // real path. In shadow mode we export a snapshot on a normal connection\n // instead, so no replication session is opened.\n const replicationSession = shadow\n ? undefined\n : pgClient(lc, upstreamURI, {\n ['fetch_types']: false, // Necessary for the streaming protocol\n connection: {replication: 'database'}, // https://www.postgresql.org/docs/current/protocol-replication.html\n });\n const slotName = newReplicationSlot(shard);\n const statusPublisher = ReplicationStatusPublisher.forRunningTransaction(\n tx,\n shadow ? async () => {} : undefined,\n ).publish(lc, 'Initializing');\n let releaseShadowSnapshot: (() => Promise<void>) | undefined;\n try {\n const pgVersion = await checkUpstreamConfig(sql);\n\n // In shadow mode we assume the shard is already initialized and just\n // read back the existing publications. `ensurePublishedTables` would\n // otherwise run DDL and potentially call `dropShard`, which must never\n // happen during a shadow run.\n const {publications} = shadow\n ? await getInternalShardConfig(sql, shard)\n : await ensurePublishedTables(lc, sql, shard);\n lc.info?.(`Upstream is setup with publications [${publications}]`);\n\n const {database, host} = sql.options;\n lc.info?.(\n shadow\n ? `acquiring exported snapshot on ${database}@${host} (shadow mode)`\n : `opening replication session to ${database}@${host}`,\n );\n\n let snapshot: string;\n let lsn: string;\n\n if (shadow) {\n const acquired = await acquireExportedSnapshotForShadowSync(\n lc,\n upstreamURI,\n );\n snapshot = acquired.snapshot;\n lsn = acquired.lsn;\n releaseShadowSnapshot = acquired.release;\n } else {\n let slot: ReplicationSlot;\n for (let first = true; ; first = false) {\n try {\n slot = await createReplicationSlot(\n lc,\n must(replicationSession),\n slotName,\n replicationSlotFailover && pgVersion >= PG_17,\n );\n break;\n } catch (e) {\n if (first && e instanceof postgres.PostgresError) {\n if (e.code === PG_INSUFFICIENT_PRIVILEGE) {\n // Some Postgres variants (e.g. 
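Everything upstream-mutating in `initialSync` is gated on `shadow`, so a verification run needs only a scratch SQLite handle and a row budget. A hedged usage sketch; the connection URI and shard values are placeholders, and the declares stand in for the real imports:

```ts
// Hypothetical shadow-mode invocation; the URI, shard, and scratch
// Database are placeholders for illustration.
import type {LogContext} from '@rocicorp/logger';
declare const lc: LogContext;
declare const shard: any; // a ShardConfig for an already-initialized shard
declare const scratchDb: any; // throwaway SQLite Database, discarded after
declare function initialSync(...args: unknown[]): Promise<void>;

await initialSync(
  lc,
  shard,
  scratchDb,
  'postgres://localhost/app',
  {
    tableCopyWorkers: 1,
    shadow: {
      sampleRate: 0.01, // TABLESAMPLE BERNOULLI(1): roughly 1% of each table
      maxRowsPerTable: 10_000, // hard LIMIT per table
    },
  },
  {}, // ServerContext stored with the sync metadata for debugging
);
```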
Google Cloud SQL) require that\n // the user have the REPLICATION role in order to create a slot.\n // Note that this must be done by the upstreamDB connection, and\n // does not work in the replicationSession itself.\n await sql`ALTER ROLE current_user WITH REPLICATION`;\n lc.info?.(`Added the REPLICATION role to database user`);\n continue;\n }\n if (e.code === PG_CONFIGURATION_LIMIT_EXCEEDED) {\n const slotExpression = replicationSlotExpression(shard);\n\n const dropped = await sql<{slot: string}[]>`\n SELECT slot_name as slot, pg_drop_replication_slot(slot_name)\n FROM pg_replication_slots\n WHERE slot_name LIKE ${slotExpression} AND NOT active`;\n if (dropped.length) {\n lc.warn?.(\n `Dropped inactive replication slots: ${dropped.map(({slot}) => slot)}`,\n e,\n );\n continue;\n }\n lc.error?.(`Unable to drop replication slots`, e);\n }\n }\n throw e;\n }\n }\n snapshot = slot.snapshot_name;\n lsn = slot.consistent_point;\n }\n\n const initialVersion = toStateVersionString(lsn);\n\n initReplicationState(tx, publications, initialVersion, context);\n\n // Run up to MAX_WORKERS to copy of tables at the replication slot's snapshot.\n const start = performance.now();\n // Retrieve the published schema at the consistent_point.\n const published = await runTx(\n sql,\n async tx => {\n await tx.unsafe(/* sql*/ `SET TRANSACTION SNAPSHOT '${snapshot}'`);\n return getPublicationInfo(tx, publications);\n },\n {mode: Mode.READONLY},\n );\n // Note: If this throws, initial-sync is aborted.\n validatePublications(lc, published);\n\n // Now that tables have been validated, kick off the copiers.\n const {tables, indexes} = published;\n const numTables = tables.length;\n if (platform() === 'win32' && tableCopyWorkers < numTables) {\n lc.warn?.(\n `Increasing the number of copy workers from ${tableCopyWorkers} to ` +\n `${numTables} to work around a Node/Postgres connection bug`,\n );\n }\n const numWorkers =\n platform() === 'win32'\n ? 
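The slot-creation loop above retries at most once per run: on the first failure it attempts a remediation keyed to the Postgres error code (grant the REPLICATION role, or drop this shard's inactive slots), and any second failure propagates. The control flow distilled, with remediation abstracted behind a callback:

```ts
// Distilled control flow of the createReplicationSlot retry loop.
// remediate() returns true when it fixed a known error class and the
// creation is worth retrying; anything else rethrows.
async function createWithRecovery<T>(
  create: () => Promise<T>,
  remediate: (e: unknown) => Promise<boolean>,
): Promise<T> {
  for (let first = true; ; first = false) {
    try {
      return await create();
    } catch (e) {
      if (first && (await remediate(e))) {
        continue; // second attempt; a further failure falls through
      }
      throw e;
    }
  }
}
```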
numTables\n : Math.min(tableCopyWorkers, numTables);\n\n const copyPool = pgClient(lc, upstreamURI, {\n max: numWorkers,\n connection: {['application_name']: 'initial-sync-copy-worker'},\n ['max_lifetime']: 120 * 60, // set a long (2h) limit for COPY streaming\n });\n const copiers = startTableCopyWorkers(\n lc,\n copyPool,\n snapshot,\n numWorkers,\n numTables,\n );\n try {\n createLiteTables(tx, tables, initialVersion);\n const sampleRate = shadow?.sampleRate;\n const maxRowsPerTable = shadow?.maxRowsPerTable;\n const downloads = await Promise.all(\n tables.map(spec =>\n copiers.processReadTask((db, lc) =>\n getInitialDownloadState(lc, db, spec, shadow !== undefined),\n ),\n ),\n );\n statusPublisher.publish(\n lc,\n 'Initializing',\n `Copying ${numTables} upstream tables at version ${initialVersion}`,\n 5000,\n () => ({downloadStatus: downloads.map(({status}) => status)}),\n );\n\n void copyProfiler?.start();\n const rowCounts = await Promise.all(\n downloads.map(table =>\n copiers.processReadTask((db, lc) =>\n copy(\n lc,\n table,\n copyPool,\n db,\n tx,\n textCopy,\n sampleRate,\n maxRowsPerTable,\n ),\n ),\n ),\n );\n void copyProfiler?.stopAndDispose(lc, 'initial-copy');\n copiers.setDone();\n\n const total = rowCounts.reduce(\n (acc, curr) => ({\n rows: acc.rows + curr.rows,\n flushTime: acc.flushTime + curr.flushTime,\n }),\n {rows: 0, flushTime: 0},\n );\n\n statusPublisher.publish(\n lc,\n 'Indexing',\n `Creating ${indexes.length} indexes`,\n 5000,\n );\n const indexStart = performance.now();\n createLiteIndices(tx, indexes);\n const index = performance.now() - indexStart;\n lc.info?.(`Created indexes (${index.toFixed(3)} ms)`);\n\n if (shadow) {\n const rowsByTable = new Map<string, number>();\n for (let i = 0; i < downloads.length; i++) {\n rowsByTable.set(downloads[i].status.table, rowCounts[i].rows);\n }\n verifyShadowReplica(lc, tx, published, rowsByTable);\n } else {\n await addReplica(\n sql,\n shard,\n slotName,\n initialVersion,\n published,\n context,\n );\n }\n\n const elapsed = performance.now() - start;\n lc.info?.(\n `Synced ${total.rows.toLocaleString()} rows of ${numTables} tables in ${publications} up to ${lsn} ` +\n `(flush: ${total.flushTime.toFixed(3)}, index: ${index.toFixed(3)}, total: ${elapsed.toFixed(3)} ms)`,\n );\n } finally {\n // All meaningful errors are handled at the processReadTask() call site.\n void copyPool.end().catch(e => lc.warn?.(`Error closing copyPool`, e));\n }\n } catch (e) {\n if (!shadow) {\n // If initial-sync did not succeed, make a best effort to drop the\n // orphaned replication slot to avoid running out of slots in\n // pathological cases that result in repeated failures.\n lc.warn?.(`dropping replication slot ${slotName}`, e);\n await sql`\n SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots\n WHERE slot_name = ${slotName};\n `.catch(e => lc.warn?.(`Unable to drop replication slot ${slotName}`, e));\n }\n await statusPublisher.publishAndThrowError(lc, 'Initializing', e);\n } finally {\n statusPublisher.stop();\n if (releaseShadowSnapshot) {\n await releaseShadowSnapshot().catch(e =>\n lc.warn?.(`Error releasing shadow snapshot`, e),\n );\n }\n if (replicationSession) {\n await replicationSession.end();\n }\n await sql.end();\n }\n}\n\nexport type ShadowSyncOptions = {\n sampleRate: number;\n maxRowsPerTable: number;\n /**\n * Parent directory for the throwaway SQLite replica. Defaults to the OS\n * tmpdir. 
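The table copiers get a dedicated postgres.js client sized to the worker count, with a long `max_lifetime` so COPY streams are not cut off by connection recycling. A sketch of that client construction; the URI is a placeholder, and the option names follow the copyPool call site:

```ts
// Dedicated copy-phase client, mirroring the options at the copyPool call
// site. The URI is a placeholder.
import postgres from 'postgres';

const numWorkers = 4; // min(tableCopyWorkers, numTables) in the source

const copyPool = postgres('postgres://localhost/app', {
  max: numWorkers, // one connection per table-copy worker
  connection: {['application_name']: 'initial-sync-copy-worker'},
  ['max_lifetime']: 120 * 60, // 2h: COPY streams can be long-lived
});

// ... run the copy phase ...
await copyPool.end();
```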
Primarily for tests that need to isolate the scratch directory.\n */\n parentDir?: string | undefined;\n};\n\n/**\n * Exercises the initial-sync code path against a sample of rows from every\n * published table, writing into a throwaway SQLite database that is deleted\n * when the run ends. Produces zero upstream mutations: no replication slot,\n * no `addReplica`, no `dropShard`, no status events.\n *\n * Intended to be invoked periodically so that if a customer ever needs a\n * full reset, we have recent confidence that `initialSync` still works.\n * The shard must already be initialized upstream.\n */\nexport async function shadowInitialSync(\n lc: LogContext,\n shard: ShardConfig,\n upstreamURI: string,\n shadow: ShadowSyncOptions,\n context: ServerContext,\n syncOptions?: Pick<InitialSyncOptions, 'textCopy'>,\n): Promise<void> {\n const dir = await mkdtemp(\n join(shadow.parentDir ?? tmpdir(), 'zero-shadow-sync-'),\n );\n const dbPath = join(dir, 'shadow-replica.db');\n const db = new Database(lc, dbPath);\n try {\n await initialSync(\n lc,\n shard,\n db,\n upstreamURI,\n {\n // Shadow sync copies small samples, so one worker is plenty —\n // no reason to burn additional upstream connections.\n tableCopyWorkers: 1,\n textCopy: syncOptions?.textCopy,\n shadow,\n },\n context,\n );\n } finally {\n try {\n db.close();\n } catch (e) {\n lc.warn?.(`Error closing shadow replica db`, e);\n }\n await rm(dir, {recursive: true, force: true}).catch(e =>\n lc.warn?.(`Error cleaning up shadow replica dir ${dir}`, e),\n );\n }\n}\n\nasync function checkUpstreamConfig(sql: PostgresDB) {\n const {walLevel, version} = (\n await sql<{walLevel: string; version: number}[]>`\n SELECT current_setting('wal_level') as \"walLevel\", \n current_setting('server_version_num') as \"version\";\n `\n )[0];\n\n if (walLevel !== 'logical') {\n throw new Error(\n `Postgres must be configured with \"wal_level = logical\" (currently: \"${walLevel})`,\n );\n }\n if (version < PG_15) {\n throw new Error(\n `Must be running Postgres 15 or higher (currently: \"${version}\")`,\n );\n }\n return version;\n}\n\nasync function ensurePublishedTables(\n lc: LogContext,\n sql: PostgresDB,\n shard: ShardConfig,\n validate = true,\n): Promise<{publications: string[]}> {\n const {database, host} = sql.options;\n lc.info?.(`Ensuring upstream PUBLICATION on ${database}@${host}`);\n\n await ensureShardSchema(lc, sql, shard);\n const {publications} = await getInternalShardConfig(sql, shard);\n\n if (validate) {\n let valid = false;\n const nonInternalPublications = publications.filter(\n p => !p.startsWith('_'),\n );\n const exists = await sql`\n SELECT pubname FROM pg_publication WHERE pubname IN ${sql(publications)}\n `.values();\n if (exists.length !== publications.length) {\n lc.warn?.(\n `some configured publications [${publications}] are missing: ` +\n `[${exists.flat()}]. resyncing`,\n );\n } else if (\n !equals(new Set(shard.publications), new Set(nonInternalPublications))\n ) {\n lc.warn?.(\n `requested publications [${shard.publications}] differ from previous` +\n `publications [${nonInternalPublications}]. 
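`shadowInitialSync` brackets the whole run in create-temp-dir / always-delete, so the scratch replica never outlives the verification. The bracket generalized into a helper (the helper itself is illustrative, not from the package):

```ts
// The scratch-directory bracket used by shadowInitialSync, generalized:
// make an isolated temp dir, run the work, always clean up.
import {mkdtemp, rm} from 'node:fs/promises';
import {tmpdir} from 'node:os';
import {join} from 'node:path';

async function withScratchDir<T>(
  prefix: string,
  work: (dir: string) => Promise<T>,
): Promise<T> {
  const dir = await mkdtemp(join(tmpdir(), prefix));
  try {
    return await work(dir);
  } finally {
    // Best-effort, like the source: cleanup failures are swallowed.
    await rm(dir, {recursive: true, force: true}).catch(() => {});
  }
}

// e.g. await withScratchDir('zero-shadow-sync-', async dir => { /* ... */ });
```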
resyncing`,\n );\n } else {\n valid = true;\n }\n if (!valid) {\n await sql.unsafe(dropShard(shard.appID, shard.shardNum));\n return ensurePublishedTables(lc, sql, shard, false);\n }\n }\n return {publications};\n}\n\nfunction startTableCopyWorkers(\n lc: LogContext,\n db: PostgresDB,\n snapshot: string,\n numWorkers: number,\n numTables: number,\n): TransactionPool {\n const {init} = importSnapshot(snapshot);\n const tableCopiers = new TransactionPool(lc, {\n mode: Mode.READONLY,\n init,\n initialWorkers: numWorkers,\n });\n tableCopiers.run(db);\n\n lc.info?.(`Started ${numWorkers} workers to copy ${numTables} tables`);\n\n if (parseInt(process.versions.node) < 22) {\n lc.warn?.(\n `\\n\\n\\n` +\n `Older versions of Node have a bug that results in an unresponsive\\n` +\n `Postgres connection after running certain combinations of COPY commands.\\n` +\n `If initial sync hangs, run zero-cache with Node v22+. This has the additional\\n` +\n `benefit of being consistent with the Node version run in the production container image.` +\n `\\n\\n\\n`,\n );\n }\n return tableCopiers;\n}\n\n// Row returned by `CREATE_REPLICATION_SLOT`\ntype ReplicationSlot = {\n slot_name: string;\n consistent_point: string;\n snapshot_name: string;\n output_plugin: string;\n};\n\n/**\n * Shadow-mode alternative to `createReplicationSlot`: opens a dedicated\n * READ ONLY REPEATABLE READ transaction on a normal connection, exports the\n * snapshot and captures the current WAL LSN, then holds the transaction\n * open until `release()` is called. The held transaction keeps the snapshot\n * importable by the table-copy workers for the duration of the COPY phase.\n *\n * Idle-in-transaction timeout is disabled locally so the exporter doesn't\n * get killed while workers are still importing.\n */\nasync function acquireExportedSnapshotForShadowSync(\n lc: LogContext,\n upstreamURI: string,\n): Promise<{\n snapshot: string;\n lsn: string;\n release: () => Promise<void>;\n}> {\n const holder = pgClient(lc, upstreamURI, {\n max: 1,\n connection: {['application_name']: 'shadow-initial-sync-snapshot'},\n });\n const ready = resolver<{snapshot: string; lsn: string}>();\n const release = resolver<void>();\n const held = holder\n .begin(Mode.READONLY, async tx => {\n await tx`SET LOCAL idle_in_transaction_session_timeout = 0`.execute();\n const [row] = await tx<{snapshot: string; lsn: string}[]>`\n SELECT pg_export_snapshot() AS snapshot,\n pg_current_wal_lsn()::text AS lsn`;\n ready.resolve(row);\n await release.promise;\n })\n .catch(e => ready.reject(e));\n\n let snapshot: string;\n let lsn: string;\n try {\n ({snapshot, lsn} = await ready.promise);\n } catch (e) {\n await holder\n .end()\n .catch(err =>\n lc.warn?.(`Error ending shadow snapshot holder after failure`, err),\n );\n throw e;\n }\n lc.info?.(\n `Exported snapshot ${snapshot} at LSN ${lsn} (shadow initial sync)`,\n );\n return {\n snapshot,\n lsn,\n release: async () => {\n release.resolve();\n try {\n await held;\n } catch (e) {\n lc.warn?.(`snapshot holder transaction ended with error`, e);\n }\n await holder.end();\n },\n };\n}\n\n// Note: The replication connection does not support the extended query protocol,\n// so all commands must be sent using sql.unsafe(). This is technically safe\n// because all placeholder values are under our control (i.e. \"slotName\").\nexport async function createReplicationSlot(\n lc: LogContext,\n session: postgres.Sql,\n slotName: string,\n // Note: must be false if pgVersion < PG_17. 
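`acquireExportedSnapshotForShadowSync` keeps the exporting transaction open with a pair of resolvers: one delivers the exported snapshot to the caller, the other parks the transaction callback until `release()` is called. The pattern reduced to its core; the postgres calls are representative, not verbatim:

```ts
// Reduced form of the hold-open pattern: export a snapshot inside a
// transaction, hand it to the caller, and keep the transaction open until
// the caller releases it (so the snapshot stays importable).
import {resolver} from '@rocicorp/resolver';
import postgres from 'postgres';

async function holdExportedSnapshot(sql: postgres.Sql) {
  const ready = resolver<string>();
  const release = resolver<void>();

  const held = sql
    .begin(async tx => {
      const [{snapshot}] = await tx<
        {snapshot: string}[]
      >`SELECT pg_export_snapshot() AS snapshot`;
      ready.resolve(snapshot);
      await release.promise; // park here; the transaction stays open
    })
    .catch(e => ready.reject(e));

  const snapshot = await ready.promise;
  return {
    snapshot,
    release: async () => {
      release.resolve();
      await held; // wait for the holder transaction to finish
    },
  };
}
```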
Caller must verify.\n failover = false,\n): Promise<ReplicationSlot> {\n const [slot] = failover\n ? await session.unsafe<ReplicationSlot[]>(\n /*sql*/ `CREATE_REPLICATION_SLOT \"${slotName}\" LOGICAL pgoutput (FAILOVER)`,\n )\n : await session.unsafe<ReplicationSlot[]>(\n /*sql*/ `CREATE_REPLICATION_SLOT \"${slotName}\" LOGICAL pgoutput`,\n );\n lc.info?.(`Created replication slot ${slotName}`, slot);\n return slot;\n}\n\nfunction createLiteTables(\n tx: Database,\n tables: PublishedTableSpec[],\n initialVersion: string,\n) {\n // TODO: Figure out how to reuse the ChangeProcessor here to avoid\n // duplicating the ColumnMetadata logic.\n const columnMetadata = must(ColumnMetadataStore.getInstance(tx));\n for (const t of tables) {\n tx.exec(createLiteTableStatement(mapPostgresToLite(t, initialVersion)));\n const tableName = liteTableName(t);\n for (const [colName, colSpec] of Object.entries(t.columns)) {\n columnMetadata.insert(tableName, colName, colSpec);\n }\n }\n}\n\nfunction createLiteIndices(tx: Database, indices: IndexSpec[]) {\n for (const index of indices) {\n tx.exec(createLiteIndexStatement(mapPostgresToLiteIndex(index)));\n }\n}\n\n/**\n * Runs structural assertions over a just-synced replica and throws if any\n * fail. Only called in shadow mode — a successful return means the replica\n * is schema-complete, row-count consistent, ZQL-queryable, and its column\n * metadata is in sync with its lite schema.\n *\n * Exported for testing.\n */\nexport function verifyShadowReplica(\n lc: LogContext,\n db: Database,\n published: {tables: PublishedTableSpec[]; indexes: IndexSpec[]},\n rowsByTable: ReadonlyMap<string, number>,\n): void {\n const issues: string[] = [];\n\n // 1. Schema completeness: every published table exists in the replica\n // with at least the expected column set.\n const liteTables = listTables(db);\n const liteTableByName = new Map(liteTables.map(t => [t.name, t]));\n for (const pt of published.tables) {\n const name = liteTableName(pt);\n const lite = liteTableByName.get(name);\n if (!lite) {\n issues.push(`missing table in replica: ${name}`);\n continue;\n }\n for (const col of Object.keys(pt.columns)) {\n if (!(col in lite.columns)) {\n issues.push(`column missing in replica table ${name}: ${col}`);\n }\n }\n }\n\n // Every published index exists in the replica.\n const liteIndexNames = new Set(listIndexes(db).map(i => i.name));\n for (const ix of published.indexes) {\n const mapped = mapPostgresToLiteIndex(ix);\n if (!liteIndexNames.has(mapped.name)) {\n issues.push(\n `missing index in replica: ${mapped.name} on ${mapped.tableName}`,\n );\n }\n }\n\n // 2. Row counts: SQLite COUNT(*) matches the in-memory copy counter.\n for (const [table, expected] of rowsByTable) {\n try {\n const [row] = db\n .prepare(`SELECT COUNT(*) as count FROM \"${table}\"`)\n .all<{count: number}>();\n if (row.count !== expected) {\n issues.push(\n `row count mismatch for table ${table}: ` +\n `copy counter reported ${expected}, replica has ${row.count}`,\n );\n }\n } catch (e) {\n issues.push(`could not count rows in table ${table}: ${String(e)}`);\n }\n }\n\n // 3. 
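`verifyShadowReplica` deliberately collects every failed assertion before throwing, so one shadow run reports all problems at once instead of stopping at the first. The shape, distilled into a generic check runner:

```ts
// The accumulate-then-throw shape used by verifyShadowReplica: run every
// check, gather failures, throw a single aggregated error at the end.
function runChecks(checks: Array<[name: string, ok: () => boolean]>): void {
  const issues: string[] = [];
  for (const [name, ok] of checks) {
    try {
      if (!ok()) issues.push(name);
    } catch (e) {
      issues.push(`${name}: ${String(e)}`);
    }
  }
  if (issues.length) {
    throw new Error(
      `verification failed (${issues.length} issue(s)):\n` +
        issues.map(i => `  - ${i}`).join('\n'),
    );
  }
}
```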
ZQL-queryability: every published table survives computeZqlSpecs's\n // filtering (primary-key candidate, ZQL-typed columns, etc.).\n const tableSpecs = computeZqlSpecs(lc, db, {\n includeBackfillingColumns: false,\n });\n for (const pt of published.tables) {\n const name = liteTableName(pt);\n if (!tableSpecs.has(name)) {\n issues.push(\n `table not queryable via ZQL (dropped by computeZqlSpecs): ${name}`,\n );\n }\n }\n\n // 4. Column metadata: every published column has a _zero.column_metadata row.\n const meta = must(ColumnMetadataStore.getInstance(db));\n for (const pt of published.tables) {\n const name = liteTableName(pt);\n const rows = meta.getTable(name);\n for (const col of Object.keys(pt.columns)) {\n if (!rows.has(col)) {\n issues.push(`missing column_metadata row for ${name}.${col}`);\n }\n }\n }\n\n if (issues.length) {\n throw new Error(\n `Shadow replica verification failed (${issues.length} issue(s)):\\n` +\n issues.map(i => ` - ${i}`).join('\\n'),\n );\n }\n}\n\n// Verified empirically that batches of 50 seem to be the sweet spot,\n// similar to the report in https://sqlite.org/forum/forumpost/8878a512d3652655\n//\n// Exported for testing.\nexport const INSERT_BATCH_SIZE = 50;\n\nconst MB = 1024 * 1024;\nconst MAX_BUFFERED_ROWS = 10_000;\nconst BUFFERED_SIZE_THRESHOLD = 8 * MB;\n\nexport type DownloadStatements = {\n select: string;\n getTotalRows: string;\n getTotalBytes: string;\n};\n\n/**\n * Produces ` TABLESAMPLE BERNOULLI(n)` when `sampleRate` is < 1, else `''`.\n * Row-level Bernoulli sampling is used (rather than SYSTEM) because it\n * produces a more uniform sample and, unlike SYSTEM, still returns rows\n * for small tables at low rates.\n */\nfunction tableSampleClause(sampleRate: number | undefined): string {\n if (sampleRate === undefined || sampleRate >= 1) {\n return '';\n }\n // Round away float noise (e.g. 0.3 * 100 = 30.000000000000004) while still\n // preserving sub-integer rates like 0.001 (= 0.1%).\n const pct = parseFloat((sampleRate * 100).toFixed(6));\n return /*sql*/ ` TABLESAMPLE BERNOULLI(${pct})`;\n}\n\nfunction limitClause(maxRowsPerTable: number | undefined): string {\n return maxRowsPerTable !== undefined\n ? /*sql*/ ` LIMIT ${maxRowsPerTable}`\n : '';\n}\n\nexport function makeDownloadStatements(\n table: PublishedTableSpec,\n cols: string[],\n sampleRate?: number | undefined,\n maxRowsPerTable?: number | undefined,\n): DownloadStatements {\n const filterConditions = Object.values(table.publications)\n .map(({rowFilter}) => rowFilter)\n .filter(f => !!f); // remove nulls\n const where =\n filterConditions.length === 0\n ? 
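The `toFixed(6)`/`parseFloat` round-trip in `tableSampleClause` scrubs binary float noise from `rate * 100` while preserving sub-percent rates. A worked check of both cases:

```ts
// Why tableSampleClause rounds through toFixed(6):
const noisy = 0.3 * 100; // 30.000000000000004 in IEEE-754 doubles
const pct = parseFloat(noisy.toFixed(6)); // 30

// Sub-integer rates survive six decimal places:
const tiny = parseFloat((0.001 * 100).toFixed(6)); // 0.1

console.log(` TABLESAMPLE BERNOULLI(${pct})`); //  TABLESAMPLE BERNOULLI(30)
console.log(` TABLESAMPLE BERNOULLI(${tiny})`); //  TABLESAMPLE BERNOULLI(0.1)
```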
''\n : /*sql*/ `WHERE ${filterConditions.join(' OR ')}`;\n const sample = tableSampleClause(sampleRate);\n const limit = limitClause(maxRowsPerTable);\n const fromTable = /*sql*/ `FROM ${id(table.schema)}.${id(table.name)}${sample} ${where}`;\n const select = /*sql*/ `SELECT ${cols.map(id).join(',')} ${fromTable}${limit}`;\n if (limit) {\n // With LIMIT, wrap counts/sums in a subquery so they reflect the\n // capped rowset rather than the full (sampled) table.\n const bytesExpr = cols\n .map(col => `COALESCE(pg_column_size(${id(col)}), 0)`)\n .join(' + ');\n return {\n select,\n getTotalRows: /*sql*/ `SELECT COUNT(*)::bigint AS \"totalRows\" FROM (SELECT 1 AS _ ${fromTable}${limit}) s`,\n getTotalBytes: /*sql*/ `SELECT COALESCE(SUM(b), 0)::bigint AS \"totalBytes\" FROM (SELECT (${bytesExpr}) AS b ${fromTable}${limit}) s`,\n };\n }\n const totalBytes = `(${cols.map(col => `SUM(COALESCE(pg_column_size(${id(col)}), 0))`).join(' + ')})`;\n return {\n select,\n getTotalRows: /*sql*/ `SELECT COUNT(*) AS \"totalRows\" ${fromTable}`,\n getTotalBytes: /*sql*/ `SELECT ${totalBytes} AS \"totalBytes\" ${fromTable}`,\n };\n}\n\ntype DownloadState = {\n spec: PublishedTableSpec;\n status: DownloadStatus;\n};\n\n// Exported for testing.\nexport async function getInitialDownloadState(\n lc: LogContext,\n sql: PostgresDB,\n spec: PublishedTableSpec,\n skipTotals: boolean,\n): Promise<DownloadState> {\n const start = performance.now();\n const table = liteTableName(spec);\n const columns = Object.keys(spec.columns);\n if (skipTotals) {\n // Shadow sync suppresses status events, so the COUNT(*) and\n // per-column pg_column_size sums would be computed and thrown away.\n // These are also expensive statements that run table scans.\n return {\n spec,\n status: {table, columns, rows: 0, totalRows: 0, totalBytes: 0},\n };\n }\n const stmts = makeDownloadStatements(spec, columns);\n const rowsResult = sql\n .unsafe<{totalRows: bigint}[]>(stmts.getTotalRows)\n .execute();\n const bytesResult = sql\n .unsafe<{totalBytes: bigint}[]>(stmts.getTotalBytes)\n .execute();\n\n const state: DownloadState = {\n spec,\n status: {\n table,\n columns,\n rows: 0,\n totalRows: Number((await rowsResult)[0].totalRows),\n totalBytes: Number((await bytesResult)[0].totalBytes),\n },\n };\n const elapsed = (performance.now() - start).toFixed(3);\n lc.info?.(`Computed initial download state for ${table} (${elapsed} ms)`, {\n state: state.status,\n });\n return state;\n}\n\nfunction copy(\n lc: LogContext,\n {spec: table, status}: DownloadState,\n dbClient: PostgresDB,\n from: PostgresTransaction,\n to: Database,\n textCopy: boolean,\n sampleRate?: number | undefined,\n maxRowsPerTable?: number | undefined,\n) {\n if (textCopy) {\n return copyText(\n lc,\n table,\n status,\n dbClient,\n from,\n to,\n sampleRate,\n maxRowsPerTable,\n );\n }\n return copyBinary(lc, table, status, from, to, sampleRate, maxRowsPerTable);\n}\n\nasync function copyBinary(\n lc: LogContext,\n table: PublishedTableSpec,\n status: DownloadStatus,\n from: PostgresTransaction,\n to: Database,\n sampleRate?: number | undefined,\n maxRowsPerTable?: number | undefined,\n) {\n const start = performance.now();\n let flushTime = 0;\n\n const tableName = liteTableName(table);\n const orderedColumns = Object.entries(table.columns);\n\n const columnNames = orderedColumns.map(([c]) => c);\n const columnSpecs = orderedColumns.map(([_name, spec]) => spec);\n const insertColumnList = columnNames.map(c => id(c)).join(',');\n\n const valuesSql =\n columnNames.length > 0 ? 
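When a LIMIT is present, `makeDownloadStatements` wraps the row count and byte sum in subqueries over the capped rowset, so progress totals match what will actually be copied. Approximate shapes for a hypothetical `public.issue(id, title)` at `sampleRate: 0.5`, `maxRowsPerTable: 1000`:

```ts
// Approximate generated statements (whitespace simplified); the table and
// parameters are hypothetical.
const select =
  `SELECT "id","title" FROM "public"."issue" TABLESAMPLE BERNOULLI(50) LIMIT 1000`;

const getTotalRows =
  `SELECT COUNT(*)::bigint AS "totalRows" FROM ` +
  `(SELECT 1 AS _ FROM "public"."issue" TABLESAMPLE BERNOULLI(50) LIMIT 1000) s`;

const getTotalBytes =
  `SELECT COALESCE(SUM(b), 0)::bigint AS "totalBytes" FROM ` +
  `(SELECT (COALESCE(pg_column_size("id"), 0) + COALESCE(pg_column_size("title"), 0)) AS b ` +
  `FROM "public"."issue" TABLESAMPLE BERNOULLI(50) LIMIT 1000) s`;
```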
`(${'?,'.repeat(columnNames.length - 1)}?)` : '()';\n const insertSql = /*sql*/ `\n INSERT INTO \"${tableName}\" (${insertColumnList}) VALUES ${valuesSql}`;\n const insertStmt = to.prepare(insertSql);\n const insertBatchStmt = to.prepare(\n insertSql + `,${valuesSql}`.repeat(INSERT_BATCH_SIZE - 1),\n );\n\n // Build SELECT with ::text casts for columns without a known binary decoder.\n const filterConditions = Object.values(table.publications)\n .map(({rowFilter}) => rowFilter)\n .filter(f => !!f);\n const where =\n filterConditions.length === 0\n ? ''\n : /*sql*/ `WHERE ${filterConditions.join(' OR ')}`;\n const sample = tableSampleClause(sampleRate);\n const limit = limitClause(maxRowsPerTable);\n const fromTable = /*sql*/ `FROM ${id(table.schema)}.${id(table.name)}${sample} ${where}`;\n const selectColumns = orderedColumns.map(([name, spec]) =>\n hasBinaryDecoder(spec) ? id(name) : `${id(name)}::text`,\n );\n const select = /*sql*/ `SELECT ${selectColumns.join(',')} ${fromTable}${limit}`;\n\n const decoders = orderedColumns.map(([, spec]) =>\n hasBinaryDecoder(spec) ? makeBinaryDecoder(spec) : textCastDecoder,\n );\n\n const valuesPerRow = columnSpecs.length;\n const valuesPerBatch = valuesPerRow * INSERT_BATCH_SIZE;\n\n const pendingValues: LiteValueType[] = Array.from({\n length: MAX_BUFFERED_ROWS * valuesPerRow,\n });\n let pendingRows = 0;\n let pendingSize = 0;\n\n function flush() {\n const start = performance.now();\n const flushedRows = pendingRows;\n const flushedSize = pendingSize;\n\n let l = 0;\n for (; pendingRows > INSERT_BATCH_SIZE; pendingRows -= INSERT_BATCH_SIZE) {\n insertBatchStmt.run(pendingValues.slice(l, (l += valuesPerBatch)));\n }\n for (; pendingRows > 0; pendingRows--) {\n insertStmt.run(pendingValues.slice(l, (l += valuesPerRow)));\n }\n const flushedValues = flushedRows * valuesPerRow;\n for (let i = 0; i < flushedValues; i++) {\n pendingValues[i] = undefined as unknown as LiteValueType;\n }\n pendingSize = 0;\n status.rows += flushedRows;\n\n const elapsed = performance.now() - start;\n flushTime += elapsed;\n lc.debug?.(\n `flushed ${flushedRows} ${tableName} rows (${flushedSize} bytes) in ${elapsed.toFixed(3)} ms`,\n );\n }\n\n const binaryParser = new BinaryCopyParser();\n let col = 0;\n\n lc.info?.(`Starting binary copy stream of ${tableName}:`, select);\n\n await pipeline(\n await from\n .unsafe(`COPY (${select}) TO STDOUT WITH (FORMAT binary)`)\n .readable(),\n new Writable({\n highWaterMark: BUFFERED_SIZE_THRESHOLD,\n\n write(\n chunk: Buffer,\n _encoding: string,\n callback: (error?: Error) => void,\n ) {\n try {\n for (const fieldBuf of binaryParser.parse(chunk)) {\n pendingSize += fieldBuf === null ? 4 : fieldBuf.length;\n pendingValues[pendingRows * valuesPerRow + col] =\n fieldBuf === null ? null : decoders[col](fieldBuf);\n\n if (++col === decoders.length) {\n col = 0;\n if (\n ++pendingRows >= MAX_BUFFERED_ROWS - valuesPerRow ||\n pendingSize >= BUFFERED_SIZE_THRESHOLD\n ) {\n flush();\n }\n }\n }\n callback();\n } catch (e) {\n callback(e instanceof Error ? e : new Error(String(e)));\n }\n },\n\n final: (callback: (error?: Error) => void) => {\n try {\n flush();\n callback();\n } catch (e) {\n callback(e instanceof Error ? 
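Both copy paths consume the COPY stream through a Writable whose `highWaterMark` matches the flush threshold, so stream backpressure and SQLite flush cadence stay aligned. A self-contained sketch of that consumption pattern, with the parser and sink simplified to stand-ins:

```ts
// Simplified version of the COPY-stream consumer: buffer parsed fields
// into rows, flush on a row-count or byte threshold, drain the tail in
// final(). Parser and sink are stand-ins for the real machinery.
import {type Readable, Writable} from 'node:stream';
import {pipeline} from 'node:stream/promises';

const MAX_BUFFERED_ROWS = 10_000;
const BUFFERED_SIZE_THRESHOLD = 8 * 1024 * 1024;

async function consumeCopyStream(
  source: Readable, // e.g. the `COPY ... TO STDOUT` readable
  parseChunk: (chunk: Buffer) => Iterable<string>, // one value per field
  fieldsPerRow: number,
  flush: (rows: string[][]) => void, // write buffered rows to SQLite
): Promise<void> {
  let row: string[] = [];
  let buffered: string[][] = [];
  let bufferedBytes = 0;

  await pipeline(
    source,
    new Writable({
      // Matching the flush threshold keeps upstream backpressure aligned
      // with the flush cadence.
      highWaterMark: BUFFERED_SIZE_THRESHOLD,
      write(chunk: Buffer, _enc, callback) {
        try {
          for (const field of parseChunk(chunk)) {
            row.push(field);
            bufferedBytes += field.length;
            if (row.length === fieldsPerRow) {
              buffered.push(row);
              row = [];
              if (
                buffered.length >= MAX_BUFFERED_ROWS ||
                bufferedBytes >= BUFFERED_SIZE_THRESHOLD
              ) {
                flush(buffered);
                buffered = [];
                bufferedBytes = 0;
              }
            }
          }
          callback();
        } catch (e) {
          callback(e instanceof Error ? e : new Error(String(e)));
        }
      },
      final(callback) {
        try {
          flush(buffered); // drain whatever is left
          callback();
        } catch (e) {
          callback(e instanceof Error ? e : new Error(String(e)));
        }
      },
    }),
  );
}
```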
e : new Error(String(e)));\n }\n },\n }),\n );\n\n const elapsed = performance.now() - start;\n lc.info?.(\n `Finished copying ${status.rows} rows into ${tableName} ` +\n `(flush: ${flushTime.toFixed(3)} ms) (total: ${elapsed.toFixed(3)} ms) `,\n );\n return {rows: status.rows, flushTime};\n}\n\nasync function copyText(\n lc: LogContext,\n table: PublishedTableSpec,\n status: DownloadStatus,\n dbClient: PostgresDB,\n from: PostgresTransaction,\n to: Database,\n sampleRate?: number | undefined,\n maxRowsPerTable?: number | undefined,\n) {\n const start = performance.now();\n let flushTime = 0;\n\n const tableName = liteTableName(table);\n const orderedColumns = Object.entries(table.columns);\n\n const columnNames = orderedColumns.map(([c]) => c);\n const columnSpecs = orderedColumns.map(([_name, spec]) => spec);\n const insertColumnList = columnNames.map(c => id(c)).join(',');\n\n const valuesSql =\n columnNames.length > 0 ? `(${'?,'.repeat(columnNames.length - 1)}?)` : '()';\n const insertSql = /*sql*/ `\n INSERT INTO \"${tableName}\" (${insertColumnList}) VALUES ${valuesSql}`;\n const insertStmt = to.prepare(insertSql);\n const insertBatchStmt = to.prepare(\n insertSql + `,${valuesSql}`.repeat(INSERT_BATCH_SIZE - 1),\n );\n\n const {select} = makeDownloadStatements(\n table,\n columnNames,\n sampleRate,\n maxRowsPerTable,\n );\n const valuesPerRow = columnSpecs.length;\n const valuesPerBatch = valuesPerRow * INSERT_BATCH_SIZE;\n\n const pendingValues: LiteValueType[] = Array.from({\n length: MAX_BUFFERED_ROWS * valuesPerRow,\n });\n let pendingRows = 0;\n let pendingSize = 0;\n\n function flush() {\n const start = performance.now();\n const flushedRows = pendingRows;\n const flushedSize = pendingSize;\n\n let l = 0;\n for (; pendingRows > INSERT_BATCH_SIZE; pendingRows -= INSERT_BATCH_SIZE) {\n insertBatchStmt.run(pendingValues.slice(l, (l += valuesPerBatch)));\n }\n for (; pendingRows > 0; pendingRows--) {\n insertStmt.run(pendingValues.slice(l, (l += valuesPerRow)));\n }\n const flushedValues = flushedRows * valuesPerRow;\n for (let i = 0; i < flushedValues; i++) {\n pendingValues[i] = undefined as unknown as LiteValueType;\n }\n pendingSize = 0;\n status.rows += flushedRows;\n\n const elapsed = performance.now() - start;\n flushTime += elapsed;\n lc.debug?.(\n `flushed ${flushedRows} ${tableName} rows (${flushedSize} bytes) in ${elapsed.toFixed(3)} ms`,\n );\n }\n\n lc.info?.(`Starting text copy stream of ${tableName}:`, select);\n const pgParsers = await getTypeParsers(dbClient, {returnJsonAsString: true});\n const parsers = columnSpecs.map(c => {\n const pgParse = pgParsers.getTypeParser(c.typeOID);\n return (val: string) =>\n liteValue(\n pgParse(val) as PostgresValueType,\n c.dataType,\n JSON_STRINGIFIED,\n );\n });\n\n const tsvParser = new TsvParser();\n let col = 0;\n\n await pipeline(\n await from.unsafe(`COPY (${select}) TO STDOUT`).readable(),\n new Writable({\n highWaterMark: BUFFERED_SIZE_THRESHOLD,\n\n write(\n chunk: Buffer,\n _encoding: string,\n callback: (error?: Error) => void,\n ) {\n try {\n for (const text of tsvParser.parse(chunk)) {\n pendingSize += text === null ? 4 : text.length;\n pendingValues[pendingRows * valuesPerRow + col] =\n text === null ? null : parsers[col](text);\n\n if (++col === parsers.length) {\n col = 0;\n if (\n ++pendingRows >= MAX_BUFFERED_ROWS - valuesPerRow ||\n pendingSize >= BUFFERED_SIZE_THRESHOLD\n ) {\n flush();\n }\n }\n }\n callback();\n } catch (e) {\n callback(e instanceof Error ? 
[sourcemap continues: the remainder of this .js.map entry is embedded sourcesContent — the tail of a row-copy routine that logs "Finished copying ${status.rows} rows into ${tableName}" with flush and total timings — followed by machine-generated base64-VLQ "mappings" data.]
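The readable sourcesContent tail above shows a Node.js Writable sink whose write() buffers incoming rows and whose final() flushes them before signaling completion, funneling any thrown error into the callback instead of letting it escape. A minimal sketch of that pattern, under assumed names — makeBatchSink and flushBatch are illustrative, not the package's API:

// Sketch of the buffered-write / flush-on-final pattern visible in the
// embedded source. Names are hypothetical stand-ins.
import {Writable} from 'node:stream';

type Row = Record<string, unknown>;

function makeBatchSink(flushBatch: (rows: Row[]) => void) {
  const buffer: Row[] = [];
  const status = {rows: 0};
  const sink = new Writable({
    objectMode: true,
    write(row: Row, _enc, callback) {
      try {
        buffer.push(row);
        status.rows++;
        callback();
      } catch (e) {
        callback(e instanceof Error ? e : new Error(String(e)));
      }
    },
    // final() runs after the last write(); errors are passed to the
    // callback rather than thrown, mirroring the pattern in the source.
    final(callback) {
      try {
        flushBatch(buffer.splice(0));
        callback();
      } catch (e) {
        callback(e instanceof Error ? e : new Error(String(e)));
      }
    },
  });
  return {sink, status};
}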
@@ -1,14 +1,14 @@
 import { sleep } from "../../../../../../shared/src/sleep.js";
+import { id, lit } from "../../../../types/sql.js";
 import "../../../../types/pg.js";
 import { fromBigInt } from "../lsn.js";
-import { id, lit } from "../../../../types/sql.js";
 import { AutoResetSignal } from "../../../change-streamer/schema/tables.js";
 import { Subscription } from "../../../../types/subscription.js";
 import { pipe } from "../../../../types/streams.js";
 import { getTypeParsers } from "../../../../db/pg-type-parser.js";
 import { PgoutputParser } from "./pgoutput-parser.js";
-import postgres from "postgres";
 import { defu } from "defu";
+import postgres from "postgres";
 import { PG_ADMIN_SHUTDOWN, PG_OBJECT_IN_USE, PG_OBJECT_NOT_IN_PREREQUISITE_STATE } from "@drdgvhbh/postgres-error-codes";
 //#region ../zero-cache/src/services/change-source/pg/logical-replication/stream.ts
 var DEFAULT_RETRIES_IF_REPLICATION_SLOT_ACTIVE = 5;
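The imports in this hunk (the Postgres error-code constants and sleep) together with DEFAULT_RETRIES_IF_REPLICATION_SLOT_ACTIVE = 5 suggest a bounded retry when a logical-replication slot is still held by another connection. A hedged sketch of that shape — acquireSlot and the backoff schedule are assumptions, not the module's actual logic:

// Retry slot acquisition while Postgres reports the slot as in use.
// acquireSlot() is a hypothetical stand-in for the real connect step.
import {PG_OBJECT_IN_USE} from '@drdgvhbh/postgres-error-codes';

const DEFAULT_RETRIES_IF_REPLICATION_SLOT_ACTIVE = 5;

const sleep = (ms: number) => new Promise<void>(r => setTimeout(r, ms));

async function withSlotRetry<T>(
  acquireSlot: () => Promise<T>,
  retries = DEFAULT_RETRIES_IF_REPLICATION_SLOT_ACTIVE,
): Promise<T> {
  for (let attempt = 0; ; attempt++) {
    try {
      return await acquireSlot();
    } catch (e) {
      // SQLSTATE 55006 (object_in_use) is raised while another walsender
      // still holds the replication slot; back off and try again.
      const code = (e as {code?: string}).code;
      if (code !== PG_OBJECT_IN_USE || attempt >= retries) {
        throw e;
      }
      await sleep(250 * (attempt + 1));
    }
  }
}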