@rocicorp/zero 1.3.0 → 1.4.0-canary.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/out/analyze-query/src/analyze-cli.d.ts +24 -0
- package/out/analyze-query/src/analyze-cli.d.ts.map +1 -0
- package/out/analyze-query/src/analyze-cli.js +279 -0
- package/out/analyze-query/src/analyze-cli.js.map +1 -0
- package/out/analyze-query/src/bin-analyze.js +6 -6
- package/out/analyze-query/src/bin-transform.js +2 -2
- package/out/ast-to-zql/src/bin.js +1 -1
- package/out/shared/src/logging.d.ts.map +1 -1
- package/out/shared/src/logging.js +1 -1
- package/out/shared/src/logging.js.map +1 -1
- package/out/shared/src/options.d.ts.map +1 -1
- package/out/shared/src/options.js +1 -1
- package/out/shared/src/options.js.map +1 -1
- package/out/z2s/src/compiler.d.ts.map +1 -1
- package/out/z2s/src/compiler.js +4 -1
- package/out/z2s/src/compiler.js.map +1 -1
- package/out/z2s/src/sql.d.ts.map +1 -1
- package/out/z2s/src/sql.js +1 -0
- package/out/z2s/src/sql.js.map +1 -1
- package/out/zero/package.js +95 -89
- package/out/zero/package.js.map +1 -1
- package/out/zero/src/analyze.d.ts +2 -0
- package/out/zero/src/analyze.d.ts.map +1 -0
- package/out/zero/src/analyze.js +2 -0
- package/out/zero/src/zero-cache-dev.js +1 -1
- package/out/zero/src/zero-cache-dev.js.map +1 -1
- package/out/zero/src/zero-out.js +1 -1
- package/out/zero-cache/src/auth/auth.d.ts.map +1 -1
- package/out/zero-cache/src/auth/auth.js.map +1 -1
- package/out/zero-cache/src/auth/load-permissions.js +2 -2
- package/out/zero-cache/src/auth/write-authorizer.d.ts.map +1 -1
- package/out/zero-cache/src/auth/write-authorizer.js +5 -14
- package/out/zero-cache/src/auth/write-authorizer.js.map +1 -1
- package/out/zero-cache/src/config/network.d.ts +1 -1
- package/out/zero-cache/src/config/network.d.ts.map +1 -1
- package/out/zero-cache/src/config/network.js +1 -1
- package/out/zero-cache/src/config/network.js.map +1 -1
- package/out/zero-cache/src/config/normalize.d.ts.map +1 -1
- package/out/zero-cache/src/config/normalize.js.map +1 -1
- package/out/zero-cache/src/config/zero-config.d.ts +5 -0
- package/out/zero-cache/src/config/zero-config.d.ts.map +1 -1
- package/out/zero-cache/src/config/zero-config.js +16 -3
- package/out/zero-cache/src/config/zero-config.js.map +1 -1
- package/out/zero-cache/src/db/lite-tables.d.ts.map +1 -1
- package/out/zero-cache/src/db/lite-tables.js +3 -3
- package/out/zero-cache/src/db/lite-tables.js.map +1 -1
- package/out/zero-cache/src/db/transaction-pool.d.ts +43 -40
- package/out/zero-cache/src/db/transaction-pool.d.ts.map +1 -1
- package/out/zero-cache/src/db/transaction-pool.js +76 -56
- package/out/zero-cache/src/db/transaction-pool.js.map +1 -1
- package/out/zero-cache/src/observability/events.d.ts.map +1 -1
- package/out/zero-cache/src/observability/events.js +1 -1
- package/out/zero-cache/src/observability/events.js.map +1 -1
- package/out/zero-cache/src/scripts/decommission.js +1 -1
- package/out/zero-cache/src/scripts/deploy-permissions.js +2 -2
- package/out/zero-cache/src/scripts/permissions.js +1 -1
- package/out/zero-cache/src/server/anonymous-otel-start.d.ts.map +1 -1
- package/out/zero-cache/src/server/anonymous-otel-start.js +4 -4
- package/out/zero-cache/src/server/anonymous-otel-start.js.map +1 -1
- package/out/zero-cache/src/server/change-streamer.d.ts +1 -1
- package/out/zero-cache/src/server/change-streamer.d.ts.map +1 -1
- package/out/zero-cache/src/server/change-streamer.js +27 -12
- package/out/zero-cache/src/server/change-streamer.js.map +1 -1
- package/out/zero-cache/src/server/logging.d.ts +1 -3
- package/out/zero-cache/src/server/logging.d.ts.map +1 -1
- package/out/zero-cache/src/server/logging.js +6 -3
- package/out/zero-cache/src/server/logging.js.map +1 -1
- package/out/zero-cache/src/server/main.d.ts.map +1 -1
- package/out/zero-cache/src/server/main.js +26 -26
- package/out/zero-cache/src/server/main.js.map +1 -1
- package/out/zero-cache/src/server/mutator.js +4 -2
- package/out/zero-cache/src/server/mutator.js.map +1 -1
- package/out/zero-cache/src/server/otel-log-sink.d.ts.map +1 -1
- package/out/zero-cache/src/server/otel-log-sink.js +0 -2
- package/out/zero-cache/src/server/otel-log-sink.js.map +1 -1
- package/out/zero-cache/src/server/otel-start.d.ts +1 -1
- package/out/zero-cache/src/server/otel-start.d.ts.map +1 -1
- package/out/zero-cache/src/server/otel-start.js +7 -3
- package/out/zero-cache/src/server/otel-start.js.map +1 -1
- package/out/zero-cache/src/server/reaper.js +6 -6
- package/out/zero-cache/src/server/reaper.js.map +1 -1
- package/out/zero-cache/src/server/replicator.d.ts.map +1 -1
- package/out/zero-cache/src/server/replicator.js +5 -3
- package/out/zero-cache/src/server/replicator.js.map +1 -1
- package/out/zero-cache/src/server/runner/run-worker.js +2 -2
- package/out/zero-cache/src/server/runner/run-worker.js.map +1 -1
- package/out/zero-cache/src/server/syncer.d.ts.map +1 -1
- package/out/zero-cache/src/server/syncer.js +13 -12
- package/out/zero-cache/src/server/syncer.js.map +1 -1
- package/out/zero-cache/src/server/worker-dispatcher.js +1 -1
- package/out/zero-cache/src/services/analyze.js +1 -1
- package/out/zero-cache/src/services/change-source/common/backfill-manager.js +1 -1
- package/out/zero-cache/src/services/change-source/common/replica-schema.js +1 -1
- package/out/zero-cache/src/services/change-source/custom/change-source.js +4 -4
- package/out/zero-cache/src/services/change-source/custom/change-source.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/backfill-stream.js +4 -1
- package/out/zero-cache/src/services/change-source/pg/backfill-stream.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/change-source.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/change-source.js +19 -23
- package/out/zero-cache/src/services/change-source/pg/change-source.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts +58 -3
- package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/initial-sync.js +209 -52
- package/out/zero-cache/src/services/change-source/pg/initial-sync.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/logical-replication/stream.js +2 -2
- package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts +24 -15
- package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/ddl.js +35 -58
- package/out/zero-cache/src/services/change-source/pg/schema/ddl.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/init.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/init.js +2 -2
- package/out/zero-cache/src/services/change-source/pg/schema/init.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/published.d.ts +1 -2
- package/out/zero-cache/src/services/change-source/pg/schema/published.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/published.js +15 -18
- package/out/zero-cache/src/services/change-source/pg/schema/published.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/shard.js +1 -1
- package/out/zero-cache/src/services/change-source/protocol/current/data.js +1 -1
- package/out/zero-cache/src/services/change-streamer/backup-monitor.js +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-http.d.ts +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-http.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-http.js +4 -4
- package/out/zero-cache/src/services/change-streamer/change-streamer-http.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts +5 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.js +10 -7
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/replica-monitor.js +2 -2
- package/out/zero-cache/src/services/change-streamer/storer.d.ts +19 -2
- package/out/zero-cache/src/services/change-streamer/storer.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/storer.js +70 -6
- package/out/zero-cache/src/services/change-streamer/storer.js.map +1 -1
- package/out/zero-cache/src/services/heapz.d.ts.map +1 -1
- package/out/zero-cache/src/services/heapz.js +1 -1
- package/out/zero-cache/src/services/heapz.js.map +1 -1
- package/out/zero-cache/src/services/life-cycle.d.ts +2 -1
- package/out/zero-cache/src/services/life-cycle.d.ts.map +1 -1
- package/out/zero-cache/src/services/life-cycle.js +10 -7
- package/out/zero-cache/src/services/life-cycle.js.map +1 -1
- package/out/zero-cache/src/services/litestream/commands.d.ts +15 -4
- package/out/zero-cache/src/services/litestream/commands.d.ts.map +1 -1
- package/out/zero-cache/src/services/litestream/commands.js +40 -34
- package/out/zero-cache/src/services/litestream/commands.js.map +1 -1
- package/out/zero-cache/src/services/mutagen/mutagen.js +3 -3
- package/out/zero-cache/src/services/mutagen/pusher.d.ts +28 -28
- package/out/zero-cache/src/services/replicator/change-processor.js +2 -2
- package/out/zero-cache/src/services/replicator/incremental-sync.js +1 -1
- package/out/zero-cache/src/services/replicator/schema/replication-state.js +1 -1
- package/out/zero-cache/src/services/replicator/write-worker-client.js.map +1 -1
- package/out/zero-cache/src/services/replicator/write-worker.js +3 -3
- package/out/zero-cache/src/services/replicator/write-worker.js.map +1 -1
- package/out/zero-cache/src/services/run-ast.d.ts.map +1 -1
- package/out/zero-cache/src/services/run-ast.js +2 -2
- package/out/zero-cache/src/services/run-ast.js.map +1 -1
- package/out/zero-cache/src/services/statz.d.ts.map +1 -1
- package/out/zero-cache/src/services/statz.js +3 -3
- package/out/zero-cache/src/services/statz.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/active-users-gauge.js +1 -1
- package/out/zero-cache/src/services/view-syncer/connection-context-manager.d.ts +2 -2
- package/out/zero-cache/src/services/view-syncer/connection-context-manager.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/connection-context-manager.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr-purger.js +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr-store.js +3 -3
- package/out/zero-cache/src/services/view-syncer/cvr-store.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr.js +1 -1
- package/out/zero-cache/src/services/view-syncer/inspect-handler.js +2 -2
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts +6 -16
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.js +30 -38
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/row-record-cache.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/row-record-cache.js +4 -4
- package/out/zero-cache/src/services/view-syncer/row-record-cache.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/snapshotter.js +2 -2
- package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/view-syncer.js +6 -6
- package/out/zero-cache/src/services/view-syncer/view-syncer.js.map +1 -1
- package/out/zero-cache/src/types/profiler.d.ts.map +1 -1
- package/out/zero-cache/src/types/profiler.js.map +1 -1
- package/out/zero-cache/src/types/row-key.d.ts.map +1 -1
- package/out/zero-cache/src/types/row-key.js.map +1 -1
- package/out/zero-cache/src/types/streams.d.ts +1 -1
- package/out/zero-cache/src/types/streams.d.ts.map +1 -1
- package/out/zero-cache/src/types/streams.js.map +1 -1
- package/out/zero-cache/src/types/websocket-handoff.d.ts +1 -1
- package/out/zero-cache/src/types/websocket-handoff.d.ts.map +1 -1
- package/out/zero-cache/src/types/websocket-handoff.js +1 -1
- package/out/zero-cache/src/types/websocket-handoff.js.map +1 -1
- package/out/zero-cache/src/workers/connection.d.ts +1 -1
- package/out/zero-cache/src/workers/connection.d.ts.map +1 -1
- package/out/zero-cache/src/workers/connection.js +2 -2
- package/out/zero-cache/src/workers/connection.js.map +1 -1
- package/out/zero-cache/src/workers/mutator.js.map +1 -1
- package/out/zero-cache/src/workers/syncer.d.ts +1 -1
- package/out/zero-cache/src/workers/syncer.d.ts.map +1 -1
- package/out/zero-cache/src/workers/syncer.js +3 -3
- package/out/zero-cache/src/workers/syncer.js.map +1 -1
- package/out/zero-client/src/client/crud-impl.d.ts.map +1 -1
- package/out/zero-client/src/client/crud-impl.js +4 -13
- package/out/zero-client/src/client/crud-impl.js.map +1 -1
- package/out/zero-client/src/client/inspector/inspector.d.ts +24 -0
- package/out/zero-client/src/client/inspector/inspector.d.ts.map +1 -1
- package/out/zero-client/src/client/inspector/inspector.js +28 -0
- package/out/zero-client/src/client/inspector/inspector.js.map +1 -1
- package/out/zero-client/src/client/inspector/lazy-inspector.d.ts +9 -0
- package/out/zero-client/src/client/inspector/lazy-inspector.d.ts.map +1 -1
- package/out/zero-client/src/client/inspector/lazy-inspector.js +28 -1
- package/out/zero-client/src/client/inspector/lazy-inspector.js.map +1 -1
- package/out/zero-client/src/client/ivm-branch.d.ts.map +1 -1
- package/out/zero-client/src/client/ivm-branch.js +4 -13
- package/out/zero-client/src/client/ivm-branch.js.map +1 -1
- package/out/zero-client/src/client/version.js +1 -1
- package/out/zero-protocol/src/error.d.ts.map +1 -1
- package/out/zero-protocol/src/error.js +1 -1
- package/out/zero-protocol/src/error.js.map +1 -1
- package/out/zero-solid/src/solid-view.d.ts.map +1 -1
- package/out/zero-solid/src/solid-view.js +13 -13
- package/out/zero-solid/src/solid-view.js.map +1 -1
- package/out/zql/src/builder/builder.d.ts.map +1 -1
- package/out/zql/src/builder/builder.js.map +1 -1
- package/out/zql/src/ivm/array-view.d.ts.map +1 -1
- package/out/zql/src/ivm/array-view.js +26 -1
- package/out/zql/src/ivm/array-view.js.map +1 -1
- package/out/zql/src/ivm/change-index-enum.d.ts +9 -0
- package/out/zql/src/ivm/change-index-enum.d.ts.map +1 -0
- package/out/zql/src/ivm/change-index.d.ts +5 -0
- package/out/zql/src/ivm/change-index.d.ts.map +1 -0
- package/out/zql/src/ivm/change-type-enum.d.ts +9 -0
- package/out/zql/src/ivm/change-type-enum.d.ts.map +1 -0
- package/out/zql/src/ivm/change-type.d.ts +5 -0
- package/out/zql/src/ivm/change-type.d.ts.map +1 -0
- package/out/zql/src/ivm/change.d.ts +20 -22
- package/out/zql/src/ivm/change.d.ts.map +1 -1
- package/out/zql/src/ivm/change.js +33 -0
- package/out/zql/src/ivm/change.js.map +1 -0
- package/out/zql/src/ivm/exists.d.ts.map +1 -1
- package/out/zql/src/ivm/exists.js +27 -38
- package/out/zql/src/ivm/exists.js.map +1 -1
- package/out/zql/src/ivm/fan-in.d.ts +3 -2
- package/out/zql/src/ivm/fan-in.d.ts.map +1 -1
- package/out/zql/src/ivm/fan-in.js.map +1 -1
- package/out/zql/src/ivm/fan-out.d.ts +1 -1
- package/out/zql/src/ivm/fan-out.d.ts.map +1 -1
- package/out/zql/src/ivm/fan-out.js +1 -1
- package/out/zql/src/ivm/fan-out.js.map +1 -1
- package/out/zql/src/ivm/filter-operators.d.ts +3 -3
- package/out/zql/src/ivm/filter-operators.d.ts.map +1 -1
- package/out/zql/src/ivm/filter-operators.js.map +1 -1
- package/out/zql/src/ivm/filter-push.d.ts.map +1 -1
- package/out/zql/src/ivm/filter-push.js +7 -7
- package/out/zql/src/ivm/filter-push.js.map +1 -1
- package/out/zql/src/ivm/filter.d.ts +1 -1
- package/out/zql/src/ivm/filter.d.ts.map +1 -1
- package/out/zql/src/ivm/filter.js.map +1 -1
- package/out/zql/src/ivm/flipped-join.d.ts.map +1 -1
- package/out/zql/src/ivm/flipped-join.js +49 -58
- package/out/zql/src/ivm/flipped-join.js.map +1 -1
- package/out/zql/src/ivm/join-utils.d.ts +2 -6
- package/out/zql/src/ivm/join-utils.d.ts.map +1 -1
- package/out/zql/src/ivm/join-utils.js +25 -25
- package/out/zql/src/ivm/join-utils.js.map +1 -1
- package/out/zql/src/ivm/join.d.ts.map +1 -1
- package/out/zql/src/ivm/join.js +32 -51
- package/out/zql/src/ivm/join.js.map +1 -1
- package/out/zql/src/ivm/maybe-split-and-push-edit-change.d.ts +1 -1
- package/out/zql/src/ivm/maybe-split-and-push-edit-change.d.ts.map +1 -1
- package/out/zql/src/ivm/maybe-split-and-push-edit-change.js +5 -10
- package/out/zql/src/ivm/maybe-split-and-push-edit-change.js.map +1 -1
- package/out/zql/src/ivm/memory-source.d.ts.map +1 -1
- package/out/zql/src/ivm/memory-source.js +51 -59
- package/out/zql/src/ivm/memory-source.js.map +1 -1
- package/out/zql/src/ivm/push-accumulated.d.ts +3 -2
- package/out/zql/src/ivm/push-accumulated.d.ts.map +1 -1
- package/out/zql/src/ivm/push-accumulated.js +98 -122
- package/out/zql/src/ivm/push-accumulated.js.map +1 -1
- package/out/zql/src/ivm/skip.d.ts +1 -1
- package/out/zql/src/ivm/skip.d.ts.map +1 -1
- package/out/zql/src/ivm/skip.js +2 -2
- package/out/zql/src/ivm/skip.js.map +1 -1
- package/out/zql/src/ivm/source-change-index-enum.d.ts +7 -0
- package/out/zql/src/ivm/source-change-index-enum.d.ts.map +1 -0
- package/out/zql/src/ivm/source-change-index.d.ts +5 -0
- package/out/zql/src/ivm/source-change-index.d.ts.map +1 -0
- package/out/zql/src/ivm/source.d.ts +11 -13
- package/out/zql/src/ivm/source.d.ts.map +1 -1
- package/out/zql/src/ivm/source.js +26 -0
- package/out/zql/src/ivm/source.js.map +1 -0
- package/out/zql/src/ivm/take.d.ts.map +1 -1
- package/out/zql/src/ivm/take.js +27 -50
- package/out/zql/src/ivm/take.js.map +1 -1
- package/out/zql/src/ivm/union-fan-in.d.ts +2 -1
- package/out/zql/src/ivm/union-fan-in.d.ts.map +1 -1
- package/out/zql/src/ivm/union-fan-in.js +3 -3
- package/out/zql/src/ivm/union-fan-in.js.map +1 -1
- package/out/zql/src/ivm/union-fan-out.d.ts.map +1 -1
- package/out/zql/src/ivm/union-fan-out.js +1 -1
- package/out/zql/src/ivm/union-fan-out.js.map +1 -1
- package/out/zql/src/planner/planner-debug.d.ts +2 -2
- package/out/zql/src/planner/planner-debug.d.ts.map +1 -1
- package/out/zql/src/planner/planner-debug.js.map +1 -1
- package/out/zql/src/planner/planner-graph.d.ts +1 -1
- package/out/zql/src/planner/planner-graph.d.ts.map +1 -1
- package/out/zql/src/planner/planner-graph.js.map +1 -1
- package/out/zqlite/src/internal/sql-inline.d.ts.map +1 -1
- package/out/zqlite/src/internal/sql-inline.js.map +1 -1
- package/out/zqlite/src/query-builder.d.ts.map +1 -1
- package/out/zqlite/src/query-builder.js.map +1 -1
- package/out/zqlite/src/table-source.d.ts.map +1 -1
- package/out/zqlite/src/table-source.js +11 -11
- package/out/zqlite/src/table-source.js.map +1 -1
- package/package.json +99 -93
@@ -1 +1 @@
-{"version":3,"file":"published.js","names":[],"sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/published.ts"],"sourcesContent":["import {literal} from 'pg-format';\nimport type postgres from 'postgres';\nimport {equals} from '../../../../../../shared/src/set-utils.ts';\n…"],"mappings":"…"}
+{"version":3,"file":"published.js","names":[],"sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/published.ts"],"sourcesContent":["import {literal} from 'pg-format';\nimport type postgres from 'postgres';\nimport {assert} from '../../../../../../shared/src/asserts.ts';\nimport {BigIntJSON} from '../../../../../../shared/src/bigint-json.ts';\nimport {equals} from '../../../../../../shared/src/set-utils.ts';\n…"],"mappings":"…"}
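The sourcesContent embedded in this map is the most readable record of the change: the former `publishedTableQuery` and `indexDefinitionsQuery` are folded into a single `publishedSchemaQuery`, and `getPublicationInfo` now parses one combined `publishedSchema` JSON object instead of merging two separate result sets. The excerpt below is transcribed from the embedded source of the new version to illustrate that shape change; it is not the compiled output shipped in the package.

```ts
// Tail of getPublicationInfo, transcribed from the sourcesContent embedded in
// the new published.js.map. The third statement of the batched query now
// returns a single row whose `publishedSchema` column carries both tables and
// indexes, so the result is validated and parsed as one object.
assert(
  result[2][0].publishedSchema,
  () => `Invalid publishedSchema result ${BigIntJSON.stringify(result[2])}`,
);

return {
  publications: v.parse(result[1], publicationsResultSchema),
  ...v.parse(result[2][0].publishedSchema, publishedSchema),
};
```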
@@ -2,8 +2,8 @@ import { assert } from "../../../../../../shared/src/asserts.js";
 import { parse, valita_exports } from "../../../../../../shared/src/valita.js";
 import { jsonObjectSchema, stringify } from "../../../../../../shared/src/bigint-json.js";
 import { appSchema, check, upstreamSchema } from "../../../../types/shards.js";
-import { getPublicationInfo, publishedSchema } from "./published.js";
 import { id } from "../../../../types/sql.js";
+import { getPublicationInfo, publishedSchema } from "./published.js";
 import { createEventTriggerStatements } from "./ddl.js";
 import { validate } from "./validation.js";
 import postgres from "postgres";
@@ -1,7 +1,7 @@
 import { literalUnion, valita_exports } from "../../../../../../shared/src/valita.js";
 import { must } from "../../../../../../shared/src/must.js";
-import { jsonObjectSchema } from "./json.js";
 import { jsonValueSchema } from "../../../../../../shared/src/bigint-json.js";
+import { jsonObjectSchema } from "./json.js";
 import { columnSpec, indexSpec, tableSpec } from "../../../../db/specs.js";
 //#region ../zero-cache/src/services/change-source/protocol/current/data.ts
 /**
@@ -1,7 +1,7 @@
 import { promiseVoid } from "../../../../shared/src/resolved-promises.js";
 import { Database } from "../../../../zqlite/src/db.js";
-import { RunningState } from "../running-state.js";
 import { getOrCreateGauge } from "../../observability/metrics.js";
+import { RunningState } from "../running-state.js";
 import { Subscription } from "../../types/subscription.js";
 import parsePrometheusTextFormat from "parse-prometheus-text-format";
 //#region ../zero-cache/src/services/change-streamer/backup-monitor.ts
@@ -1,5 +1,5 @@
-import type { LogContext } from '@rocicorp/logger';
 import type { IncomingMessage } from 'node:http';
+import type { LogContext } from '@rocicorp/logger';
 import type { ZeroConfig } from '../../config/zero-config.ts';
 import { type Worker } from '../../types/processes.ts';
 import { type ShardID } from '../../types/shards.ts';
@@ -1 +1 @@
-{"version":3,"file":"change-streamer-http.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/change-streamer/change-streamer-http.ts"],"names":[],"mappings":"
+{"version":3,"file":"change-streamer-http.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/change-streamer/change-streamer-http.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,eAAe,EAAC,MAAM,WAAW,CAAC;…"}
@@ -1,8 +1,8 @@
 import { assert } from "../../../../shared/src/asserts.js";
 import { must } from "../../../../shared/src/must.js";
 import "../../types/processes.js";
-import { HttpService } from "../http-service.js";
 import { pgClient } from "../../types/pg.js";
+import { HttpService } from "../http-service.js";
 import { PROTOCOL_ERROR, closeWithError } from "../../types/ws.js";
 import { installWebSocketReceiver } from "../../types/websocket-handoff.js";
 import { discoverChangeStreamerAddress } from "./schema/tables.js";
@@ -10,7 +10,7 @@ import { streamIn, streamOut } from "../../types/streams.js";
 import { URLParams } from "../../types/url-params.js";
 import { downstreamSchema } from "./change-streamer.js";
 import { snapshotMessageSchema } from "./snapshot.js";
-import WebSocket
+import WebSocket from "ws";
 import websocket from "@fastify/websocket";
 //#region ../zero-cache/src/services/change-streamer/change-streamer-http.ts
 var MIN_SUPPORTED_PROTOCOL_VERSION = 1;
@@ -122,11 +122,11 @@ var ChangeStreamerHttpClient = class {
     return uri;
   }
   async reserveSnapshot(taskID) {
-    const ws = new WebSocket
+    const ws = new WebSocket(await this.#resolveChangeStreamer(SNAPSHOT_PATH) + `?${new URLSearchParams({ taskID }).toString()}`);
     return streamIn(this.#lc, ws, snapshotMessageSchema);
   }
   async subscribe(ctx) {
-    const ws = new WebSocket
+    const ws = new WebSocket(await this.#resolveChangeStreamer(CHANGES_PATH) + `?${getParams(ctx).toString()}`);
     return streamIn(this.#lc, ws, downstreamSchema);
   }
 };
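For readability, the TypeScript behind these bundled one-liners is quoted below from the sourcesContent embedded in change-streamer-http.js.map (the next hunk); the compiled lines above are the inlined form of the same `#resolveChangeStreamer` plus query-string construction.

```ts
// Quoted from the sourcesContent embedded in change-streamer-http.js.map
// (ChangeStreamerHttpClient). Shown only for orientation; this is the source
// the bundler compiled, not an addition in this release.
async reserveSnapshot(taskID: string): Promise<Source<SnapshotMessage>> {
  const uri = await this.#resolveChangeStreamer(SNAPSHOT_PATH);

  const params = new URLSearchParams({taskID});
  const ws = new WebSocket(uri + `?${params.toString()}`);

  return streamIn(this.#lc, ws, snapshotMessageSchema);
}

async subscribe(ctx: SubscriberContext): Promise<Source<Downstream>> {
  const uri = await this.#resolveChangeStreamer(CHANGES_PATH);

  const params = getParams(ctx);
  const ws = new WebSocket(uri + `?${params.toString()}`);

  return streamIn(this.#lc, ws, downstreamSchema);
}
```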
@@ -1 +1 @@
-{"version":3,"file":"change-streamer-http.js","names":["#lc","#opts","#changeStreamer","#backupMonitor","#subscribe","#reserveSnapshot","#receiveWebsocket","#getBackupMonitor","#ensureChangeStreamerStarted","#changeStreamerStarted","#shardID","#changeDB","#changeStreamerURI","#resolveChangeStreamer"],"sources":["../../../../../../zero-cache/src/services/change-streamer/change-streamer-http.ts"],"sourcesContent":["import websocket from '@fastify/websocket';\nimport type {LogContext} from '@rocicorp/logger';\nimport type {IncomingMessage} from 'node:http';\nimport WebSocket from 'ws';\n…"],"mappings":"…
AAG;EACH,QAAQ,IAAI,SAAS,IAAI,SAAS;EAClC,SAAS,IAAI,UAAU,SAAS;EACjC,CAAC"}
+
{"version":3,"file":"change-streamer-http.js","names":["#lc","#opts","#changeStreamer","#backupMonitor","#subscribe","#reserveSnapshot","#receiveWebsocket","#getBackupMonitor","#ensureChangeStreamerStarted","#changeStreamerStarted","#shardID","#changeDB","#changeStreamerURI","#resolveChangeStreamer"],"sources":["../../../../../../zero-cache/src/services/change-streamer/change-streamer-http.ts"],"sourcesContent":["import type {IncomingMessage} from 'node:http';\nimport websocket from '@fastify/websocket';\nimport type {LogContext} from '@rocicorp/logger';\nimport WebSocket from 'ws';\nimport {assert} from '../../../../shared/src/asserts.ts';\nimport {must} from '../../../../shared/src/must.ts';\nimport type {ZeroConfig} from '../../config/zero-config.ts';\nimport type {IncomingMessageSubset} from '../../types/http.ts';\nimport {pgClient, type PostgresDB} from '../../types/pg.ts';\nimport {type Worker} from '../../types/processes.ts';\nimport {type ShardID} from '../../types/shards.ts';\nimport {streamIn, streamOut, type Source} from '../../types/streams.ts';\nimport {URLParams} from '../../types/url-params.ts';\nimport {installWebSocketReceiver} from '../../types/websocket-handoff.ts';\nimport {closeWithError, PROTOCOL_ERROR} from '../../types/ws.ts';\nimport {HttpService} from '../http-service.ts';\nimport type {Service} from '../service.ts';\nimport type {BackupMonitor} from './backup-monitor.ts';\nimport {\n downstreamSchema,\n PROTOCOL_VERSION,\n type ChangeStreamer,\n type Downstream,\n type SubscriberContext,\n} from './change-streamer.ts';\nimport {discoverChangeStreamerAddress} from './schema/tables.ts';\nimport {snapshotMessageSchema, type SnapshotMessage} from './snapshot.ts';\n\nconst MIN_SUPPORTED_PROTOCOL_VERSION = 1;\n\nconst SNAPSHOT_PATH_PATTERN = '/replication/:version/snapshot';\nconst CHANGES_PATH_PATTERN = '/replication/:version/changes';\nconst PATH_REGEX = /\\/replication\\/v(?<version>\\d+)\\/(changes|snapshot)$/;\n\nconst SNAPSHOT_PATH = `/replication/v${PROTOCOL_VERSION}/snapshot`;\nconst CHANGES_PATH = `/replication/v${PROTOCOL_VERSION}/changes`;\n\ntype Options = {\n port: number;\n startupDelayMs: number;\n};\n\nexport class ChangeStreamerHttpServer extends HttpService {\n readonly id = 'change-streamer-http-server';\n readonly #lc: LogContext;\n readonly #opts: Options;\n readonly #changeStreamer: ChangeStreamer & Service;\n readonly #backupMonitor: BackupMonitor | null;\n\n constructor(\n lc: LogContext,\n config: ZeroConfig,\n opts: Options,\n parent: Worker,\n changeStreamer: ChangeStreamer & Service,\n backupMonitor: BackupMonitor | null,\n ) {\n super('change-streamer-http-server', lc, opts, async fastify => {\n const websocketOptions: {perMessageDeflate?: boolean | object} = {};\n if (config.websocketCompression) {\n if (config.websocketCompressionOptions) {\n try {\n websocketOptions.perMessageDeflate = JSON.parse(\n config.websocketCompressionOptions,\n );\n } catch (e) {\n throw new Error(\n `Failed to parse ZERO_WEBSOCKET_COMPRESSION_OPTIONS: ${String(e)}. 
Expected valid JSON.`,\n );\n }\n } else {\n websocketOptions.perMessageDeflate = true;\n }\n }\n\n await fastify.register(websocket, {\n options: websocketOptions,\n });\n\n fastify.get(CHANGES_PATH_PATTERN, {websocket: true}, this.#subscribe);\n fastify.get(\n SNAPSHOT_PATH_PATTERN,\n {websocket: true},\n this.#reserveSnapshot,\n );\n\n installWebSocketReceiver<'snapshot' | 'changes'>(\n lc,\n fastify.websocketServer,\n this.#receiveWebsocket,\n parent,\n );\n });\n\n this.#lc = lc;\n this.#opts = opts;\n this.#changeStreamer = changeStreamer;\n this.#backupMonitor = backupMonitor;\n }\n\n #getBackupMonitor() {\n return must(\n this.#backupMonitor,\n 'replication-manager is not configured with a ZERO_LITESTREAM_BACKUP_URL',\n );\n }\n\n // Called when receiving a web socket via the main dispatcher handoff.\n readonly #receiveWebsocket = (\n ws: WebSocket,\n action: 'changes' | 'snapshot',\n msg: IncomingMessageSubset,\n ) => {\n switch (action) {\n case 'snapshot':\n return this.#reserveSnapshot(ws, msg);\n case 'changes':\n return this.#subscribe(ws, msg);\n default:\n closeWithError(\n this._lc,\n ws,\n `invalid action \"${action}\" received in handoff`,\n );\n return;\n }\n };\n\n readonly #reserveSnapshot = (ws: WebSocket, req: RequestHeaders) => {\n try {\n const url = new URL(\n req.url ?? '',\n req.headers.origin ?? 'http://localhost',\n );\n checkProtocolVersion(url.pathname);\n const taskID = url.searchParams.get('taskID');\n if (!taskID) {\n throw new Error('Missing taskID in snapshot request');\n }\n const downstream =\n this.#getBackupMonitor().startSnapshotReservation(taskID);\n void streamOut(this._lc, downstream, ws);\n } catch (err) {\n closeWithError(this._lc, ws, err, PROTOCOL_ERROR);\n }\n };\n\n readonly #subscribe = async (ws: WebSocket, req: RequestHeaders) => {\n try {\n const ctx = getSubscriberContext(req);\n if (ctx.mode === 'serving') {\n this.#ensureChangeStreamerStarted('incoming subscription');\n }\n\n const downstream = await this.#changeStreamer.subscribe(ctx);\n if (ctx.initial && ctx.taskID && this.#backupMonitor) {\n // Now that the change-streamer knows about the subscriber and watermark,\n // end the reservation to safely resume scheduling cleanup.\n this.#backupMonitor.endReservation(ctx.taskID);\n }\n void streamOut(this._lc, downstream, ws);\n } catch (err) {\n closeWithError(this._lc, ws, err, PROTOCOL_ERROR);\n }\n };\n\n #changeStreamerStarted = false;\n\n #ensureChangeStreamerStarted(reason: string) {\n if (!this.#changeStreamerStarted && this._state.shouldRun()) {\n this.#lc.info?.(`starting ChangeStreamerService: ${reason}`);\n void this.#changeStreamer\n .run()\n .catch(e =>\n this.#lc.warn?.(`ChangeStreamerService ended with error`, e),\n )\n .finally(() => this.stop());\n\n this.#changeStreamerStarted = true;\n }\n }\n\n protected override _onStart(): void {\n const {startupDelayMs} = this.#opts;\n this._state.setTimeout(\n () =>\n this.#ensureChangeStreamerStarted(\n `startup delay elapsed (${startupDelayMs} ms)`,\n ),\n startupDelayMs,\n );\n }\n\n protected override async _onStop(): Promise<void> {\n if (this.#changeStreamerStarted) {\n await this.#changeStreamer.stop();\n }\n }\n}\n\nexport class ChangeStreamerHttpClient implements ChangeStreamer {\n readonly #lc: LogContext;\n readonly #shardID: ShardID;\n readonly #changeDB: PostgresDB;\n readonly #changeStreamerURI: string | undefined;\n\n constructor(\n lc: LogContext,\n shardID: ShardID,\n changeDB: string,\n changeStreamerURI: string | undefined,\n ) {\n this.#lc = lc;\n 
this.#shardID = shardID;\n // Create a pg client with a single short-lived connection for the purpose\n // of change-streamer discovery (i.e. ChangeDB as DNS).\n this.#changeDB = pgClient(lc, changeDB, {\n max: 1,\n ['idle_timeout']: 15,\n connection: {['application_name']: 'change-streamer-discovery'},\n });\n this.#changeStreamerURI = changeStreamerURI;\n }\n\n async #resolveChangeStreamer(path: string) {\n let baseURL = this.#changeStreamerURI;\n if (!baseURL) {\n const address = await discoverChangeStreamerAddress(\n this.#shardID,\n this.#changeDB,\n );\n if (!address) {\n throw new Error(`no change-streamer is running`);\n }\n baseURL = address.includes('://') ? `${address}/` : `ws://${address}/`;\n }\n const uri = new URL(path, baseURL);\n this.#lc.info?.(`connecting to change-streamer@${uri}`);\n return uri;\n }\n\n async reserveSnapshot(taskID: string): Promise<Source<SnapshotMessage>> {\n const uri = await this.#resolveChangeStreamer(SNAPSHOT_PATH);\n\n const params = new URLSearchParams({taskID});\n const ws = new WebSocket(uri + `?${params.toString()}`);\n\n return streamIn(this.#lc, ws, snapshotMessageSchema);\n }\n\n async subscribe(ctx: SubscriberContext): Promise<Source<Downstream>> {\n const uri = await this.#resolveChangeStreamer(CHANGES_PATH);\n\n const params = getParams(ctx);\n const ws = new WebSocket(uri + `?${params.toString()}`);\n\n return streamIn(this.#lc, ws, downstreamSchema);\n }\n}\n\ntype RequestHeaders = Pick<IncomingMessage, 'url' | 'headers'>;\n\nexport function getSubscriberContext(req: RequestHeaders): SubscriberContext {\n const url = new URL(req.url ?? '', req.headers.origin ?? 'http://localhost');\n const protocolVersion = checkProtocolVersion(url.pathname);\n const params = new URLParams(url);\n\n return {\n protocolVersion,\n id: params.get('id', true),\n taskID: params.get('taskID', false),\n mode: params.get('mode', false) === 'backup' ? 'backup' : 'serving',\n replicaVersion: params.get('replicaVersion', true),\n watermark: params.get('watermark', true),\n initial: params.getBoolean('initial'),\n };\n}\n\nfunction checkProtocolVersion(pathname: string): number {\n const match = PATH_REGEX.exec(pathname);\n if (!match) {\n throw new Error(`invalid path: ${pathname}`);\n }\n const v = Number(match.groups?.version);\n if (\n Number.isNaN(v) ||\n v > PROTOCOL_VERSION ||\n v < MIN_SUPPORTED_PROTOCOL_VERSION\n ) {\n throw new Error(\n `Cannot service client at protocol v${v}. ` +\n `Supported protocols: [v${MIN_SUPPORTED_PROTOCOL_VERSION} ... v${PROTOCOL_VERSION}]`,\n );\n }\n return v;\n}\n\n// This is called from the client-side (i.e. the replicator).\nfunction getParams(ctx: SubscriberContext): URLSearchParams {\n // The protocolVersion is hard-coded into the CHANGES_PATH.\n const {protocolVersion, ...stringParams} = ctx;\n assert(\n protocolVersion === PROTOCOL_VERSION,\n `replicator should be setting protocolVersion to ${PROTOCOL_VERSION}`,\n );\n return new URLSearchParams({\n ...stringParams,\n taskID: ctx.taskID ? ctx.taskID : '',\n initial: ctx.initial ? 
'true' : 'false',\n });\n}\n"],"mappings":";;;;;;;;;;;;;;;AA4BA,IAAM,iCAAiC;AAEvC,IAAM,wBAAwB;AAC9B,IAAM,uBAAuB;AAC7B,IAAM,aAAa;AAEnB,IAAM,gBAAgB;AACtB,IAAM,eAAe;AAOrB,IAAa,2BAAb,cAA8C,YAAY;CACxD,KAAc;CACd;CACA;CACA;CACA;CAEA,YACE,IACA,QACA,MACA,QACA,gBACA,eACA;AACA,QAAM,+BAA+B,IAAI,MAAM,OAAM,YAAW;GAC9D,MAAM,mBAA2D,EAAE;AACnE,OAAI,OAAO,qBACT,KAAI,OAAO,4BACT,KAAI;AACF,qBAAiB,oBAAoB,KAAK,MACxC,OAAO,4BACR;YACM,GAAG;AACV,UAAM,IAAI,MACR,uDAAuD,OAAO,EAAE,CAAC,wBAClE;;OAGH,kBAAiB,oBAAoB;AAIzC,SAAM,QAAQ,SAAS,WAAW,EAChC,SAAS,kBACV,CAAC;AAEF,WAAQ,IAAI,sBAAsB,EAAC,WAAW,MAAK,EAAE,MAAA,UAAgB;AACrE,WAAQ,IACN,uBACA,EAAC,WAAW,MAAK,EACjB,MAAA,gBACD;AAED,4BACE,IACA,QAAQ,iBACR,MAAA,kBACA,OACD;IACD;AAEF,QAAA,KAAW;AACX,QAAA,OAAa;AACb,QAAA,iBAAuB;AACvB,QAAA,gBAAsB;;CAGxB,oBAAoB;AAClB,SAAO,KACL,MAAA,eACA,0EACD;;CAIH,qBACE,IACA,QACA,QACG;AACH,UAAQ,QAAR;GACE,KAAK,WACH,QAAO,MAAA,gBAAsB,IAAI,IAAI;GACvC,KAAK,UACH,QAAO,MAAA,UAAgB,IAAI,IAAI;GACjC;AACE,mBACE,KAAK,KACL,IACA,mBAAmB,OAAO,uBAC3B;AACD;;;CAIN,oBAA6B,IAAe,QAAwB;AAClE,MAAI;GACF,MAAM,MAAM,IAAI,IACd,IAAI,OAAO,IACX,IAAI,QAAQ,UAAU,mBACvB;AACD,wBAAqB,IAAI,SAAS;GAClC,MAAM,SAAS,IAAI,aAAa,IAAI,SAAS;AAC7C,OAAI,CAAC,OACH,OAAM,IAAI,MAAM,qCAAqC;GAEvD,MAAM,aACJ,MAAA,kBAAwB,CAAC,yBAAyB,OAAO;AACtD,aAAU,KAAK,KAAK,YAAY,GAAG;WACjC,KAAK;AACZ,kBAAe,KAAK,KAAK,IAAI,KAAK,eAAe;;;CAIrD,aAAsB,OAAO,IAAe,QAAwB;AAClE,MAAI;GACF,MAAM,MAAM,qBAAqB,IAAI;AACrC,OAAI,IAAI,SAAS,UACf,OAAA,4BAAkC,wBAAwB;GAG5D,MAAM,aAAa,MAAM,MAAA,eAAqB,UAAU,IAAI;AAC5D,OAAI,IAAI,WAAW,IAAI,UAAU,MAAA,cAG/B,OAAA,cAAoB,eAAe,IAAI,OAAO;AAE3C,aAAU,KAAK,KAAK,YAAY,GAAG;WACjC,KAAK;AACZ,kBAAe,KAAK,KAAK,IAAI,KAAK,eAAe;;;CAIrD,yBAAyB;CAEzB,6BAA6B,QAAgB;AAC3C,MAAI,CAAC,MAAA,yBAA+B,KAAK,OAAO,WAAW,EAAE;AAC3D,SAAA,GAAS,OAAO,mCAAmC,SAAS;AACvD,SAAA,eACF,KAAK,CACL,OAAM,MACL,MAAA,GAAS,OAAO,0CAA0C,EAAE,CAC7D,CACA,cAAc,KAAK,MAAM,CAAC;AAE7B,SAAA,wBAA8B;;;CAIlC,WAAoC;EAClC,MAAM,EAAC,mBAAkB,MAAA;AACzB,OAAK,OAAO,iBAER,MAAA,4BACE,0BAA0B,eAAe,MAC1C,EACH,eACD;;CAGH,MAAyB,UAAyB;AAChD,MAAI,MAAA,sBACF,OAAM,MAAA,eAAqB,MAAM;;;AAKvC,IAAa,2BAAb,MAAgE;CAC9D;CACA;CACA;CACA;CAEA,YACE,IACA,SACA,UACA,mBACA;AACA,QAAA,KAAW;AACX,QAAA,UAAgB;AAGhB,QAAA,WAAiB,SAAS,IAAI,UAAU;GACtC,KAAK;IACJ,iBAAiB;GAClB,YAAY,GAAE,qBAAqB,6BAA4B;GAChE,CAAC;AACF,QAAA,oBAA0B;;CAG5B,OAAA,sBAA6B,MAAc;EACzC,IAAI,UAAU,MAAA;AACd,MAAI,CAAC,SAAS;GACZ,MAAM,UAAU,MAAM,8BACpB,MAAA,SACA,MAAA,SACD;AACD,OAAI,CAAC,QACH,OAAM,IAAI,MAAM,gCAAgC;AAElD,aAAU,QAAQ,SAAS,MAAM,GAAG,GAAG,QAAQ,KAAK,QAAQ,QAAQ;;EAEtE,MAAM,MAAM,IAAI,IAAI,MAAM,QAAQ;AAClC,QAAA,GAAS,OAAO,iCAAiC,MAAM;AACvD,SAAO;;CAGT,MAAM,gBAAgB,QAAkD;EAItE,MAAM,KAAK,IAAI,UAHH,MAAM,MAAA,sBAA4B,cAAc,GAG7B,IADhB,IAAI,gBAAgB,EAAC,QAAO,CAAC,CACF,UAAU,GAAG;AAEvD,SAAO,SAAS,MAAA,IAAU,IAAI,sBAAsB;;CAGtD,MAAM,UAAU,KAAqD;EAInE,MAAM,KAAK,IAAI,UAHH,MAAM,MAAA,sBAA4B,aAAa,GAG5B,IADhB,UAAU,IAAI,CACa,UAAU,GAAG;AAEvD,SAAO,SAAS,MAAA,IAAU,IAAI,iBAAiB;;;AAMnD,SAAgB,qBAAqB,KAAwC;CAC3E,MAAM,MAAM,IAAI,IAAI,IAAI,OAAO,IAAI,IAAI,QAAQ,UAAU,mBAAmB;CAC5E,MAAM,kBAAkB,qBAAqB,IAAI,SAAS;CAC1D,MAAM,SAAS,IAAI,UAAU,IAAI;AAEjC,QAAO;EACL;EACA,IAAI,OAAO,IAAI,MAAM,KAAK;EAC1B,QAAQ,OAAO,IAAI,UAAU,MAAM;EACnC,MAAM,OAAO,IAAI,QAAQ,MAAM,KAAK,WAAW,WAAW;EAC1D,gBAAgB,OAAO,IAAI,kBAAkB,KAAK;EAClD,WAAW,OAAO,IAAI,aAAa,KAAK;EACxC,SAAS,OAAO,WAAW,UAAU;EACtC;;AAGH,SAAS,qBAAqB,UAA0B;CACtD,MAAM,QAAQ,WAAW,KAAK,SAAS;AACvC,KAAI,CAAC,MACH,OAAM,IAAI,MAAM,iBAAiB,WAAW;CAE9C,MAAM,IAAI,OAAO,MAAM,QAAQ,QAAQ;AACvC,KACE,OAAO,MAAM,EAAE,IACf,IAAA,KACA,IAAI,+BAEJ,OAAM,IAAI,MACR,sCAAsC,EAAE,2BACZ,+BAA+B,UAC5D;AAEH,QAAO;;AAIT,SAAS,UAAU,KAAyC;CAE1D,MAAM,EAAC,iBAAiB,GAAG,iBAAgB;AAC3C,QACE,oBAAA,GACA,oDACD;AACD,QAAO,IAAI,gBAAgB;EACzB,G
AAG;EACH,QAAQ,IAAI,SAAS,IAAI,SAAS;EAClC,SAAS,IAAI,UAAU,SAAS;EACjC,CAAC"}
@@ -6,10 +6,14 @@ import { type ChangeStreamData } from '../change-source/protocol/current/downstr
 import { type ReplicationStatusPublisher } from '../replicator/replication-status.ts';
 import type { SubscriptionState } from '../replicator/schema/replication-state.ts';
 import { type ChangeStreamerService } from './change-streamer.ts';
+import { type PurgeLock, type TuningOptions as StorerOptions } from './storer.ts';
+export type TuningOptions = StorerOptions & {
+    flowControlConsensusPaddingSeconds: number;
+};
 /**
  * Performs initialization and schema migrations to initialize a ChangeStreamerImpl.
  */
-export declare function initializeStreamer(lc: LogContext, shard: ShardID, taskID: string, discoveryAddress: string, discoveryProtocol: string, changeDB: PostgresDB, changeSource: ChangeSource, replicationStatusPublisher: ReplicationStatusPublisher, subscriptionState: SubscriptionState,
+export declare function initializeStreamer(lc: LogContext, shard: ShardID, taskID: string, discoveryAddress: string, discoveryProtocol: string, changeDB: PostgresDB, changeSource: ChangeSource, replicationStatusPublisher: ReplicationStatusPublisher, subscriptionState: SubscriptionState, purgeLock: PurgeLock | null, autoReset: boolean, opts: TuningOptions, setTimeoutFn?: typeof setTimeout): Promise<ChangeStreamerService>;
 /**
  * Internally all Downstream messages (not just commits) are given a watermark.
  * These are used for internal ordering for:
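Note (illustrative, not part of the package): a minimal caller sketch of the new initializeStreamer signature declared above. The parameter order and the TuningOptions/PurgeLock types come from this diff; everything else (the declare const stand-ins, the value 5, the relative import paths as they appear inside this package) is a placeholder, not a recommended configuration.

    import {initializeStreamer, type TuningOptions} from './change-streamer-service.ts';
    import type {PurgeLock} from './storer.ts';

    // Stand-ins for values that the real caller wires up itself.
    declare const deps: {
      lc: any; shard: any; taskID: string;
      discoveryAddress: string; discoveryProtocol: string;
      changeDB: any; changeSource: any;
      replicationStatusPublisher: any; subscriptionState: any;
    };
    declare const purgeLock: PurgeLock | null;
    declare const storerTuning: Omit<TuningOptions, 'flowControlConsensusPaddingSeconds'>;

    const opts: TuningOptions = {
      ...storerTuning,                       // the StorerOptions portion of TuningOptions
      flowControlConsensusPaddingSeconds: 5, // placeholder value, not a default
    };

    // 1.3.0 passed autoReset and the tuning numbers as separate trailing arguments;
    // 1.4.0-canary.0 inserts purgeLock and groups the tuning values into opts.
    const streamer = await initializeStreamer(
      deps.lc, deps.shard, deps.taskID,
      deps.discoveryAddress, deps.discoveryProtocol,
      deps.changeDB, deps.changeSource,
      deps.replicationStatusPublisher, deps.subscriptionState,
      purgeLock,
      /* autoReset */ true,
      opts,
    );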
@@ -1 +1 @@
-
{"version":3,"file":"change-streamer-service.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/change-streamer/change-streamer-service.ts"],"names":[],"mappings":"
+
{"version":3,"file":"change-streamer-service.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/change-streamer/change-streamer-service.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAWjD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,mBAAmB,CAAC;AAClD,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,uBAAuB,CAAC;AAGnD,OAAO,KAAK,EACV,YAAY,EAEb,MAAM,mCAAmC,CAAC;AAC3C,OAAO,EAEL,KAAK,gBAAgB,EACtB,MAAM,iDAAiD,CAAC;AACzD,OAAO,EAGL,KAAK,0BAA0B,EAChC,MAAM,qCAAqC,CAAC;AAC7C,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,2CAA2C,CAAC;AAMjF,OAAO,EACL,KAAK,qBAAqB,EAI3B,MAAM,sBAAsB,CAAC;AAS9B,OAAO,EAEL,KAAK,SAAS,EACd,KAAK,aAAa,IAAI,aAAa,EACpC,MAAM,aAAa,CAAC;AAGrB,MAAM,MAAM,aAAa,GAAG,aAAa,GAAG;IAC1C,kCAAkC,EAAE,MAAM,CAAC;CAC5C,CAAC;AAEF;;GAEG;AACH,wBAAsB,kBAAkB,CACtC,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,OAAO,EACd,MAAM,EAAE,MAAM,EACd,gBAAgB,EAAE,MAAM,EACxB,iBAAiB,EAAE,MAAM,EACzB,QAAQ,EAAE,UAAU,EACpB,YAAY,EAAE,YAAY,EAC1B,0BAA0B,EAAE,0BAA0B,EACtD,iBAAiB,EAAE,iBAAiB,EACpC,SAAS,EAAE,SAAS,GAAG,IAAI,EAC3B,SAAS,EAAE,OAAO,EAClB,IAAI,EAAE,aAAa,EACnB,YAAY,oBAAa,GACxB,OAAO,CAAC,qBAAqB,CAAC,CA4BhC;AAID;;;;;;;;;GASG;AACH,MAAM,MAAM,iBAAiB,GAAG,CAAC,SAAS,EAAE,MAAM,EAAE,gBAAgB,CAAC,CAAC"}
@@ -1,9 +1,9 @@
 import { unreachable } from "../../../../shared/src/asserts.js";
 import { promiseVoid } from "../../../../shared/src/resolved-promises.js";
 import "../change-source/protocol/current/downstream.js";
+import { getOrCreateCounter } from "../../observability/metrics.js";
 import { DEFAULT_MAX_RETRY_DELAY_MS, RunningState, UnrecoverableError } from "../running-state.js";
 import { min } from "../../types/lexi-version.js";
-import { getOrCreateCounter } from "../../observability/metrics.js";
 import { publishCriticalEvent } from "../../observability/events.js";
 import { AutoResetSignal, ensureReplicationConfig, markResetRequired } from "./schema/tables.js";
 import { Subscription } from "../../types/subscription.js";
@@ -19,11 +19,11 @@ import { getDefaultHighWaterMark } from "node:stream";
 /**
  * Performs initialization and schema migrations to initialize a ChangeStreamerImpl.
  */
-async function initializeStreamer(lc, shard, taskID, discoveryAddress, discoveryProtocol, changeDB, changeSource, replicationStatusPublisher, subscriptionState,
+async function initializeStreamer(lc, shard, taskID, discoveryAddress, discoveryProtocol, changeDB, changeSource, replicationStatusPublisher, subscriptionState, purgeLock, autoReset, opts, setTimeoutFn = setTimeout) {
   await initChangeStreamerSchema(lc, changeDB, shard);
   await ensureReplicationConfig(lc, changeDB, subscriptionState, shard, autoReset, setTimeoutFn);
   const { replicaVersion } = subscriptionState;
-  return new ChangeStreamerImpl(lc, shard, taskID, discoveryAddress, discoveryProtocol, changeDB, replicaVersion, changeSource, replicationStatusPublisher,
+  return new ChangeStreamerImpl(lc, shard, taskID, discoveryAddress, discoveryProtocol, changeDB, replicaVersion, changeSource, replicationStatusPublisher, purgeLock, autoReset, opts, setTimeoutFn);
 }
 var REPLICATION_STATUS_ERROR_DELAY_THRESHOLD_MS = 5e3;
 /**
@@ -181,8 +181,9 @@ var ChangeStreamerImpl = class {
   #txCounter = getOrCreateCounter("replication", "transactions", "Count of replicated transactions");
   #changeCounter = getOrCreateCounter("replication", "changes", "Count of replicated changes (DML or DDL statements)");
   #latestStatus;
+  #purgeLock;
   #stream;
-  constructor(lc, shard, taskID, discoveryAddress, discoveryProtocol, changeDB, replicaVersion, source, replicationStatusPublisher,
+  constructor(lc, shard, taskID, discoveryAddress, discoveryProtocol, changeDB, replicaVersion, source, replicationStatusPublisher, initialPurgeLock, autoReset, opts, setTimeoutFn = setTimeout) {
     this.id = `change-streamer`;
     this.#lc = lc.withContext("component", "change-streamer");
     this.#shard = shard;
@@ -193,9 +194,10 @@ var ChangeStreamerImpl = class {
       "status",
       consumed[1],
       consumed[2]
-    ]), (err) => this.stop(err),
-    this.#forwarder = new Forwarder(lc, { flowControlConsensusPaddingSeconds });
+    ]), (err) => this.stop(err), opts);
+    this.#forwarder = new Forwarder(lc, { flowControlConsensusPaddingSeconds: opts.flowControlConsensusPaddingSeconds });
     this.#replicationStatusPublisher = replicationStatusPublisher;
+    this.#purgeLock = initialPurgeLock;
     this.#autoReset = autoReset;
     this.#state = new RunningState(this.id, void 0, setTimeoutFn);
     this.#latestStatus = { tag: "status" };
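Note (illustrative, not part of the package): the constructor change above routes the single TuningOptions object to both components: the Storer now receives the whole object (where 1.3.0 passed the backPressureLimitHeapProportion value), while the Forwarder is handed only flowControlConsensusPaddingSeconds. A stand-alone sketch of that split; the types below are simplified stand-ins, since the real StorerOptions shape is defined in ./storer.ts and is not part of this hunk.

    // Stand-in types; only flowControlConsensusPaddingSeconds is known from
    // this diff to be the forwarder-specific field.
    type StorerOptions = Record<string, unknown>;
    type TuningOptions = StorerOptions & {flowControlConsensusPaddingSeconds: number};

    function splitTuning(opts: TuningOptions) {
      const {flowControlConsensusPaddingSeconds} = opts;
      return {
        storerOpts: opts,                              // the Storer takes the whole TuningOptions object
        forwarderOpts: {flowControlConsensusPaddingSeconds},
      };
    }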
@@ -205,7 +207,8 @@ var ChangeStreamerImpl = class {
     this.#forwarder.startProgressMonitor();
     const lagReport = await this.#source.startLagReporter();
     if (lagReport) this.#latestStatus.lagReport = lagReport;
-    await this.#storer.assumeOwnership();
+    await this.#storer.assumeOwnership(this.#purgeLock);
+    this.#purgeLock = null;
     const flushBytesThreshold = getDefaultHighWaterMark(false);
     while (this.#state.shouldRun()) {
       let err;
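Note (illustrative, not part of the package): the two added lines above are a one-shot handoff. The purge lock received at construction time is passed to the Storer exactly once, when ownership of the change DB is assumed, and the field is then cleared, presumably so a later iteration or retry of run() passes null rather than a stale lock. A minimal, self-contained sketch of that pattern; PurgeLock and Storer here are stand-in interfaces, since their real definitions live in ./storer.ts and are not shown in this diff.

    // Stand-in types for illustration only.
    interface PurgeLock { release(): void; }
    interface Storer { assumeOwnership(lock: PurgeLock | null): Promise<void>; }

    class Owner {
      #purgeLock: PurgeLock | null;

      constructor(initialPurgeLock: PurgeLock | null) {
        // Hold the lock only until ownership is transferred in run().
        this.#purgeLock = initialPurgeLock;
      }

      async run(storer: Storer): Promise<void> {
        // Hand the lock to the storer exactly once, then drop the reference
        // so any subsequent call passes null instead of a stale lock.
        await storer.assumeOwnership(this.#purgeLock);
        this.#purgeLock = null;
      }
    }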
@@ -1 +1 @@
-
{"version":3,"file":"change-streamer-service.js","names":["#lc","#shard","#changeDB","#replicaVersion","#source","#storer","#forwarder","#replicationStatusPublisher","#autoReset","#state","#initialWatermarks","#serving","#txCounter","#changeCounter","#stream","#latestStatus","#handleControlMessage","#purgeOldChanges"],"sources":["../../../../../../zero-cache/src/services/change-streamer/change-streamer-service.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {resolver} from '@rocicorp/resolver';\nimport {getDefaultHighWaterMark} from 'node:stream';\nimport {unreachable} from '../../../../shared/src/asserts.ts';\nimport {promiseVoid} from '../../../../shared/src/resolved-promises.ts';\nimport {publishCriticalEvent} from '../../observability/events.ts';\nimport {getOrCreateCounter} from '../../observability/metrics.ts';\nimport {\n min,\n type AtLeastOne,\n type LexiVersion,\n} from '../../types/lexi-version.ts';\nimport type {PostgresDB} from '../../types/pg.ts';\nimport type {ShardID} from '../../types/shards.ts';\nimport type {Source} from '../../types/streams.ts';\nimport {Subscription} from '../../types/subscription.ts';\nimport type {\n ChangeSource,\n ChangeStream,\n} from '../change-source/change-source.ts';\nimport {\n type ChangeStreamControl,\n type ChangeStreamData,\n} from '../change-source/protocol/current/downstream.ts';\nimport {\n publishReplicationError,\n replicationStatusError,\n type ReplicationStatusPublisher,\n} from '../replicator/replication-status.ts';\nimport type {SubscriptionState} from '../replicator/schema/replication-state.ts';\nimport {\n DEFAULT_MAX_RETRY_DELAY_MS,\n RunningState,\n UnrecoverableError,\n} from '../running-state.ts';\nimport {\n type ChangeStreamerService,\n type Downstream,\n type Status,\n type SubscriberContext,\n} from './change-streamer.ts';\nimport * as ErrorType from './error-type-enum.ts';\nimport {Forwarder} from './forwarder.ts';\nimport {initChangeStreamerSchema} from './schema/init.ts';\nimport {\n AutoResetSignal,\n ensureReplicationConfig,\n markResetRequired,\n} from './schema/tables.ts';\nimport {Storer} from './storer.ts';\nimport {Subscriber} from './subscriber.ts';\n\n/**\n * Performs initialization and schema migrations to initialize a ChangeStreamerImpl.\n */\nexport async function initializeStreamer(\n lc: LogContext,\n shard: ShardID,\n taskID: string,\n discoveryAddress: string,\n discoveryProtocol: string,\n changeDB: PostgresDB,\n changeSource: ChangeSource,\n replicationStatusPublisher: ReplicationStatusPublisher,\n subscriptionState: SubscriptionState,\n autoReset: boolean,\n backPressureLimitHeapProportion: number,\n flowControlConsensusPaddingSeconds: number,\n setTimeoutFn = setTimeout,\n): Promise<ChangeStreamerService> {\n // Make sure the ChangeLog DB is set up.\n await initChangeStreamerSchema(lc, changeDB, shard);\n await ensureReplicationConfig(\n lc,\n changeDB,\n subscriptionState,\n shard,\n autoReset,\n setTimeoutFn,\n );\n\n const {replicaVersion} = subscriptionState;\n return new ChangeStreamerImpl(\n lc,\n shard,\n taskID,\n discoveryAddress,\n discoveryProtocol,\n changeDB,\n replicaVersion,\n changeSource,\n replicationStatusPublisher,\n autoReset,\n backPressureLimitHeapProportion,\n flowControlConsensusPaddingSeconds,\n setTimeoutFn,\n );\n}\n\nconst REPLICATION_STATUS_ERROR_DELAY_THRESHOLD_MS = 5000;\n\n/**\n * Internally all Downstream messages (not just commits) are given a watermark.\n * These are used for internal ordering for:\n * 1. 
Replaying new changes in the Storer\n * 2. Filtering old changes in the Subscriber\n *\n * However, only the watermark for `Commit` messages are exposed to\n * subscribers, as that is the only semantically correct watermark to\n * use for tracking a position in a replication stream.\n */\nexport type WatermarkedChange = [watermark: string, ChangeStreamData];\n\n/**\n * Upstream-agnostic dispatch of messages in a {@link ChangeStreamMessage} to a\n * {@link Forwarder} and {@link Storer} to execute the forward-store-ack\n * procedure described in {@link ChangeStreamer}.\n *\n * ### Subscriber Catchup\n *\n * Connecting clients first need to be \"caught up\" to the current watermark\n * (from stored change log entries) before new entries are forwarded to\n * them. This is non-trivial because the replication stream may be in the\n * middle of a pending streamed Transaction for which some entries have\n * already been forwarded but are not yet committed to the store.\n *\n *\n * ```\n * ------------------------------- - - - - - - - - - - - - - - - - - - -\n * | Historic changes in storage | Pending (streamed) tx | Next tx\n * ------------------------------- - - - - - - - - - - - - - - - - - - -\n * Replication stream\n * > > > > > > > > >\n * ^ ---> required catchup ---> ^\n * Subscriber watermark Subscription begins\n * ```\n *\n * Preemptively buffering the changes of every pending transaction\n * would be wasteful and consume too much memory for large transactions.\n *\n * Instead, the streamer synchronously dispatches changes and subscriptions\n * to the {@link Forwarder} and the {@link Storer} such that the two\n * components are aligned as to where in the stream the subscription started.\n * The two components then coordinate catchup and handoff via the\n * {@link Subscriber} object with the following algorithm:\n *\n * * If the streamer is in the middle of a pending Transaction, the\n * Subscriber is \"queued\" on both the Forwarder and the Storer. In this\n * state, new changes are *not* forwarded to the Subscriber, and catchup\n * is not yet executed.\n * * Once the commit message for the pending Transaction is processed\n * by the Storer, it begins catchup on the Subscriber (with a READONLY\n * snapshot so that it does not block subsequent storage operations).\n * This catchup is thus guaranteed to load the change log entries of\n * that last Transaction.\n * * When the Forwarder processes that same commit message, it moves the\n * Subscriber from the \"queued\" to the \"active\" set of clients such that\n * the Subscriber begins receiving new changes, starting from the next\n * Transaction.\n * * The Subscriber does not forward those changes, however, if its catchup\n * is not complete. Until then, it buffers the changes in memory.\n * * Once catchup is complete, the buffered changes are immediately sent\n * and the Subscriber henceforth forwards changes as they are received.\n *\n * In the (common) case where the streamer is not in the middle of a pending\n * transaction when a subscription begins, the Storer begins catchup\n * immediately and the Forwarder directly adds the Subscriber to its active\n * set. However, the Subscriber still buffers any forwarded messages until\n * its catchup is complete.\n *\n * ### Watermarks and ordering\n *\n * The ChangeStreamerService depends on its {@link ChangeSource} to send\n * changes in contiguous [`begin`, `data` ..., `data`, `commit`] sequences\n * in commit order. 
This follows Postgres's Logical Replication Protocol\n * Message Flow:\n *\n * https://www.postgresql.org/docs/16/protocol-logical-replication.html#PROTOCOL-LOGICAL-MESSAGES-FLOW\n *\n * > The logical replication protocol sends individual transactions one by one.\n * > This means that all messages between a pair of Begin and Commit messages belong to the same transaction.\n *\n * In order to correctly replay (new) and filter (old) messages to subscribers\n * at different points in the replication stream, these changes must be assigned\n * watermarks such that they preserve the order in which they were received\n * from the ChangeSource.\n *\n * A previous implementation incorrectly derived these watermarks from the Postgres\n * Log Sequence Numbers (LSN) of each message. However, LSNs from concurrent,\n * non-conflicting transactions can overlap, which can result in a `begin` message\n * with an earlier LSN arriving after a `commit` message. For example, the\n * changes for these transactions:\n *\n * ```\n * LSN: 1 2 3 4 5 6 7 8 9 10\n * tx1: begin data data data commit\n * tx2: begin data data data commit\n * ```\n *\n * will arrive as:\n *\n * ```\n * begin1, data2, data4, data6, commit8, begin3, data5, data7, data9, commit10\n * ```\n *\n * Thus, LSN of non-commit messages are not suitable for tracking the sorting\n * order of the replication stream.\n *\n * Instead, the ChangeStreamer uses the following algorithm for deterministic\n * catchup and filtering of changes:\n *\n * * A `commit` message is assigned to a watermark corresponding to its LSN.\n * These are guaranteed to be in commit order by definition.\n *\n * * `begin` and `data` messages are assigned to the watermark of the\n * preceding `commit` (the previous transaction, or the replication\n * slot's starting LSN) plus 1. This guarantees that they will be sorted\n * after the previously commit transaction even if their LSNs came before it.\n * This is referred to as the `preCommitWatermark`.\n *\n * * In the ChangeLog DB, messages have a secondary sort column `pos`, which is\n * the position of the message within its transaction, with the `begin` message\n * starting at `0`. This guarantees that `begin` and `data` messages will be\n * fetched in the original ChangeSource order during catchup.\n *\n * `begin` and `data` messages share the same watermark, but this is sufficient for\n * Subscriber filtering because subscribers only know about the `commit` watermarks\n * exposed in the `Downstream` `Commit` message. The Subscriber object thus compares\n * the internal watermarks of the incoming messages against the commit watermark of\n * the caller, updating the watermark at every `Commit` message that is forwarded.\n *\n * ### Cleanup\n *\n * As mentioned in the {@link ChangeStreamer} documentation: \"the ChangeStreamer\n * uses a combination of [the \"initial\", i.e. 
backup-derived watermark and] ACK\n * responses from connected subscribers to determine the watermark up\n * to which it is safe to purge old change log entries.\"\n *\n * More concretely:\n *\n * * The `initial`, backup-derived watermark is the earliest to which cleanup\n * should ever happen.\n *\n * * However, it is possible for the replica backup to be *ahead* of a connected\n * subscriber; and if a network error causes that subscriber to retry from its\n * last watermark, the change streamer must support it.\n *\n * Thus, before cleaning up to an `initial` backup-derived watermark, the change\n * streamer first confirms that all connected subscribers have also passed\n * that watermark.\n */\nclass ChangeStreamerImpl implements ChangeStreamerService {\n readonly id: string;\n readonly #lc: LogContext;\n readonly #shard: ShardID;\n readonly #changeDB: PostgresDB;\n readonly #replicaVersion: string;\n readonly #source: ChangeSource;\n readonly #storer: Storer;\n readonly #forwarder: Forwarder;\n readonly #replicationStatusPublisher: ReplicationStatusPublisher;\n\n readonly #autoReset: boolean;\n readonly #state: RunningState;\n readonly #initialWatermarks = new Set<string>();\n\n // Starting the (Postgres) ChangeStream results in killing the previous\n // Postgres subscriber, potentially creating a gap in which the old\n // change-streamer has shut down and the new change-streamer has not yet\n // been recognized as \"healthy\" (and thus does not get any requests).\n //\n // To minimize this gap, delay starting the ChangeStream until the first\n // request from a `serving` replicator, indicating that higher level\n // load-balancing / routing logic has begun routing requests to this task.\n readonly #serving = resolver();\n\n readonly #txCounter = getOrCreateCounter(\n 'replication',\n 'transactions',\n 'Count of replicated transactions',\n );\n readonly #changeCounter = getOrCreateCounter(\n 'replication',\n 'changes',\n 'Count of replicated changes (DML or DDL statements)',\n );\n\n #latestStatus: Status;\n #stream: ChangeStream | undefined;\n\n constructor(\n lc: LogContext,\n shard: ShardID,\n taskID: string,\n discoveryAddress: string,\n discoveryProtocol: string,\n changeDB: PostgresDB,\n replicaVersion: string,\n source: ChangeSource,\n replicationStatusPublisher: ReplicationStatusPublisher,\n autoReset: boolean,\n backPressureLimitHeapProportion: number,\n flowControlConsensusPaddingSeconds: number,\n setTimeoutFn = setTimeout,\n ) {\n this.id = `change-streamer`;\n this.#lc = lc.withContext('component', 'change-streamer');\n this.#shard = shard;\n this.#changeDB = changeDB;\n this.#replicaVersion = replicaVersion;\n this.#source = source;\n this.#storer = new Storer(\n lc,\n shard,\n taskID,\n discoveryAddress,\n discoveryProtocol,\n changeDB,\n replicaVersion,\n consumed => this.#stream?.acks.push(['status', consumed[1], consumed[2]]),\n err => this.stop(err),\n backPressureLimitHeapProportion,\n );\n this.#forwarder = new Forwarder(lc, {\n flowControlConsensusPaddingSeconds,\n });\n this.#replicationStatusPublisher = replicationStatusPublisher;\n this.#autoReset = autoReset;\n this.#state = new RunningState(this.id, undefined, setTimeoutFn);\n this.#latestStatus = {tag: 'status'};\n }\n\n async run() {\n this.#lc.info?.('starting change stream');\n\n this.#forwarder.startProgressMonitor();\n\n const lagReport = await this.#source.startLagReporter();\n if (lagReport) {\n this.#latestStatus.lagReport = lagReport;\n }\n\n // Once this change-streamer acquires \"ownership\" of 
the change DB,\n // it is safe to start the storer.\n await this.#storer.assumeOwnership();\n\n // The threshold in (estimated number of) bytes to send() on subscriber\n // websockets before `await`-ing the I/O buffers to be ready for more.\n const flushBytesThreshold = getDefaultHighWaterMark(false);\n\n while (this.#state.shouldRun()) {\n let err: unknown;\n let watermark: string | null = null;\n let unflushedBytes = 0;\n try {\n const {lastWatermark, backfillRequests} =\n await this.#storer.getStartStreamInitializationParameters();\n const stream = await this.#source.startStream(\n lastWatermark,\n backfillRequests,\n );\n this.#storer.run().catch(e => stream.changes.cancel(e));\n\n this.#stream = stream;\n if (\n this.#state.resetBackoff() >\n REPLICATION_STATUS_ERROR_DELAY_THRESHOLD_MS\n ) {\n // After recovering from a backoff for which a replication status\n // error was published, publish an OK status\n this.#replicationStatusPublisher.publish(\n this.#lc,\n 'Replicating',\n `Replicating from ${lastWatermark}`,\n );\n }\n watermark = null;\n\n for await (const change of stream.changes) {\n const [type, msg] = change;\n switch (type) {\n case 'status':\n if (msg.ack) {\n this.#storer.status(change); // storer acks once it gets through its queue\n }\n if (msg.lagReport) {\n // Lag reports are not stored in the cdc change log, but rather\n // only forwarded on \"live\" connections. When a new subscriber\n // is catching up, it is initialized with the #latestStatus\n // from which it can measure lag while catching up.\n this.#latestStatus.lagReport = msg.lagReport;\n this.#forwarder.sendStatus(this.#latestStatus);\n }\n continue;\n case 'control':\n await this.#handleControlMessage(msg);\n continue; // control messages are not stored/forwarded\n case 'begin':\n watermark = change[2].commitWatermark;\n break;\n case 'commit':\n if (watermark !== change[2].watermark) {\n throw new UnrecoverableError(\n `commit watermark ${change[2].watermark} does not match 'begin' watermark ${watermark}`,\n );\n }\n this.#txCounter.add(1);\n break;\n default:\n if (type === 'data') {\n this.#changeCounter.add(1);\n }\n if (watermark === null) {\n throw new UnrecoverableError(\n `${type} change (${msg.tag}) received before 'begin' message`,\n );\n }\n break;\n }\n\n const entry: WatermarkedChange = [watermark, change];\n unflushedBytes += this.#storer.store(entry);\n if (unflushedBytes < flushBytesThreshold) {\n // pipeline changes until flushBytesThreshold\n this.#forwarder.forward(entry);\n } else {\n // Wait for messages to clear socket buffers to ensure that they\n // make their way to subscribers. 
Without this `await`, the\n // messages end up being buffered in this process, which:\n // (1) results in memory pressure and increased GC activity\n // (2) prevents subscribers from processing the messages as they\n // arrive, instead getting them in a large batch after being\n // idle while they were queued (causing further delays).\n await this.#forwarder.forwardWithFlowControl(entry);\n unflushedBytes = 0;\n }\n\n if (type === 'commit' || type === 'rollback') {\n watermark = null;\n }\n\n // Allow the storer to exert back pressure.\n const readyForMore = this.#storer.readyForMore();\n if (readyForMore) {\n await readyForMore;\n }\n }\n } catch (e) {\n err = e;\n } finally {\n this.#stream?.changes.cancel();\n this.#stream = undefined;\n }\n\n // When the change stream is interrupted, abort any pending transaction.\n if (watermark) {\n this.#lc.warn?.(`aborting interrupted transaction ${watermark}`);\n this.#storer.abort();\n this.#forwarder.forward([watermark, ['rollback', {tag: 'rollback'}]]);\n }\n\n // Backoff and drain any pending entries in the storer before reconnecting.\n await Promise.all([\n this.#storer.stop(),\n this.#state.backoff(this.#lc, err),\n this.#state.retryDelay > REPLICATION_STATUS_ERROR_DELAY_THRESHOLD_MS\n ? publishCriticalEvent(\n this.#lc,\n replicationStatusError(this.#lc, 'Replicating', err),\n )\n : promiseVoid,\n ]);\n }\n\n this.#forwarder.stopProgressMonitor();\n this.#lc.info?.('ChangeStreamer stopped');\n }\n\n async #handleControlMessage(msg: ChangeStreamControl[1]) {\n this.#lc.info?.('received control message', msg);\n const {tag} = msg;\n\n switch (tag) {\n case 'reset-required':\n await markResetRequired(this.#changeDB, this.#shard);\n await publishReplicationError(\n this.#lc,\n 'Replicating',\n msg.message ?? 'Resync required',\n msg.errorDetails,\n );\n if (this.#autoReset) {\n this.#lc.warn?.('shutting down for auto-reset');\n await this.stop(new AutoResetSignal());\n }\n break;\n default:\n unreachable(tag);\n }\n }\n\n subscribe(ctx: SubscriberContext): Promise<Source<Downstream>> {\n const {protocolVersion, id, mode, replicaVersion, watermark} = ctx;\n if (mode === 'serving') {\n this.#serving.resolve();\n }\n const downstream = Subscription.create<Downstream>({\n cleanup: () => this.#forwarder.remove(subscriber),\n });\n const subscriber = new Subscriber(\n protocolVersion,\n id,\n watermark,\n downstream,\n () => this.#latestStatus,\n );\n if (replicaVersion !== this.#replicaVersion) {\n this.#lc.warn?.(\n `rejecting subscriber at replica version ${replicaVersion}`,\n );\n subscriber.close(\n ErrorType.WrongReplicaVersion,\n `current replica version is ${\n this.#replicaVersion\n } (requested ${replicaVersion})`,\n );\n } else {\n this.#lc.debug?.(`adding subscriber ${subscriber.id}`);\n\n this.#forwarder.add(subscriber);\n this.#storer.catchup(subscriber, mode);\n }\n return Promise.resolve(downstream);\n }\n\n scheduleCleanup(watermark: string) {\n const origSize = this.#initialWatermarks.size;\n this.#initialWatermarks.add(watermark);\n\n if (origSize === 0) {\n this.#state.setTimeout(() => this.#purgeOldChanges(), CLEANUP_DELAY_MS);\n }\n }\n\n async getChangeLogState(): Promise<{\n replicaVersion: string;\n minWatermark: string;\n }> {\n const minWatermark = await this.#storer.getMinWatermarkForCatchup();\n if (!minWatermark) {\n this.#lc.warn?.(\n `Unexpected empty changeLog. Resync if \"Local replica watermark\" errors arise`,\n );\n }\n return {\n replicaVersion: this.#replicaVersion,\n minWatermark: minWatermark ?? 
this.#replicaVersion,\n };\n }\n\n /**\n * Makes a best effort to purge the change log. In the event of a database\n * error, exceptions will be logged and swallowed, so this method is safe\n * to run in a timeout.\n */\n async #purgeOldChanges(): Promise<void> {\n const initial = [...this.#initialWatermarks];\n if (initial.length === 0) {\n this.#lc.warn?.('No initial watermarks to check for cleanup'); // Not expected.\n return;\n }\n const current = [...this.#forwarder.getAcks()];\n if (current.length === 0) {\n // Also not expected, but possible (e.g. subscriber connects, then disconnects).\n // Bail to be safe.\n this.#lc.warn?.('No subscribers to confirm cleanup');\n return;\n }\n try {\n const earliestInitial = min(...(initial as AtLeastOne<LexiVersion>));\n const earliestCurrent = min(...(current as AtLeastOne<LexiVersion>));\n if (earliestCurrent < earliestInitial) {\n this.#lc.info?.(\n `At least one client is behind backup (${earliestCurrent} < ${earliestInitial})`,\n );\n } else {\n this.#lc.info?.(`Purging changes before ${earliestInitial} ...`);\n const start = performance.now();\n const deleted = await this.#storer.purgeRecordsBefore(earliestInitial);\n const elapsed = (performance.now() - start).toFixed(2);\n this.#lc.info?.(\n `Purged ${deleted} changes before ${earliestInitial} (${elapsed} ms)`,\n );\n this.#initialWatermarks.delete(earliestInitial);\n }\n } catch (e) {\n this.#lc.warn?.(`error purging change log`, e);\n } finally {\n if (this.#initialWatermarks.size) {\n // If there are unpurged watermarks to check, schedule the next purge.\n this.#state.setTimeout(() => this.#purgeOldChanges(), CLEANUP_DELAY_MS);\n }\n }\n }\n\n async stop(err?: unknown) {\n this.#state.stop(this.#lc, err);\n this.#stream?.changes.cancel();\n await this.#storer.stop();\n await this.#source.stop();\n }\n}\n\n// The delay between receiving an initial, backup-based watermark\n// and performing a check of whether to purge records before it.\n// This delay should be long enough to handle situations like the following:\n//\n// 1. `litestream restore` downloads a backup for the `replication-manager`\n// 2. `replication-manager` starts up and runs this `change-streamer`\n// 3. `zero-cache`s that are running on a different replica connect to this\n// `change-streamer` after exponential backoff retries.\n//\n// It is possible for a `zero-cache`[3] to be behind the backup restored [1].\n// This cleanup delay (30 seconds) is thus set to be a value comfortably\n// longer than the max delay for exponential backoff (10 seconds) in\n// `services/running-state.ts`. 
This allows the `zero-cache` [3] to reconnect\n// so that the `change-streamer` can track its progress and know when it has\n// surpassed the initial watermark of the backup [1].\nconst CLEANUP_DELAY_MS = DEFAULT_MAX_RETRY_DELAY_MS * 3;\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;AAuDA,eAAsB,mBACpB,IACA,OACA,QACA,kBACA,mBACA,UACA,cACA,4BACA,mBACA,WACA,iCACA,oCACA,eAAe,YACiB;AAEhC,OAAM,yBAAyB,IAAI,UAAU,MAAM;AACnD,OAAM,wBACJ,IACA,UACA,mBACA,OACA,WACA,aACD;CAED,MAAM,EAAC,mBAAkB;AACzB,QAAO,IAAI,mBACT,IACA,OACA,QACA,kBACA,mBACA,UACA,gBACA,cACA,4BACA,WACA,iCACA,oCACA,aACD;;AAGH,IAAM,8CAA8C;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAwJpD,IAAM,qBAAN,MAA0D;CACxD;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CAEA;CACA;CACA,qCAA8B,IAAI,KAAa;CAU/C,WAAoB,UAAU;CAE9B,aAAsB,mBACpB,eACA,gBACA,mCACD;CACD,iBAA0B,mBACxB,eACA,WACA,sDACD;CAED;CACA;CAEA,YACE,IACA,OACA,QACA,kBACA,mBACA,UACA,gBACA,QACA,4BACA,WACA,iCACA,oCACA,eAAe,YACf;AACA,OAAK,KAAK;AACV,QAAA,KAAW,GAAG,YAAY,aAAa,kBAAkB;AACzD,QAAA,QAAc;AACd,QAAA,WAAiB;AACjB,QAAA,iBAAuB;AACvB,QAAA,SAAe;AACf,QAAA,SAAe,IAAI,OACjB,IACA,OACA,QACA,kBACA,mBACA,UACA,iBACA,aAAY,MAAA,QAAc,KAAK,KAAK;GAAC;GAAU,SAAS;GAAI,SAAS;GAAG,CAAC,GACzE,QAAO,KAAK,KAAK,IAAI,EACrB,gCACD;AACD,QAAA,YAAkB,IAAI,UAAU,IAAI,EAClC,oCACD,CAAC;AACF,QAAA,6BAAmC;AACnC,QAAA,YAAkB;AAClB,QAAA,QAAc,IAAI,aAAa,KAAK,IAAI,KAAA,GAAW,aAAa;AAChE,QAAA,eAAqB,EAAC,KAAK,UAAS;;CAGtC,MAAM,MAAM;AACV,QAAA,GAAS,OAAO,yBAAyB;AAEzC,QAAA,UAAgB,sBAAsB;EAEtC,MAAM,YAAY,MAAM,MAAA,OAAa,kBAAkB;AACvD,MAAI,UACF,OAAA,aAAmB,YAAY;AAKjC,QAAM,MAAA,OAAa,iBAAiB;EAIpC,MAAM,sBAAsB,wBAAwB,MAAM;AAE1D,SAAO,MAAA,MAAY,WAAW,EAAE;GAC9B,IAAI;GACJ,IAAI,YAA2B;GAC/B,IAAI,iBAAiB;AACrB,OAAI;IACF,MAAM,EAAC,eAAe,qBACpB,MAAM,MAAA,OAAa,wCAAwC;IAC7D,MAAM,SAAS,MAAM,MAAA,OAAa,YAChC,eACA,iBACD;AACD,UAAA,OAAa,KAAK,CAAC,OAAM,MAAK,OAAO,QAAQ,OAAO,EAAE,CAAC;AAEvD,UAAA,SAAe;AACf,QACE,MAAA,MAAY,cAAc,GAC1B,4CAIA,OAAA,2BAAiC,QAC/B,MAAA,IACA,eACA,oBAAoB,gBACrB;AAEH,gBAAY;AAEZ,eAAW,MAAM,UAAU,OAAO,SAAS;KACzC,MAAM,CAAC,MAAM,OAAO;AACpB,aAAQ,MAAR;MACE,KAAK;AACH,WAAI,IAAI,IACN,OAAA,OAAa,OAAO,OAAO;AAE7B,WAAI,IAAI,WAAW;AAKjB,cAAA,aAAmB,YAAY,IAAI;AACnC,cAAA,UAAgB,WAAW,MAAA,aAAmB;;AAEhD;MACF,KAAK;AACH,aAAM,MAAA,qBAA2B,IAAI;AACrC;MACF,KAAK;AACH,mBAAY,OAAO,GAAG;AACtB;MACF,KAAK;AACH,WAAI,cAAc,OAAO,GAAG,UAC1B,OAAM,IAAI,mBACR,oBAAoB,OAAO,GAAG,UAAU,oCAAoC,YAC7E;AAEH,aAAA,UAAgB,IAAI,EAAE;AACtB;MACF;AACE,WAAI,SAAS,OACX,OAAA,cAAoB,IAAI,EAAE;AAE5B,WAAI,cAAc,KAChB,OAAM,IAAI,mBACR,GAAG,KAAK,WAAW,IAAI,IAAI,mCAC5B;AAEH;;KAGJ,MAAM,QAA2B,CAAC,WAAW,OAAO;AACpD,uBAAkB,MAAA,OAAa,MAAM,MAAM;AAC3C,SAAI,iBAAiB,oBAEnB,OAAA,UAAgB,QAAQ,MAAM;UACzB;AAQL,YAAM,MAAA,UAAgB,uBAAuB,MAAM;AACnD,uBAAiB;;AAGnB,SAAI,SAAS,YAAY,SAAS,WAChC,aAAY;KAId,MAAM,eAAe,MAAA,OAAa,cAAc;AAChD,SAAI,aACF,OAAM;;YAGH,GAAG;AACV,UAAM;aACE;AACR,UAAA,QAAc,QAAQ,QAAQ;AAC9B,UAAA,SAAe,KAAA;;AAIjB,OAAI,WAAW;AACb,UAAA,GAAS,OAAO,oCAAoC,YAAY;AAChE,UAAA,OAAa,OAAO;AACpB,UAAA,UAAgB,QAAQ,CAAC,WAAW,CAAC,YAAY,EAAC,KAAK,YAAW,CAAC,CAAC,CAAC;;AAIvE,SAAM,QAAQ,IAAI;IAChB,MAAA,OAAa,MAAM;IACnB,MAAA,MAAY,QAAQ,MAAA,IAAU,IAAI;IAClC,MAAA,MAAY,aAAa,8CACrB,qBACE,MAAA,IACA,uBAAuB,MAAA,IAAU,eAAe,IAAI,CACrD,GACD;IACL,CAAC;;AAGJ,QAAA,UAAgB,qBAAqB;AACrC,QAAA,GAAS,OAAO,yBAAyB;;CAG3C,OAAA,qBAA4B,KAA6B;AACvD,QAAA,GAAS,OAAO,4BAA4B,IAAI;EAChD,MAAM,EAAC,QAAO;AAEd,UAAQ,KAAR;GACE,KAAK;AACH,UAAM,kBAAkB,MAAA,UAAgB,MAAA,MAAY;AACpD,UAAM,wBACJ,MAAA,IACA,eACA,IAAI,WAAW,mBACf,IAAI,aACL;AACD,QAAI,MAAA,WAAiB;AACnB,WAAA,GAAS,OAAO,+BAA+B;AAC/C,WAAM,KAAK,KAAK,IAAI,iBAAiB,CAAC;;AAExC;GACF,QACE,aAAY,IAAI;;;CAItB,UAAU,
KAAqD;EAC7D,MAAM,EAAC,iBAAiB,IAAI,MAAM,gBAAgB,cAAa;AAC/D,MAAI,SAAS,UACX,OAAA,QAAc,SAAS;EAEzB,MAAM,aAAa,aAAa,OAAmB,EACjD,eAAe,MAAA,UAAgB,OAAO,WAAW,EAClD,CAAC;EACF,MAAM,aAAa,IAAI,WACrB,iBACA,IACA,WACA,kBACM,MAAA,aACP;AACD,MAAI,mBAAmB,MAAA,gBAAsB;AAC3C,SAAA,GAAS,OACP,2CAA2C,iBAC5C;AACD,cAAW,MACT,GACA,8BACE,MAAA,eACD,cAAc,eAAe,GAC/B;SACI;AACL,SAAA,GAAS,QAAQ,qBAAqB,WAAW,KAAK;AAEtD,SAAA,UAAgB,IAAI,WAAW;AAC/B,SAAA,OAAa,QAAQ,YAAY,KAAK;;AAExC,SAAO,QAAQ,QAAQ,WAAW;;CAGpC,gBAAgB,WAAmB;EACjC,MAAM,WAAW,MAAA,kBAAwB;AACzC,QAAA,kBAAwB,IAAI,UAAU;AAEtC,MAAI,aAAa,EACf,OAAA,MAAY,iBAAiB,MAAA,iBAAuB,EAAE,iBAAiB;;CAI3E,MAAM,oBAGH;EACD,MAAM,eAAe,MAAM,MAAA,OAAa,2BAA2B;AACnE,MAAI,CAAC,aACH,OAAA,GAAS,OACP,+EACD;AAEH,SAAO;GACL,gBAAgB,MAAA;GAChB,cAAc,gBAAgB,MAAA;GAC/B;;;;;;;CAQH,OAAA,kBAAwC;EACtC,MAAM,UAAU,CAAC,GAAG,MAAA,kBAAwB;AAC5C,MAAI,QAAQ,WAAW,GAAG;AACxB,SAAA,GAAS,OAAO,6CAA6C;AAC7D;;EAEF,MAAM,UAAU,CAAC,GAAG,MAAA,UAAgB,SAAS,CAAC;AAC9C,MAAI,QAAQ,WAAW,GAAG;AAGxB,SAAA,GAAS,OAAO,oCAAoC;AACpD;;AAEF,MAAI;GACF,MAAM,kBAAkB,IAAI,GAAI,QAAoC;GACpE,MAAM,kBAAkB,IAAI,GAAI,QAAoC;AACpE,OAAI,kBAAkB,gBACpB,OAAA,GAAS,OACP,yCAAyC,gBAAgB,KAAK,gBAAgB,GAC/E;QACI;AACL,UAAA,GAAS,OAAO,0BAA0B,gBAAgB,MAAM;IAChE,MAAM,QAAQ,YAAY,KAAK;IAC/B,MAAM,UAAU,MAAM,MAAA,OAAa,mBAAmB,gBAAgB;IACtE,MAAM,WAAW,YAAY,KAAK,GAAG,OAAO,QAAQ,EAAE;AACtD,UAAA,GAAS,OACP,UAAU,QAAQ,kBAAkB,gBAAgB,IAAI,QAAQ,MACjE;AACD,UAAA,kBAAwB,OAAO,gBAAgB;;WAE1C,GAAG;AACV,SAAA,GAAS,OAAO,4BAA4B,EAAE;YACtC;AACR,OAAI,MAAA,kBAAwB,KAE1B,OAAA,MAAY,iBAAiB,MAAA,iBAAuB,EAAE,iBAAiB;;;CAK7E,MAAM,KAAK,KAAe;AACxB,QAAA,MAAY,KAAK,MAAA,IAAU,IAAI;AAC/B,QAAA,QAAc,QAAQ,QAAQ;AAC9B,QAAM,MAAA,OAAa,MAAM;AACzB,QAAM,MAAA,OAAa,MAAM;;;AAmB7B,IAAM,mBAAmB,6BAA6B"}
+
{"version":3,"file":"change-streamer-service.js","names":["#lc","#shard","#changeDB","#replicaVersion","#source","#storer","#forwarder","#replicationStatusPublisher","#autoReset","#state","#initialWatermarks","#serving","#txCounter","#changeCounter","#stream","#purgeLock","#latestStatus","#handleControlMessage","#purgeOldChanges"],"sources":["../../../../../../zero-cache/src/services/change-streamer/change-streamer-service.ts"],"sourcesContent":["import {getDefaultHighWaterMark} from 'node:stream';\nimport type {LogContext} from '@rocicorp/logger';\nimport {resolver} from '@rocicorp/resolver';\nimport {unreachable} from '../../../../shared/src/asserts.ts';\nimport {promiseVoid} from '../../../../shared/src/resolved-promises.ts';\nimport {publishCriticalEvent} from '../../observability/events.ts';\nimport {getOrCreateCounter} from '../../observability/metrics.ts';\nimport {\n min,\n type AtLeastOne,\n type LexiVersion,\n} from '../../types/lexi-version.ts';\nimport type {PostgresDB} from '../../types/pg.ts';\nimport type {ShardID} from '../../types/shards.ts';\nimport type {Source} from '../../types/streams.ts';\nimport {Subscription} from '../../types/subscription.ts';\nimport type {\n ChangeSource,\n ChangeStream,\n} from '../change-source/change-source.ts';\nimport {\n type ChangeStreamControl,\n type ChangeStreamData,\n} from '../change-source/protocol/current/downstream.ts';\nimport {\n publishReplicationError,\n replicationStatusError,\n type ReplicationStatusPublisher,\n} from '../replicator/replication-status.ts';\nimport type {SubscriptionState} from '../replicator/schema/replication-state.ts';\nimport {\n DEFAULT_MAX_RETRY_DELAY_MS,\n RunningState,\n UnrecoverableError,\n} from '../running-state.ts';\nimport {\n type ChangeStreamerService,\n type Downstream,\n type Status,\n type SubscriberContext,\n} from './change-streamer.ts';\nimport * as ErrorType from './error-type-enum.ts';\nimport {Forwarder} from './forwarder.ts';\nimport {initChangeStreamerSchema} from './schema/init.ts';\nimport {\n AutoResetSignal,\n ensureReplicationConfig,\n markResetRequired,\n} from './schema/tables.ts';\nimport {\n Storer,\n type PurgeLock,\n type TuningOptions as StorerOptions,\n} from './storer.ts';\nimport {Subscriber} from './subscriber.ts';\n\nexport type TuningOptions = StorerOptions & {\n flowControlConsensusPaddingSeconds: number;\n};\n\n/**\n * Performs initialization and schema migrations to initialize a ChangeStreamerImpl.\n */\nexport async function initializeStreamer(\n lc: LogContext,\n shard: ShardID,\n taskID: string,\n discoveryAddress: string,\n discoveryProtocol: string,\n changeDB: PostgresDB,\n changeSource: ChangeSource,\n replicationStatusPublisher: ReplicationStatusPublisher,\n subscriptionState: SubscriptionState,\n purgeLock: PurgeLock | null,\n autoReset: boolean,\n opts: TuningOptions,\n setTimeoutFn = setTimeout,\n): Promise<ChangeStreamerService> {\n // Make sure the ChangeLog DB is set up.\n await initChangeStreamerSchema(lc, changeDB, shard);\n await ensureReplicationConfig(\n lc,\n changeDB,\n subscriptionState,\n shard,\n autoReset,\n setTimeoutFn,\n );\n\n const {replicaVersion} = subscriptionState;\n return new ChangeStreamerImpl(\n lc,\n shard,\n taskID,\n discoveryAddress,\n discoveryProtocol,\n changeDB,\n replicaVersion,\n changeSource,\n replicationStatusPublisher,\n purgeLock,\n autoReset,\n opts,\n setTimeoutFn,\n );\n}\n\nconst REPLICATION_STATUS_ERROR_DELAY_THRESHOLD_MS = 5000;\n\n/**\n * Internally all Downstream messages (not just commits) are given a 
watermark.\n * These are used for internal ordering for:\n * 1. Replaying new changes in the Storer\n * 2. Filtering old changes in the Subscriber\n *\n * However, only the watermark for `Commit` messages are exposed to\n * subscribers, as that is the only semantically correct watermark to\n * use for tracking a position in a replication stream.\n */\nexport type WatermarkedChange = [watermark: string, ChangeStreamData];\n\n/**\n * Upstream-agnostic dispatch of messages in a {@link ChangeStreamMessage} to a\n * {@link Forwarder} and {@link Storer} to execute the forward-store-ack\n * procedure described in {@link ChangeStreamer}.\n *\n * ### Subscriber Catchup\n *\n * Connecting clients first need to be \"caught up\" to the current watermark\n * (from stored change log entries) before new entries are forwarded to\n * them. This is non-trivial because the replication stream may be in the\n * middle of a pending streamed Transaction for which some entries have\n * already been forwarded but are not yet committed to the store.\n *\n *\n * ```\n * ------------------------------- - - - - - - - - - - - - - - - - - - -\n * | Historic changes in storage | Pending (streamed) tx | Next tx\n * ------------------------------- - - - - - - - - - - - - - - - - - - -\n * Replication stream\n * > > > > > > > > >\n * ^ ---> required catchup ---> ^\n * Subscriber watermark Subscription begins\n * ```\n *\n * Preemptively buffering the changes of every pending transaction\n * would be wasteful and consume too much memory for large transactions.\n *\n * Instead, the streamer synchronously dispatches changes and subscriptions\n * to the {@link Forwarder} and the {@link Storer} such that the two\n * components are aligned as to where in the stream the subscription started.\n * The two components then coordinate catchup and handoff via the\n * {@link Subscriber} object with the following algorithm:\n *\n * * If the streamer is in the middle of a pending Transaction, the\n * Subscriber is \"queued\" on both the Forwarder and the Storer. In this\n * state, new changes are *not* forwarded to the Subscriber, and catchup\n * is not yet executed.\n * * Once the commit message for the pending Transaction is processed\n * by the Storer, it begins catchup on the Subscriber (with a READONLY\n * snapshot so that it does not block subsequent storage operations).\n * This catchup is thus guaranteed to load the change log entries of\n * that last Transaction.\n * * When the Forwarder processes that same commit message, it moves the\n * Subscriber from the \"queued\" to the \"active\" set of clients such that\n * the Subscriber begins receiving new changes, starting from the next\n * Transaction.\n * * The Subscriber does not forward those changes, however, if its catchup\n * is not complete. Until then, it buffers the changes in memory.\n * * Once catchup is complete, the buffered changes are immediately sent\n * and the Subscriber henceforth forwards changes as they are received.\n *\n * In the (common) case where the streamer is not in the middle of a pending\n * transaction when a subscription begins, the Storer begins catchup\n * immediately and the Forwarder directly adds the Subscriber to its active\n * set. However, the Subscriber still buffers any forwarded messages until\n * its catchup is complete.\n *\n * ### Watermarks and ordering\n *\n * The ChangeStreamerService depends on its {@link ChangeSource} to send\n * changes in contiguous [`begin`, `data` ..., `data`, `commit`] sequences\n * in commit order. 
This follows Postgres's Logical Replication Protocol\n * Message Flow:\n *\n * https://www.postgresql.org/docs/16/protocol-logical-replication.html#PROTOCOL-LOGICAL-MESSAGES-FLOW\n *\n * > The logical replication protocol sends individual transactions one by one.\n * > This means that all messages between a pair of Begin and Commit messages belong to the same transaction.\n *\n * In order to correctly replay (new) and filter (old) messages to subscribers\n * at different points in the replication stream, these changes must be assigned\n * watermarks such that they preserve the order in which they were received\n * from the ChangeSource.\n *\n * A previous implementation incorrectly derived these watermarks from the Postgres\n * Log Sequence Numbers (LSN) of each message. However, LSNs from concurrent,\n * non-conflicting transactions can overlap, which can result in a `begin` message\n * with an earlier LSN arriving after a `commit` message. For example, the\n * changes for these transactions:\n *\n * ```\n * LSN: 1 2 3 4 5 6 7 8 9 10\n * tx1: begin data data data commit\n * tx2: begin data data data commit\n * ```\n *\n * will arrive as:\n *\n * ```\n * begin1, data2, data4, data6, commit8, begin3, data5, data7, data9, commit10\n * ```\n *\n * Thus, LSN of non-commit messages are not suitable for tracking the sorting\n * order of the replication stream.\n *\n * Instead, the ChangeStreamer uses the following algorithm for deterministic\n * catchup and filtering of changes:\n *\n * * A `commit` message is assigned to a watermark corresponding to its LSN.\n * These are guaranteed to be in commit order by definition.\n *\n * * `begin` and `data` messages are assigned to the watermark of the\n * preceding `commit` (the previous transaction, or the replication\n * slot's starting LSN) plus 1. This guarantees that they will be sorted\n * after the previously commit transaction even if their LSNs came before it.\n * This is referred to as the `preCommitWatermark`.\n *\n * * In the ChangeLog DB, messages have a secondary sort column `pos`, which is\n * the position of the message within its transaction, with the `begin` message\n * starting at `0`. This guarantees that `begin` and `data` messages will be\n * fetched in the original ChangeSource order during catchup.\n *\n * `begin` and `data` messages share the same watermark, but this is sufficient for\n * Subscriber filtering because subscribers only know about the `commit` watermarks\n * exposed in the `Downstream` `Commit` message. The Subscriber object thus compares\n * the internal watermarks of the incoming messages against the commit watermark of\n * the caller, updating the watermark at every `Commit` message that is forwarded.\n *\n * ### Cleanup\n *\n * As mentioned in the {@link ChangeStreamer} documentation: \"the ChangeStreamer\n * uses a combination of [the \"initial\", i.e. 
backup-derived watermark and] ACK\n * responses from connected subscribers to determine the watermark up\n * to which it is safe to purge old change log entries.\"\n *\n * More concretely:\n *\n * * The `initial`, backup-derived watermark is the earliest to which cleanup\n * should ever happen.\n *\n * * However, it is possible for the replica backup to be *ahead* of a connected\n * subscriber; and if a network error causes that subscriber to retry from its\n * last watermark, the change streamer must support it.\n *\n * Thus, before cleaning up to an `initial` backup-derived watermark, the change\n * streamer first confirms that all connected subscribers have also passed\n * that watermark.\n */\nclass ChangeStreamerImpl implements ChangeStreamerService {\n readonly id: string;\n readonly #lc: LogContext;\n readonly #shard: ShardID;\n readonly #changeDB: PostgresDB;\n readonly #replicaVersion: string;\n readonly #source: ChangeSource;\n readonly #storer: Storer;\n readonly #forwarder: Forwarder;\n readonly #replicationStatusPublisher: ReplicationStatusPublisher;\n\n readonly #autoReset: boolean;\n readonly #state: RunningState;\n readonly #initialWatermarks = new Set<string>();\n\n // Starting the (Postgres) ChangeStream results in killing the previous\n // Postgres subscriber, potentially creating a gap in which the old\n // change-streamer has shut down and the new change-streamer has not yet\n // been recognized as \"healthy\" (and thus does not get any requests).\n //\n // To minimize this gap, delay starting the ChangeStream until the first\n // request from a `serving` replicator, indicating that higher level\n // load-balancing / routing logic has begun routing requests to this task.\n readonly #serving = resolver();\n\n readonly #txCounter = getOrCreateCounter(\n 'replication',\n 'transactions',\n 'Count of replicated transactions',\n );\n readonly #changeCounter = getOrCreateCounter(\n 'replication',\n 'changes',\n 'Count of replicated changes (DML or DDL statements)',\n );\n\n #latestStatus: Status;\n #purgeLock: PurgeLock | null;\n #stream: ChangeStream | undefined;\n\n constructor(\n lc: LogContext,\n shard: ShardID,\n taskID: string,\n discoveryAddress: string,\n discoveryProtocol: string,\n changeDB: PostgresDB,\n replicaVersion: string,\n source: ChangeSource,\n replicationStatusPublisher: ReplicationStatusPublisher,\n initialPurgeLock: PurgeLock | null,\n autoReset: boolean,\n opts: TuningOptions,\n setTimeoutFn = setTimeout,\n ) {\n this.id = `change-streamer`;\n this.#lc = lc.withContext('component', 'change-streamer');\n this.#shard = shard;\n this.#changeDB = changeDB;\n this.#replicaVersion = replicaVersion;\n this.#source = source;\n this.#storer = new Storer(\n lc,\n shard,\n taskID,\n discoveryAddress,\n discoveryProtocol,\n changeDB,\n replicaVersion,\n consumed => this.#stream?.acks.push(['status', consumed[1], consumed[2]]),\n err => this.stop(err),\n opts,\n );\n this.#forwarder = new Forwarder(lc, {\n flowControlConsensusPaddingSeconds:\n opts.flowControlConsensusPaddingSeconds,\n });\n this.#replicationStatusPublisher = replicationStatusPublisher;\n this.#purgeLock = initialPurgeLock;\n this.#autoReset = autoReset;\n this.#state = new RunningState(this.id, undefined, setTimeoutFn);\n this.#latestStatus = {tag: 'status'};\n }\n\n async run() {\n this.#lc.info?.('starting change stream');\n\n this.#forwarder.startProgressMonitor();\n\n const lagReport = await this.#source.startLagReporter();\n if (lagReport) {\n this.#latestStatus.lagReport = lagReport;\n }\n\n 
// Once this change-streamer acquires \"ownership\" of the change DB,\n // it is safe to start the storer.\n await this.#storer.assumeOwnership(this.#purgeLock);\n this.#purgeLock = null;\n\n // The threshold in (estimated number of) bytes to send() on subscriber\n // websockets before `await`-ing the I/O buffers to be ready for more.\n const flushBytesThreshold = getDefaultHighWaterMark(false);\n\n while (this.#state.shouldRun()) {\n let err: unknown;\n let watermark: string | null = null;\n let unflushedBytes = 0;\n try {\n const {lastWatermark, backfillRequests} =\n await this.#storer.getStartStreamInitializationParameters();\n const stream = await this.#source.startStream(\n lastWatermark,\n backfillRequests,\n );\n this.#storer.run().catch(e => stream.changes.cancel(e));\n\n this.#stream = stream;\n if (\n this.#state.resetBackoff() >\n REPLICATION_STATUS_ERROR_DELAY_THRESHOLD_MS\n ) {\n // After recovering from a backoff for which a replication status\n // error was published, publish an OK status\n this.#replicationStatusPublisher.publish(\n this.#lc,\n 'Replicating',\n `Replicating from ${lastWatermark}`,\n );\n }\n watermark = null;\n\n for await (const change of stream.changes) {\n const [type, msg] = change;\n switch (type) {\n case 'status':\n if (msg.ack) {\n this.#storer.status(change); // storer acks once it gets through its queue\n }\n if (msg.lagReport) {\n // Lag reports are not stored in the cdc change log, but rather\n // only forwarded on \"live\" connections. When a new subscriber\n // is catching up, it is initialized with the #latestStatus\n // from which it can measure lag while catching up.\n this.#latestStatus.lagReport = msg.lagReport;\n this.#forwarder.sendStatus(this.#latestStatus);\n }\n continue;\n case 'control':\n await this.#handleControlMessage(msg);\n continue; // control messages are not stored/forwarded\n case 'begin':\n watermark = change[2].commitWatermark;\n break;\n case 'commit':\n if (watermark !== change[2].watermark) {\n throw new UnrecoverableError(\n `commit watermark ${change[2].watermark} does not match 'begin' watermark ${watermark}`,\n );\n }\n this.#txCounter.add(1);\n break;\n default:\n if (type === 'data') {\n this.#changeCounter.add(1);\n }\n if (watermark === null) {\n throw new UnrecoverableError(\n `${type} change (${msg.tag}) received before 'begin' message`,\n );\n }\n break;\n }\n\n const entry: WatermarkedChange = [watermark, change];\n unflushedBytes += this.#storer.store(entry);\n if (unflushedBytes < flushBytesThreshold) {\n // pipeline changes until flushBytesThreshold\n this.#forwarder.forward(entry);\n } else {\n // Wait for messages to clear socket buffers to ensure that they\n // make their way to subscribers. 
Without this `await`, the\n // messages end up being buffered in this process, which:\n // (1) results in memory pressure and increased GC activity\n // (2) prevents subscribers from processing the messages as they\n // arrive, instead getting them in a large batch after being\n // idle while they were queued (causing further delays).\n await this.#forwarder.forwardWithFlowControl(entry);\n unflushedBytes = 0;\n }\n\n if (type === 'commit' || type === 'rollback') {\n watermark = null;\n }\n\n // Allow the storer to exert back pressure.\n const readyForMore = this.#storer.readyForMore();\n if (readyForMore) {\n await readyForMore;\n }\n }\n } catch (e) {\n err = e;\n } finally {\n this.#stream?.changes.cancel();\n this.#stream = undefined;\n }\n\n // When the change stream is interrupted, abort any pending transaction.\n if (watermark) {\n this.#lc.warn?.(`aborting interrupted transaction ${watermark}`);\n this.#storer.abort();\n this.#forwarder.forward([watermark, ['rollback', {tag: 'rollback'}]]);\n }\n\n // Backoff and drain any pending entries in the storer before reconnecting.\n await Promise.all([\n this.#storer.stop(),\n this.#state.backoff(this.#lc, err),\n this.#state.retryDelay > REPLICATION_STATUS_ERROR_DELAY_THRESHOLD_MS\n ? publishCriticalEvent(\n this.#lc,\n replicationStatusError(this.#lc, 'Replicating', err),\n )\n : promiseVoid,\n ]);\n }\n\n this.#forwarder.stopProgressMonitor();\n this.#lc.info?.('ChangeStreamer stopped');\n }\n\n async #handleControlMessage(msg: ChangeStreamControl[1]) {\n this.#lc.info?.('received control message', msg);\n const {tag} = msg;\n\n switch (tag) {\n case 'reset-required':\n await markResetRequired(this.#changeDB, this.#shard);\n await publishReplicationError(\n this.#lc,\n 'Replicating',\n msg.message ?? 'Resync required',\n msg.errorDetails,\n );\n if (this.#autoReset) {\n this.#lc.warn?.('shutting down for auto-reset');\n await this.stop(new AutoResetSignal());\n }\n break;\n default:\n unreachable(tag);\n }\n }\n\n subscribe(ctx: SubscriberContext): Promise<Source<Downstream>> {\n const {protocolVersion, id, mode, replicaVersion, watermark} = ctx;\n if (mode === 'serving') {\n this.#serving.resolve();\n }\n const downstream = Subscription.create<Downstream>({\n cleanup: () => this.#forwarder.remove(subscriber),\n });\n const subscriber = new Subscriber(\n protocolVersion,\n id,\n watermark,\n downstream,\n () => this.#latestStatus,\n );\n if (replicaVersion !== this.#replicaVersion) {\n this.#lc.warn?.(\n `rejecting subscriber at replica version ${replicaVersion}`,\n );\n subscriber.close(\n ErrorType.WrongReplicaVersion,\n `current replica version is ${\n this.#replicaVersion\n } (requested ${replicaVersion})`,\n );\n } else {\n this.#lc.debug?.(`adding subscriber ${subscriber.id}`);\n\n this.#forwarder.add(subscriber);\n this.#storer.catchup(subscriber, mode);\n }\n return Promise.resolve(downstream);\n }\n\n scheduleCleanup(watermark: string) {\n const origSize = this.#initialWatermarks.size;\n this.#initialWatermarks.add(watermark);\n\n if (origSize === 0) {\n this.#state.setTimeout(() => this.#purgeOldChanges(), CLEANUP_DELAY_MS);\n }\n }\n\n async getChangeLogState(): Promise<{\n replicaVersion: string;\n minWatermark: string;\n }> {\n const minWatermark = await this.#storer.getMinWatermarkForCatchup();\n if (!minWatermark) {\n this.#lc.warn?.(\n `Unexpected empty changeLog. Resync if \"Local replica watermark\" errors arise`,\n );\n }\n return {\n replicaVersion: this.#replicaVersion,\n minWatermark: minWatermark ?? 
this.#replicaVersion,\n };\n }\n\n /**\n * Makes a best effort to purge the change log. In the event of a database\n * error, exceptions will be logged and swallowed, so this method is safe\n * to run in a timeout.\n */\n async #purgeOldChanges(): Promise<void> {\n const initial = [...this.#initialWatermarks];\n if (initial.length === 0) {\n this.#lc.warn?.('No initial watermarks to check for cleanup'); // Not expected.\n return;\n }\n const current = [...this.#forwarder.getAcks()];\n if (current.length === 0) {\n // Also not expected, but possible (e.g. subscriber connects, then disconnects).\n // Bail to be safe.\n this.#lc.warn?.('No subscribers to confirm cleanup');\n return;\n }\n try {\n const earliestInitial = min(...(initial as AtLeastOne<LexiVersion>));\n const earliestCurrent = min(...(current as AtLeastOne<LexiVersion>));\n if (earliestCurrent < earliestInitial) {\n this.#lc.info?.(\n `At least one client is behind backup (${earliestCurrent} < ${earliestInitial})`,\n );\n } else {\n this.#lc.info?.(`Purging changes before ${earliestInitial} ...`);\n const start = performance.now();\n const deleted = await this.#storer.purgeRecordsBefore(earliestInitial);\n const elapsed = (performance.now() - start).toFixed(2);\n this.#lc.info?.(\n `Purged ${deleted} changes before ${earliestInitial} (${elapsed} ms)`,\n );\n this.#initialWatermarks.delete(earliestInitial);\n }\n } catch (e) {\n this.#lc.warn?.(`error purging change log`, e);\n } finally {\n if (this.#initialWatermarks.size) {\n // If there are unpurged watermarks to check, schedule the next purge.\n this.#state.setTimeout(() => this.#purgeOldChanges(), CLEANUP_DELAY_MS);\n }\n }\n }\n\n async stop(err?: unknown) {\n this.#state.stop(this.#lc, err);\n this.#stream?.changes.cancel();\n await this.#storer.stop();\n await this.#source.stop();\n }\n}\n\n// The delay between receiving an initial, backup-based watermark\n// and performing a check of whether to purge records before it.\n// This delay should be long enough to handle situations like the following:\n//\n// 1. `litestream restore` downloads a backup for the `replication-manager`\n// 2. `replication-manager` starts up and runs this `change-streamer`\n// 3. `zero-cache`s that are running on a different replica connect to this\n// `change-streamer` after exponential backoff retries.\n//\n// It is possible for a `zero-cache`[3] to be behind the backup restored [1].\n// This cleanup delay (30 seconds) is thus set to be a value comfortably\n// longer than the max delay for exponential backoff (10 seconds) in\n// `services/running-state.ts`. 
This allows the `zero-cache` [3] to reconnect\n// so that the `change-streamer` can track its progress and know when it has\n// surpassed the initial watermark of the backup [1].\nconst CLEANUP_DELAY_MS = DEFAULT_MAX_RETRY_DELAY_MS * 3;\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;AA+DA,eAAsB,mBACpB,IACA,OACA,QACA,kBACA,mBACA,UACA,cACA,4BACA,mBACA,WACA,WACA,MACA,eAAe,YACiB;AAEhC,OAAM,yBAAyB,IAAI,UAAU,MAAM;AACnD,OAAM,wBACJ,IACA,UACA,mBACA,OACA,WACA,aACD;CAED,MAAM,EAAC,mBAAkB;AACzB,QAAO,IAAI,mBACT,IACA,OACA,QACA,kBACA,mBACA,UACA,gBACA,cACA,4BACA,WACA,WACA,MACA,aACD;;AAGH,IAAM,8CAA8C;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAwJpD,IAAM,qBAAN,MAA0D;CACxD;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CAEA;CACA;CACA,qCAA8B,IAAI,KAAa;CAU/C,WAAoB,UAAU;CAE9B,aAAsB,mBACpB,eACA,gBACA,mCACD;CACD,iBAA0B,mBACxB,eACA,WACA,sDACD;CAED;CACA;CACA;CAEA,YACE,IACA,OACA,QACA,kBACA,mBACA,UACA,gBACA,QACA,4BACA,kBACA,WACA,MACA,eAAe,YACf;AACA,OAAK,KAAK;AACV,QAAA,KAAW,GAAG,YAAY,aAAa,kBAAkB;AACzD,QAAA,QAAc;AACd,QAAA,WAAiB;AACjB,QAAA,iBAAuB;AACvB,QAAA,SAAe;AACf,QAAA,SAAe,IAAI,OACjB,IACA,OACA,QACA,kBACA,mBACA,UACA,iBACA,aAAY,MAAA,QAAc,KAAK,KAAK;GAAC;GAAU,SAAS;GAAI,SAAS;GAAG,CAAC,GACzE,QAAO,KAAK,KAAK,IAAI,EACrB,KACD;AACD,QAAA,YAAkB,IAAI,UAAU,IAAI,EAClC,oCACE,KAAK,oCACR,CAAC;AACF,QAAA,6BAAmC;AACnC,QAAA,YAAkB;AAClB,QAAA,YAAkB;AAClB,QAAA,QAAc,IAAI,aAAa,KAAK,IAAI,KAAA,GAAW,aAAa;AAChE,QAAA,eAAqB,EAAC,KAAK,UAAS;;CAGtC,MAAM,MAAM;AACV,QAAA,GAAS,OAAO,yBAAyB;AAEzC,QAAA,UAAgB,sBAAsB;EAEtC,MAAM,YAAY,MAAM,MAAA,OAAa,kBAAkB;AACvD,MAAI,UACF,OAAA,aAAmB,YAAY;AAKjC,QAAM,MAAA,OAAa,gBAAgB,MAAA,UAAgB;AACnD,QAAA,YAAkB;EAIlB,MAAM,sBAAsB,wBAAwB,MAAM;AAE1D,SAAO,MAAA,MAAY,WAAW,EAAE;GAC9B,IAAI;GACJ,IAAI,YAA2B;GAC/B,IAAI,iBAAiB;AACrB,OAAI;IACF,MAAM,EAAC,eAAe,qBACpB,MAAM,MAAA,OAAa,wCAAwC;IAC7D,MAAM,SAAS,MAAM,MAAA,OAAa,YAChC,eACA,iBACD;AACD,UAAA,OAAa,KAAK,CAAC,OAAM,MAAK,OAAO,QAAQ,OAAO,EAAE,CAAC;AAEvD,UAAA,SAAe;AACf,QACE,MAAA,MAAY,cAAc,GAC1B,4CAIA,OAAA,2BAAiC,QAC/B,MAAA,IACA,eACA,oBAAoB,gBACrB;AAEH,gBAAY;AAEZ,eAAW,MAAM,UAAU,OAAO,SAAS;KACzC,MAAM,CAAC,MAAM,OAAO;AACpB,aAAQ,MAAR;MACE,KAAK;AACH,WAAI,IAAI,IACN,OAAA,OAAa,OAAO,OAAO;AAE7B,WAAI,IAAI,WAAW;AAKjB,cAAA,aAAmB,YAAY,IAAI;AACnC,cAAA,UAAgB,WAAW,MAAA,aAAmB;;AAEhD;MACF,KAAK;AACH,aAAM,MAAA,qBAA2B,IAAI;AACrC;MACF,KAAK;AACH,mBAAY,OAAO,GAAG;AACtB;MACF,KAAK;AACH,WAAI,cAAc,OAAO,GAAG,UAC1B,OAAM,IAAI,mBACR,oBAAoB,OAAO,GAAG,UAAU,oCAAoC,YAC7E;AAEH,aAAA,UAAgB,IAAI,EAAE;AACtB;MACF;AACE,WAAI,SAAS,OACX,OAAA,cAAoB,IAAI,EAAE;AAE5B,WAAI,cAAc,KAChB,OAAM,IAAI,mBACR,GAAG,KAAK,WAAW,IAAI,IAAI,mCAC5B;AAEH;;KAGJ,MAAM,QAA2B,CAAC,WAAW,OAAO;AACpD,uBAAkB,MAAA,OAAa,MAAM,MAAM;AAC3C,SAAI,iBAAiB,oBAEnB,OAAA,UAAgB,QAAQ,MAAM;UACzB;AAQL,YAAM,MAAA,UAAgB,uBAAuB,MAAM;AACnD,uBAAiB;;AAGnB,SAAI,SAAS,YAAY,SAAS,WAChC,aAAY;KAId,MAAM,eAAe,MAAA,OAAa,cAAc;AAChD,SAAI,aACF,OAAM;;YAGH,GAAG;AACV,UAAM;aACE;AACR,UAAA,QAAc,QAAQ,QAAQ;AAC9B,UAAA,SAAe,KAAA;;AAIjB,OAAI,WAAW;AACb,UAAA,GAAS,OAAO,oCAAoC,YAAY;AAChE,UAAA,OAAa,OAAO;AACpB,UAAA,UAAgB,QAAQ,CAAC,WAAW,CAAC,YAAY,EAAC,KAAK,YAAW,CAAC,CAAC,CAAC;;AAIvE,SAAM,QAAQ,IAAI;IAChB,MAAA,OAAa,MAAM;IACnB,MAAA,MAAY,QAAQ,MAAA,IAAU,IAAI;IAClC,MAAA,MAAY,aAAa,8CACrB,qBACE,MAAA,IACA,uBAAuB,MAAA,IAAU,eAAe,IAAI,CACrD,GACD;IACL,CAAC;;AAGJ,QAAA,UAAgB,qBAAqB;AACrC,QAAA,GAAS,OAAO,yBAAyB;;CAG3C,OAAA,qBAA4B,KAA6B;AACvD,QAAA,GAAS,OAAO,4BAA4B,IAAI;EAChD,MAAM,EAAC,QAAO;AAEd,UAAQ,KAAR;GACE,KAAK;AACH,UAAM,kBAAkB,MAAA,UAAgB,MAAA,MAAY;AACpD,UAAM,wBACJ,MAAA,IACA,eACA,IAAI,WAAW,mBACf,IAAI,aACL;AACD,QAAI,MAAA,WAAiB;AACnB,WAAA,GAAS,OAAO,+BAA+B;AAC/C,WAAM,KAAK,KAAK,IA
AI,iBAAiB,CAAC;;AAExC;GACF,QACE,aAAY,IAAI;;;CAItB,UAAU,KAAqD;EAC7D,MAAM,EAAC,iBAAiB,IAAI,MAAM,gBAAgB,cAAa;AAC/D,MAAI,SAAS,UACX,OAAA,QAAc,SAAS;EAEzB,MAAM,aAAa,aAAa,OAAmB,EACjD,eAAe,MAAA,UAAgB,OAAO,WAAW,EAClD,CAAC;EACF,MAAM,aAAa,IAAI,WACrB,iBACA,IACA,WACA,kBACM,MAAA,aACP;AACD,MAAI,mBAAmB,MAAA,gBAAsB;AAC3C,SAAA,GAAS,OACP,2CAA2C,iBAC5C;AACD,cAAW,MACT,GACA,8BACE,MAAA,eACD,cAAc,eAAe,GAC/B;SACI;AACL,SAAA,GAAS,QAAQ,qBAAqB,WAAW,KAAK;AAEtD,SAAA,UAAgB,IAAI,WAAW;AAC/B,SAAA,OAAa,QAAQ,YAAY,KAAK;;AAExC,SAAO,QAAQ,QAAQ,WAAW;;CAGpC,gBAAgB,WAAmB;EACjC,MAAM,WAAW,MAAA,kBAAwB;AACzC,QAAA,kBAAwB,IAAI,UAAU;AAEtC,MAAI,aAAa,EACf,OAAA,MAAY,iBAAiB,MAAA,iBAAuB,EAAE,iBAAiB;;CAI3E,MAAM,oBAGH;EACD,MAAM,eAAe,MAAM,MAAA,OAAa,2BAA2B;AACnE,MAAI,CAAC,aACH,OAAA,GAAS,OACP,+EACD;AAEH,SAAO;GACL,gBAAgB,MAAA;GAChB,cAAc,gBAAgB,MAAA;GAC/B;;;;;;;CAQH,OAAA,kBAAwC;EACtC,MAAM,UAAU,CAAC,GAAG,MAAA,kBAAwB;AAC5C,MAAI,QAAQ,WAAW,GAAG;AACxB,SAAA,GAAS,OAAO,6CAA6C;AAC7D;;EAEF,MAAM,UAAU,CAAC,GAAG,MAAA,UAAgB,SAAS,CAAC;AAC9C,MAAI,QAAQ,WAAW,GAAG;AAGxB,SAAA,GAAS,OAAO,oCAAoC;AACpD;;AAEF,MAAI;GACF,MAAM,kBAAkB,IAAI,GAAI,QAAoC;GACpE,MAAM,kBAAkB,IAAI,GAAI,QAAoC;AACpE,OAAI,kBAAkB,gBACpB,OAAA,GAAS,OACP,yCAAyC,gBAAgB,KAAK,gBAAgB,GAC/E;QACI;AACL,UAAA,GAAS,OAAO,0BAA0B,gBAAgB,MAAM;IAChE,MAAM,QAAQ,YAAY,KAAK;IAC/B,MAAM,UAAU,MAAM,MAAA,OAAa,mBAAmB,gBAAgB;IACtE,MAAM,WAAW,YAAY,KAAK,GAAG,OAAO,QAAQ,EAAE;AACtD,UAAA,GAAS,OACP,UAAU,QAAQ,kBAAkB,gBAAgB,IAAI,QAAQ,MACjE;AACD,UAAA,kBAAwB,OAAO,gBAAgB;;WAE1C,GAAG;AACV,SAAA,GAAS,OAAO,4BAA4B,EAAE;YACtC;AACR,OAAI,MAAA,kBAAwB,KAE1B,OAAA,MAAY,iBAAiB,MAAA,iBAAuB,EAAE,iBAAiB;;;CAK7E,MAAM,KAAK,KAAe;AACxB,QAAA,MAAY,KAAK,MAAA,IAAU,IAAI;AAC/B,QAAA,QAAc,QAAQ,QAAQ;AAC9B,QAAM,MAAA,OAAa,MAAM;AACzB,QAAM,MAAA,OAAa,MAAM;;;AAmB7B,IAAM,mBAAmB,6BAA6B"}
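The `sourcesContent` embedded in the sourcemap above documents how the change-streamer orders changes: `commit` messages take a watermark derived from their own LSN, while `begin` and `data` messages take the preceding commit's watermark plus one (the `preCommitWatermark`) and are ordered within a transaction by a secondary `pos` column. The sketch below only illustrates that ordering rule; the names (`Change`, `assignSortKeys`) and the plain-number LSNs are assumptions made for the example, not code from this package, which uses LexiVersion strings.

```ts
// Illustration only — not the package's implementation. It mimics the watermark
// rule documented in the change-streamer-service source above: `commit` messages
// sort by their own LSN, while `begin`/`data` messages sort just after the
// previous commit (the "preCommitWatermark") with a per-transaction `pos`
// tiebreaker. Plain numbers stand in for LexiVersion strings.
type Change = {tag: 'begin' | 'data' | 'commit'; lsn: number};

function assignSortKeys(
  changes: Change[],
  slotStartLsn: number,
): {key: [watermark: number, pos: number]; change: Change}[] {
  let lastCommit = slotStartLsn;
  let pos = 0;
  return changes.map(change => {
    if (change.tag === 'begin') {
      pos = 0; // `pos` restarts at 0 at each transaction's `begin`
    }
    if (change.tag === 'commit') {
      lastCommit = change.lsn; // commit LSNs are in commit order by definition
      return {key: [change.lsn, pos++] as [number, number], change};
    }
    // begin/data sort after the previous commit, regardless of their own LSN.
    return {key: [lastCommit + 1, pos++] as [number, number], change};
  });
}
```

Applied to the two interleaved transactions in the embedded example (tx1 committing at LSN 8, tx2 at LSN 10), tx2's `begin` at LSN 3 is keyed at watermark 9 and therefore sorts after tx1's commit, which is exactly the property the doc comment calls out.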
@@ -1,8 +1,8 @@
 1  1 |   import { promiseVoid } from "../../../../shared/src/resolved-promises.js";
 2  2 |   import { Database } from "../../../../zqlite/src/db.js";
 3    | - import { RunningState } from "../running-state.js";
 4    | - import { StatementRunner } from "../../db/statements.js";
 5  3 |   import { getReplicationState } from "../replicator/schema/replication-state.js";
    4 | + import { StatementRunner } from "../../db/statements.js";
    5 | + import { RunningState } from "../running-state.js";
 6  6 |   //#region ../zero-cache/src/services/change-streamer/replica-monitor.ts
 7  7 |   var CHECK_INTERVAL_MS = 30 * 1e3;
 8  8 |   /**
@@ -1,4 +1,5 @@
 1  1 |   import type { LogContext } from '@rocicorp/logger';
    2 | + import { TransactionPool } from '../../db/transaction-pool.ts';
 2  3 |   import { type PostgresDB } from '../../types/pg.ts';
 3  4 |   import { type ShardID } from '../../types/shards.ts';
 4  5 |   import { type BackfillRequest } from '../change-source/protocol/current.ts';
@@ -8,6 +9,10 @@ import type { ReplicatorMode } from '../replicator/replicator.ts';
  8   9 |   import type { Service } from '../service.ts';
  9  10 |   import type { WatermarkedChange } from './change-streamer-service.ts';
 10  11 |   import type { Subscriber } from './subscriber.ts';
     12 | + export type TuningOptions = {
     13 | +     backPressureLimitHeapProportion: number;
     14 | +     statementTimeoutMs: number;
     15 | + };
 11  16 |   /**
 12  17 |    * Handles the storage of changes and the catchup of subscribers
 13  18 |    * that are behind.
@@ -41,8 +46,8 @@ import type { Subscriber } from './subscriber.ts';
 41  46 |   export declare class Storer implements Service {
 42  47 |       #private;
 43  48 |       readonly id = "storer";
 44     | -     constructor(lc: LogContext, shard: ShardID, taskID: string, discoveryAddress: string, discoveryProtocol: string, db: PostgresDB, replicaVersion: string, onConsumed: (c: Commit | UpstreamStatusMessage) => void, onFatal: (err: Error) => void, backPressureLimitHeapProportion:
 45     | -     assumeOwnership(): Promise<void>;
     49 | +     constructor(lc: LogContext, shard: ShardID, taskID: string, discoveryAddress: string, discoveryProtocol: string, db: PostgresDB, replicaVersion: string, onConsumed: (c: Commit | UpstreamStatusMessage) => void, onFatal: (err: Error) => void, { backPressureLimitHeapProportion, statementTimeoutMs }: TuningOptions);
     50 | +     assumeOwnership(purgeLock?: PurgeLock | null): Promise<void>;
 46  51 |       getStartStreamInitializationParameters(): Promise<{
 47  52 |           lastWatermark: string;
 48  53 |           backfillRequests: BackfillRequest[];
@@ -69,4 +74,16 @@ export declare class Storer implements Service {
 69  74 |       allProcessed(): Promise<void>;
 70  75 |       stop(): Promise<void>;
 71  76 |   }
     77 | + export declare class PurgeLock {
     78 | +     #private;
     79 | +     readonly replicaVersion: string;
     80 | +     readonly minWatermark: string;
     81 | +     constructor(lc: LogContext, tx: TransactionPool, replicaVersion: string, watermark: string);
     82 | +     release(): Promise<void>;
     83 | + }
     84 | + export declare class PurgeLocker {
     85 | +     #private;
     86 | +     constructor(lc: LogContext, shard: ShardID, db: PostgresDB);
     87 | +     acquire(): Promise<PurgeLock | null>;
     88 | + }
 72  89 |   //# sourceMappingURL=storer.d.ts.map
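The new `storer.d.ts` surface above adds a `TuningOptions` bag, an optional `PurgeLock` argument to `assumeOwnership`, and a `PurgeLocker` whose `acquire()` can resolve to `null`. The sketch below is a guess at the intended call order, based only on these declarations and on `#storer.assumeOwnership(this.#purgeLock)` in the change-streamer-service source earlier in this diff; the interfaces are local stand-ins, not the package's exports.

```ts
// Sketch only: local stand-ins mirroring the declarations added to storer.d.ts,
// showing one plausible call order. Not how the package itself wires these up.
import type {LogContext} from '@rocicorp/logger';

interface PurgeLock {
  readonly replicaVersion: string;
  readonly minWatermark: string;
  release(): Promise<void>;
}

interface PurgeLocker {
  // Per the declaration, acquire() may resolve to null (e.g. lock unavailable).
  acquire(): Promise<PurgeLock | null>;
}

interface StorerLike {
  // New in this diff: ownership can be assumed while holding a purge lock.
  assumeOwnership(purgeLock?: PurgeLock | null): Promise<void>;
}

// Plausible startup wiring (assumption): acquire the purge lock first, then hand
// it to the storer when it takes ownership of the change DB.
async function startStorer(
  lc: LogContext,
  locker: PurgeLocker,
  storer: StorerLike,
): Promise<void> {
  const purgeLock = await locker.acquire(); // may be null if already held
  lc.debug?.(`purge lock ${purgeLock ? 'acquired' : 'not acquired'}`);
  await storer.assumeOwnership(purgeLock);
}
```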
@@ -1 +1 @@
 1    | - {"version":3,"file":"storer.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/change-streamer/storer.ts"],"names":[],"mappings":"
    1 | +
{"version":3,"file":"storer.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/change-streamer/storer.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAWjD,OAAO,EAAC,eAAe,EAAC,MAAM,8BAA8B,CAAC;AAC7D,OAAO,EAAC,KAAK,UAAU,EAA2B,MAAM,mBAAmB,CAAC;AAC5E,OAAO,EAAY,KAAK,OAAO,EAAC,MAAM,uBAAuB,CAAC;AAC9D,OAAO,EAKL,KAAK,eAAe,EAMrB,MAAM,sCAAsC,CAAC;AAC9C,OAAO,EAAC,KAAK,MAAM,EAAC,MAAM,iDAAiD,CAAC;AAC5E,OAAO,KAAK,EACV,uBAAuB,EACvB,qBAAqB,EACtB,MAAM,6CAA6C,CAAC;AACrD,OAAO,KAAK,EAAC,cAAc,EAAC,MAAM,6BAA6B,CAAC;AAChE,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,eAAe,CAAC;AAC3C,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,8BAA8B,CAAC;AAUpE,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,iBAAiB,CAAC;AA8BhD,MAAM,MAAM,aAAa,GAAG;IAC1B,+BAA+B,EAAE,MAAM,CAAC;IACxC,kBAAkB,EAAE,MAAM,CAAC;CAC5B,CAAC;AAEF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA6BG;AACH,qBAAa,MAAO,YAAW,OAAO;;IACpC,QAAQ,CAAC,EAAE,YAAY;gBAkBrB,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,OAAO,EACd,MAAM,EAAE,MAAM,EACd,gBAAgB,EAAE,MAAM,EACxB,iBAAiB,EAAE,MAAM,EACzB,EAAE,EAAE,UAAU,EACd,cAAc,EAAE,MAAM,EACtB,UAAU,EAAE,CAAC,CAAC,EAAE,MAAM,GAAG,qBAAqB,KAAK,IAAI,EACvD,OAAO,EAAE,CAAC,GAAG,EAAE,KAAK,KAAK,IAAI,EAC7B,EAAC,+BAA+B,EAAE,kBAAkB,EAAC,EAAE,aAAa;IA+BhE,eAAe,CAAC,SAAS,CAAC,EAAE,SAAS,GAAG,IAAI;IA2B5C,sCAAsC,IAAI,OAAO,CAAC;QACtD,aAAa,EAAE,MAAM,CAAC;QACtB,gBAAgB,EAAE,eAAe,EAAE,CAAC;KACrC,CAAC;IAkCI,yBAAyB,IAAI,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC;IAOzD,kBAAkB,CAAC,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC;IA+BtD;;OAEG;IACH,KAAK,CAAC,KAAK,EAAE,iBAAiB;IAsB9B,KAAK;IAIL,MAAM,CAAC,CAAC,EAAE,uBAAuB;IAIjC,OAAO,CAAC,UAAU,EAAE,UAAU,EAAE,IAAI,EAAE,cAAc;IAMpD,YAAY,IAAI,OAAO,CAAC,IAAI,CAAC,GAAG,SAAS;IA0CzC;;;OAGG;IACG,GAAG;IAocT;;;OAGG;IACG,YAAY;IAQlB,IAAI;CAOL;AAgBD,qBAAa,SAAS;;IAGpB,QAAQ,CAAC,cAAc,EAAE,MAAM,CAAC;IAChC,QAAQ,CAAC,YAAY,EAAE,MAAM,CAAC;gBAG5B,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,eAAe,EACnB,cAAc,EAAE,MAAM,EACtB,SAAS,EAAE,MAAM;IAUb,OAAO;CAWd;AAED,qBAAa,WAAW;;gBAKV,EAAE,EAAE,UAAU,EAAE,KAAK,EAAE,OAAO,EAAE,EAAE,EAAE,UAAU;IAWpD,OAAO;CA4Bd"}