@rocicorp/zero 0.26.0-canary.0 → 0.26.0-canary.3
This diff compares the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- package/README.md +1 -1
- package/out/replicache/src/persist/collect-idb-databases.d.ts +4 -4
- package/out/replicache/src/persist/collect-idb-databases.d.ts.map +1 -1
- package/out/replicache/src/persist/collect-idb-databases.js +22 -19
- package/out/replicache/src/persist/collect-idb-databases.js.map +1 -1
- package/out/replicache/src/persist/refresh.d.ts.map +1 -1
- package/out/replicache/src/persist/refresh.js +0 -8
- package/out/replicache/src/persist/refresh.js.map +1 -1
- package/out/replicache/src/process-scheduler.d.ts +23 -0
- package/out/replicache/src/process-scheduler.d.ts.map +1 -1
- package/out/replicache/src/process-scheduler.js +50 -1
- package/out/replicache/src/process-scheduler.js.map +1 -1
- package/out/replicache/src/replicache-impl.d.ts +8 -0
- package/out/replicache/src/replicache-impl.d.ts.map +1 -1
- package/out/replicache/src/replicache-impl.js +11 -2
- package/out/replicache/src/replicache-impl.js.map +1 -1
- package/out/shared/src/custom-key-map.d.ts +4 -4
- package/out/shared/src/custom-key-map.d.ts.map +1 -1
- package/out/shared/src/custom-key-map.js.map +1 -1
- package/out/shared/src/falsy.d.ts +3 -0
- package/out/shared/src/falsy.d.ts.map +1 -0
- package/out/shared/src/iterables.d.ts +6 -8
- package/out/shared/src/iterables.d.ts.map +1 -1
- package/out/shared/src/iterables.js +13 -7
- package/out/shared/src/iterables.js.map +1 -1
- package/out/shared/src/options.d.ts +1 -0
- package/out/shared/src/options.d.ts.map +1 -1
- package/out/shared/src/options.js +5 -1
- package/out/shared/src/options.js.map +1 -1
- package/out/zero/package.json.js +1 -1
- package/out/zero/src/adapters/drizzle.js +1 -2
- package/out/zero/src/adapters/prisma.d.ts +2 -0
- package/out/zero/src/adapters/prisma.d.ts.map +1 -0
- package/out/zero/src/adapters/prisma.js +6 -0
- package/out/zero/src/adapters/prisma.js.map +1 -0
- package/out/zero/src/pg.js +4 -7
- package/out/zero/src/react.js +3 -1
- package/out/zero/src/react.js.map +1 -1
- package/out/zero/src/server.js +5 -8
- package/out/zero/src/zero-cache-dev.js +7 -3
- package/out/zero/src/zero-cache-dev.js.map +1 -1
- package/out/zero-cache/src/auth/load-permissions.d.ts +3 -2
- package/out/zero-cache/src/auth/load-permissions.d.ts.map +1 -1
- package/out/zero-cache/src/auth/load-permissions.js +14 -8
- package/out/zero-cache/src/auth/load-permissions.js.map +1 -1
- package/out/zero-cache/src/auth/write-authorizer.d.ts +6 -0
- package/out/zero-cache/src/auth/write-authorizer.d.ts.map +1 -1
- package/out/zero-cache/src/auth/write-authorizer.js +16 -3
- package/out/zero-cache/src/auth/write-authorizer.js.map +1 -1
- package/out/zero-cache/src/config/zero-config.d.ts +54 -9
- package/out/zero-cache/src/config/zero-config.d.ts.map +1 -1
- package/out/zero-cache/src/config/zero-config.js +80 -20
- package/out/zero-cache/src/config/zero-config.js.map +1 -1
- package/out/zero-cache/src/custom/fetch.d.ts +3 -0
- package/out/zero-cache/src/custom/fetch.d.ts.map +1 -1
- package/out/zero-cache/src/custom/fetch.js +26 -0
- package/out/zero-cache/src/custom/fetch.js.map +1 -1
- package/out/zero-cache/src/db/lite-tables.js +1 -1
- package/out/zero-cache/src/db/lite-tables.js.map +1 -1
- package/out/zero-cache/src/db/migration-lite.d.ts.map +1 -1
- package/out/zero-cache/src/db/migration-lite.js +9 -3
- package/out/zero-cache/src/db/migration-lite.js.map +1 -1
- package/out/zero-cache/src/db/migration.d.ts.map +1 -1
- package/out/zero-cache/src/db/migration.js +9 -3
- package/out/zero-cache/src/db/migration.js.map +1 -1
- package/out/zero-cache/src/db/specs.d.ts +4 -3
- package/out/zero-cache/src/db/specs.d.ts.map +1 -1
- package/out/zero-cache/src/db/specs.js +4 -1
- package/out/zero-cache/src/db/specs.js.map +1 -1
- package/out/zero-cache/src/db/transaction-pool.d.ts.map +1 -1
- package/out/zero-cache/src/db/transaction-pool.js +9 -3
- package/out/zero-cache/src/db/transaction-pool.js.map +1 -1
- package/out/zero-cache/src/observability/events.d.ts.map +1 -1
- package/out/zero-cache/src/observability/events.js +15 -5
- package/out/zero-cache/src/observability/events.js.map +1 -1
- package/out/zero-cache/src/server/change-streamer.d.ts.map +1 -1
- package/out/zero-cache/src/server/change-streamer.js +10 -2
- package/out/zero-cache/src/server/change-streamer.js.map +1 -1
- package/out/zero-cache/src/server/inspector-delegate.d.ts +1 -1
- package/out/zero-cache/src/server/inspector-delegate.d.ts.map +1 -1
- package/out/zero-cache/src/server/inspector-delegate.js +11 -30
- package/out/zero-cache/src/server/inspector-delegate.js.map +1 -1
- package/out/zero-cache/src/server/main.js +1 -1
- package/out/zero-cache/src/server/main.js.map +1 -1
- package/out/zero-cache/src/server/priority-op.d.ts +8 -0
- package/out/zero-cache/src/server/priority-op.d.ts.map +1 -0
- package/out/zero-cache/src/server/priority-op.js +29 -0
- package/out/zero-cache/src/server/priority-op.js.map +1 -0
- package/out/zero-cache/src/server/syncer.d.ts.map +1 -1
- package/out/zero-cache/src/server/syncer.js +10 -10
- package/out/zero-cache/src/server/syncer.js.map +1 -1
- package/out/zero-cache/src/services/analyze.js +1 -1
- package/out/zero-cache/src/services/analyze.js.map +1 -1
- package/out/zero-cache/src/services/change-source/custom/change-source.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/custom/change-source.js +4 -7
- package/out/zero-cache/src/services/change-source/custom/change-source.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/change-source.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/change-source.js +68 -13
- package/out/zero-cache/src/services/change-source/pg/change-source.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/initial-sync.js +7 -2
- package/out/zero-cache/src/services/change-source/pg/initial-sync.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/logical-replication/stream.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/logical-replication/stream.js +7 -4
- package/out/zero-cache/src/services/change-source/pg/logical-replication/stream.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts +125 -180
- package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/ddl.js +1 -10
- package/out/zero-cache/src/services/change-source/pg/schema/ddl.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/init.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/init.js +26 -12
- package/out/zero-cache/src/services/change-source/pg/schema/init.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/published.d.ts +36 -90
- package/out/zero-cache/src/services/change-source/pg/schema/published.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/published.js +51 -14
- package/out/zero-cache/src/services/change-source/pg/schema/published.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/shard.d.ts +31 -36
- package/out/zero-cache/src/services/change-source/pg/schema/shard.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/shard.js +25 -17
- package/out/zero-cache/src/services/change-source/pg/schema/shard.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/validation.d.ts +2 -2
- package/out/zero-cache/src/services/change-source/pg/schema/validation.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/validation.js +2 -4
- package/out/zero-cache/src/services/change-source/pg/schema/validation.js.map +1 -1
- package/out/zero-cache/src/services/change-source/protocol/current/data.d.ts +158 -53
- package/out/zero-cache/src/services/change-source/protocol/current/data.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/protocol/current/data.js +55 -10
- package/out/zero-cache/src/services/change-source/protocol/current/data.js.map +1 -1
- package/out/zero-cache/src/services/change-source/protocol/current/downstream.d.ts +210 -72
- package/out/zero-cache/src/services/change-source/protocol/current/downstream.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/protocol/current.js +4 -2
- package/out/zero-cache/src/services/change-source/replica-schema.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/replica-schema.js +20 -4
- package/out/zero-cache/src/services/change-source/replica-schema.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.js +6 -4
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer.d.ts +71 -25
- package/out/zero-cache/src/services/change-streamer/change-streamer.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer.js +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/schema/tables.d.ts +1 -0
- package/out/zero-cache/src/services/change-streamer/schema/tables.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/schema/tables.js +6 -5
- package/out/zero-cache/src/services/change-streamer/schema/tables.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/storer.d.ts +1 -1
- package/out/zero-cache/src/services/change-streamer/storer.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/storer.js +17 -6
- package/out/zero-cache/src/services/change-streamer/storer.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/subscriber.d.ts +2 -0
- package/out/zero-cache/src/services/change-streamer/subscriber.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/subscriber.js +14 -1
- package/out/zero-cache/src/services/change-streamer/subscriber.js.map +1 -1
- package/out/zero-cache/src/services/heapz.d.ts.map +1 -1
- package/out/zero-cache/src/services/heapz.js +1 -0
- package/out/zero-cache/src/services/heapz.js.map +1 -1
- package/out/zero-cache/src/services/life-cycle.d.ts +1 -1
- package/out/zero-cache/src/services/life-cycle.d.ts.map +1 -1
- package/out/zero-cache/src/services/life-cycle.js.map +1 -1
- package/out/zero-cache/src/services/litestream/commands.d.ts.map +1 -1
- package/out/zero-cache/src/services/litestream/commands.js +3 -1
- package/out/zero-cache/src/services/litestream/commands.js.map +1 -1
- package/out/zero-cache/src/services/litestream/config.yml +1 -0
- package/out/zero-cache/src/services/mutagen/error.d.ts.map +1 -1
- package/out/zero-cache/src/services/mutagen/error.js +4 -1
- package/out/zero-cache/src/services/mutagen/error.js.map +1 -1
- package/out/zero-cache/src/services/mutagen/mutagen.d.ts +4 -4
- package/out/zero-cache/src/services/mutagen/mutagen.d.ts.map +1 -1
- package/out/zero-cache/src/services/mutagen/mutagen.js +10 -24
- package/out/zero-cache/src/services/mutagen/mutagen.js.map +1 -1
- package/out/zero-cache/src/services/mutagen/pusher.d.ts +8 -6
- package/out/zero-cache/src/services/mutagen/pusher.d.ts.map +1 -1
- package/out/zero-cache/src/services/mutagen/pusher.js +130 -19
- package/out/zero-cache/src/services/mutagen/pusher.js.map +1 -1
- package/out/zero-cache/src/services/replicator/change-processor.d.ts.map +1 -1
- package/out/zero-cache/src/services/replicator/change-processor.js +24 -31
- package/out/zero-cache/src/services/replicator/change-processor.js.map +1 -1
- package/out/zero-cache/src/services/replicator/schema/change-log.d.ts +4 -4
- package/out/zero-cache/src/services/replicator/schema/change-log.d.ts.map +1 -1
- package/out/zero-cache/src/services/replicator/schema/change-log.js +38 -36
- package/out/zero-cache/src/services/replicator/schema/change-log.js.map +1 -1
- package/out/zero-cache/src/services/{change-source → replicator/schema}/column-metadata.d.ts +3 -3
- package/out/zero-cache/src/services/replicator/schema/column-metadata.d.ts.map +1 -0
- package/out/zero-cache/src/services/{change-source → replicator/schema}/column-metadata.js +3 -3
- package/out/zero-cache/src/services/replicator/schema/column-metadata.js.map +1 -0
- package/out/zero-cache/src/services/replicator/schema/replication-state.d.ts.map +1 -1
- package/out/zero-cache/src/services/replicator/schema/replication-state.js +3 -1
- package/out/zero-cache/src/services/replicator/schema/replication-state.js.map +1 -1
- package/out/zero-cache/src/services/run-ast.js +1 -1
- package/out/zero-cache/src/services/run-ast.js.map +1 -1
- package/out/zero-cache/src/services/statz.d.ts.map +1 -1
- package/out/zero-cache/src/services/statz.js +1 -0
- package/out/zero-cache/src/services/statz.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/client-handler.d.ts +5 -6
- package/out/zero-cache/src/services/view-syncer/client-handler.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/client-handler.js +5 -23
- package/out/zero-cache/src/services/view-syncer/client-handler.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr-store.d.ts +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr-store.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr-store.js +65 -44
- package/out/zero-cache/src/services/view-syncer/cvr-store.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr.d.ts +0 -1
- package/out/zero-cache/src/services/view-syncer/cvr.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr.js +23 -6
- package/out/zero-cache/src/services/view-syncer/cvr.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts +14 -22
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.js +46 -67
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/row-record-cache.d.ts +1 -1
- package/out/zero-cache/src/services/view-syncer/row-record-cache.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/row-record-cache.js +22 -11
- package/out/zero-cache/src/services/view-syncer/row-record-cache.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/snapshotter.d.ts +0 -2
- package/out/zero-cache/src/services/view-syncer/snapshotter.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/snapshotter.js +3 -11
- package/out/zero-cache/src/services/view-syncer/snapshotter.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts +6 -4
- package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/view-syncer.js +216 -243
- package/out/zero-cache/src/services/view-syncer/view-syncer.js.map +1 -1
- package/out/zero-cache/src/types/lexi-version.d.ts.map +1 -1
- package/out/zero-cache/src/types/lexi-version.js +4 -1
- package/out/zero-cache/src/types/lexi-version.js.map +1 -1
- package/out/zero-cache/src/types/lite.d.ts.map +1 -1
- package/out/zero-cache/src/types/lite.js +8 -2
- package/out/zero-cache/src/types/lite.js.map +1 -1
- package/out/zero-cache/src/types/shards.js +1 -1
- package/out/zero-cache/src/types/shards.js.map +1 -1
- package/out/zero-cache/src/types/sql.d.ts +5 -0
- package/out/zero-cache/src/types/sql.d.ts.map +1 -1
- package/out/zero-cache/src/types/sql.js +5 -1
- package/out/zero-cache/src/types/sql.js.map +1 -1
- package/out/zero-cache/src/types/subscription.js +1 -1
- package/out/zero-cache/src/types/subscription.js.map +1 -1
- package/out/zero-cache/src/workers/connect-params.d.ts +1 -1
- package/out/zero-cache/src/workers/connect-params.d.ts.map +1 -1
- package/out/zero-cache/src/workers/connect-params.js +2 -3
- package/out/zero-cache/src/workers/connect-params.js.map +1 -1
- package/out/zero-cache/src/workers/replicator.d.ts.map +1 -1
- package/out/zero-cache/src/workers/replicator.js +2 -5
- package/out/zero-cache/src/workers/replicator.js.map +1 -1
- package/out/zero-cache/src/workers/syncer-ws-message-handler.d.ts.map +1 -1
- package/out/zero-cache/src/workers/syncer-ws-message-handler.js +15 -10
- package/out/zero-cache/src/workers/syncer-ws-message-handler.js.map +1 -1
- package/out/zero-cache/src/workers/syncer.d.ts.map +1 -1
- package/out/zero-cache/src/workers/syncer.js +17 -10
- package/out/zero-cache/src/workers/syncer.js.map +1 -1
- package/out/zero-client/src/client/connection-manager.d.ts +8 -0
- package/out/zero-client/src/client/connection-manager.d.ts.map +1 -1
- package/out/zero-client/src/client/connection-manager.js +33 -0
- package/out/zero-client/src/client/connection-manager.js.map +1 -1
- package/out/zero-client/src/client/connection.d.ts.map +1 -1
- package/out/zero-client/src/client/connection.js +6 -3
- package/out/zero-client/src/client/connection.js.map +1 -1
- package/out/zero-client/src/client/context.js +1 -0
- package/out/zero-client/src/client/context.js.map +1 -1
- package/out/zero-client/src/client/error.js +1 -1
- package/out/zero-client/src/client/error.js.map +1 -1
- package/out/zero-client/src/client/mutator-proxy.d.ts.map +1 -1
- package/out/zero-client/src/client/mutator-proxy.js +15 -1
- package/out/zero-client/src/client/mutator-proxy.js.map +1 -1
- package/out/zero-client/src/client/options.d.ts +11 -1
- package/out/zero-client/src/client/options.d.ts.map +1 -1
- package/out/zero-client/src/client/options.js.map +1 -1
- package/out/zero-client/src/client/query-manager.d.ts +4 -0
- package/out/zero-client/src/client/query-manager.d.ts.map +1 -1
- package/out/zero-client/src/client/query-manager.js +7 -0
- package/out/zero-client/src/client/query-manager.js.map +1 -1
- package/out/zero-client/src/client/version.js +1 -1
- package/out/zero-client/src/client/zero.d.ts +5 -5
- package/out/zero-client/src/client/zero.d.ts.map +1 -1
- package/out/zero-client/src/client/zero.js +53 -8
- package/out/zero-client/src/client/zero.js.map +1 -1
- package/out/zero-client/src/mod.d.ts +1 -0
- package/out/zero-client/src/mod.d.ts.map +1 -1
- package/out/zero-protocol/src/connect.d.ts +4 -0
- package/out/zero-protocol/src/connect.d.ts.map +1 -1
- package/out/zero-protocol/src/connect.js +3 -1
- package/out/zero-protocol/src/connect.js.map +1 -1
- package/out/zero-protocol/src/protocol-version.d.ts +1 -1
- package/out/zero-protocol/src/protocol-version.d.ts.map +1 -1
- package/out/zero-protocol/src/protocol-version.js +1 -1
- package/out/zero-protocol/src/protocol-version.js.map +1 -1
- package/out/zero-protocol/src/push.d.ts +16 -0
- package/out/zero-protocol/src/push.d.ts.map +1 -1
- package/out/zero-protocol/src/push.js +25 -1
- package/out/zero-protocol/src/push.js.map +1 -1
- package/out/zero-protocol/src/up.d.ts +2 -0
- package/out/zero-protocol/src/up.d.ts.map +1 -1
- package/out/zero-react/src/mod.d.ts +3 -1
- package/out/zero-react/src/mod.d.ts.map +1 -1
- package/out/zero-react/src/paging-reducer.d.ts +61 -0
- package/out/zero-react/src/paging-reducer.d.ts.map +1 -0
- package/out/zero-react/src/paging-reducer.js +77 -0
- package/out/zero-react/src/paging-reducer.js.map +1 -0
- package/out/zero-react/src/use-query.d.ts +11 -1
- package/out/zero-react/src/use-query.d.ts.map +1 -1
- package/out/zero-react/src/use-query.js +13 -11
- package/out/zero-react/src/use-query.js.map +1 -1
- package/out/zero-react/src/use-rows.d.ts +39 -0
- package/out/zero-react/src/use-rows.d.ts.map +1 -0
- package/out/zero-react/src/use-rows.js +130 -0
- package/out/zero-react/src/use-rows.js.map +1 -0
- package/out/zero-react/src/use-zero-virtualizer.d.ts +122 -0
- package/out/zero-react/src/use-zero-virtualizer.d.ts.map +1 -0
- package/out/zero-react/src/use-zero-virtualizer.js +342 -0
- package/out/zero-react/src/use-zero-virtualizer.js.map +1 -0
- package/out/zero-react/src/zero-provider.js +1 -1
- package/out/zero-react/src/zero-provider.js.map +1 -1
- package/out/zero-server/src/adapters/drizzle.d.ts +18 -18
- package/out/zero-server/src/adapters/drizzle.d.ts.map +1 -1
- package/out/zero-server/src/adapters/drizzle.js +8 -22
- package/out/zero-server/src/adapters/drizzle.js.map +1 -1
- package/out/zero-server/src/adapters/pg.d.ts +19 -13
- package/out/zero-server/src/adapters/pg.d.ts.map +1 -1
- package/out/zero-server/src/adapters/pg.js.map +1 -1
- package/out/zero-server/src/adapters/postgresjs.d.ts +19 -13
- package/out/zero-server/src/adapters/postgresjs.d.ts.map +1 -1
- package/out/zero-server/src/adapters/postgresjs.js.map +1 -1
- package/out/zero-server/src/adapters/prisma.d.ts +66 -0
- package/out/zero-server/src/adapters/prisma.d.ts.map +1 -0
- package/out/zero-server/src/adapters/prisma.js +63 -0
- package/out/zero-server/src/adapters/prisma.js.map +1 -0
- package/out/zero-server/src/custom.js +1 -15
- package/out/zero-server/src/custom.js.map +1 -1
- package/out/zero-server/src/mod.d.ts +9 -8
- package/out/zero-server/src/mod.d.ts.map +1 -1
- package/out/zero-server/src/process-mutations.d.ts +2 -1
- package/out/zero-server/src/process-mutations.d.ts.map +1 -1
- package/out/zero-server/src/process-mutations.js +39 -4
- package/out/zero-server/src/process-mutations.js.map +1 -1
- package/out/zero-server/src/push-processor.js +1 -1
- package/out/zero-server/src/push-processor.js.map +1 -1
- package/out/zero-server/src/schema.d.ts.map +1 -1
- package/out/zero-server/src/schema.js +4 -1
- package/out/zero-server/src/schema.js.map +1 -1
- package/out/zero-server/src/zql-database.d.ts.map +1 -1
- package/out/zero-server/src/zql-database.js +18 -0
- package/out/zero-server/src/zql-database.js.map +1 -1
- package/out/zero-solid/src/mod.d.ts +1 -1
- package/out/zero-solid/src/mod.d.ts.map +1 -1
- package/out/zero-solid/src/solid-view.js +1 -0
- package/out/zero-solid/src/solid-view.js.map +1 -1
- package/out/zero-solid/src/use-query.d.ts +10 -1
- package/out/zero-solid/src/use-query.d.ts.map +1 -1
- package/out/zero-solid/src/use-query.js +22 -5
- package/out/zero-solid/src/use-query.js.map +1 -1
- package/out/zero-solid/src/use-zero.js +1 -1
- package/out/zero-solid/src/use-zero.js.map +1 -1
- package/out/zql/src/ivm/constraint.d.ts.map +1 -1
- package/out/zql/src/ivm/constraint.js +4 -1
- package/out/zql/src/ivm/constraint.js.map +1 -1
- package/out/zql/src/ivm/exists.d.ts.map +1 -1
- package/out/zql/src/ivm/exists.js +4 -1
- package/out/zql/src/ivm/exists.js.map +1 -1
- package/out/zql/src/ivm/join-utils.d.ts.map +1 -1
- package/out/zql/src/ivm/join-utils.js +8 -2
- package/out/zql/src/ivm/join-utils.js.map +1 -1
- package/out/zql/src/ivm/memory-source.d.ts.map +1 -1
- package/out/zql/src/ivm/memory-source.js +12 -3
- package/out/zql/src/ivm/memory-source.js.map +1 -1
- package/out/zql/src/ivm/push-accumulated.d.ts.map +1 -1
- package/out/zql/src/ivm/push-accumulated.js +25 -2
- package/out/zql/src/ivm/push-accumulated.js.map +1 -1
- package/out/zql/src/ivm/stream.d.ts.map +1 -1
- package/out/zql/src/ivm/stream.js +1 -1
- package/out/zql/src/ivm/stream.js.map +1 -1
- package/out/zql/src/ivm/take.d.ts.map +1 -1
- package/out/zql/src/ivm/take.js +24 -6
- package/out/zql/src/ivm/take.js.map +1 -1
- package/out/zql/src/ivm/union-fan-in.d.ts.map +1 -1
- package/out/zql/src/ivm/union-fan-in.js +12 -3
- package/out/zql/src/ivm/union-fan-in.js.map +1 -1
- package/out/zql/src/mutate/mutator.js +4 -4
- package/out/zql/src/mutate/mutator.js.map +1 -1
- package/out/zql/src/query/create-builder.js +3 -5
- package/out/zql/src/query/create-builder.js.map +1 -1
- package/out/zql/src/query/query-registry.js +4 -4
- package/out/zql/src/query/query-registry.js.map +1 -1
- package/out/zqlite/src/table-source.d.ts.map +1 -1
- package/out/zqlite/src/table-source.js +1 -2
- package/out/zqlite/src/table-source.js.map +1 -1
- package/package.json +8 -4
- package/out/zero-cache/src/services/change-source/column-metadata.d.ts.map +0 -1
- package/out/zero-cache/src/services/change-source/column-metadata.js.map +0 -1
- package/out/zero-cache/src/types/schema-versions.d.ts +0 -12
- package/out/zero-cache/src/types/schema-versions.d.ts.map +0 -1
- package/out/zero-cache/src/types/schema-versions.js +0 -28
- package/out/zero-cache/src/types/schema-versions.js.map +0 -1
package/out/zero-cache/src/services/change-streamer/change-streamer-service.js.map

@@ -1 +1 @@

Both sides of this hunk are single-line generated source maps ({"version":3,"file":"change-streamer-service.js","sources":["../../../../../../zero-cache/src/services/change-streamer/change-streamer-service.ts"],...}) that differ in their regenerated `mappings` and in the embedded `sourcesContent`. The source change visible in `sourcesContent` threads a new `backPressureThreshold` value from `initializeStreamer` through the `ChangeStreamerImpl` constructor into the `Storer`, and clears the pending-transaction watermark on `rollback` as well as `commit` messages:

 export async function initializeStreamer(
   ...
   autoReset: boolean,
+  backPressureThreshold: number,
   setTimeoutFn = setTimeout,
 ): Promise<ChangeStreamerService> {
   ...
   return new ChangeStreamerImpl(
     ...
     autoReset,
+    backPressureThreshold,
     setTimeoutFn,
   );
 }

 class ChangeStreamerImpl implements ChangeStreamerService {
   constructor(
     ...
     autoReset: boolean,
+    backPressureThreshold: number,
     setTimeoutFn = setTimeout,
   ) {
     ...
     this.#storer = new Storer(
       ...
       err => this.stop(err),
+      backPressureThreshold,
     );
     ...
   }

   async run() {
     ...
     for await (const change of stream.changes) {
       ...
-      if (type === 'commit') {
+      if (type === 'commit' || type === 'rollback') {
         watermark = null;
       }
       ...
     }
   }
 }
this.#replicaVersion,\n };\n }\n\n async #purgeOldChanges(): Promise<void> {\n const initial = [...this.#initialWatermarks];\n if (initial.length === 0) {\n this.#lc.warn?.('No initial watermarks to check for cleanup'); // Not expected.\n return;\n }\n const current = [...this.#forwarder.getAcks()];\n if (current.length === 0) {\n // Also not expected, but possible (e.g. subscriber connects, then disconnects).\n // Bail to be safe.\n this.#lc.warn?.('No subscribers to confirm cleanup');\n return;\n }\n try {\n const earliestInitial = min(...(initial as AtLeastOne<LexiVersion>));\n const earliestCurrent = min(...(current as AtLeastOne<LexiVersion>));\n if (earliestCurrent < earliestInitial) {\n this.#lc.info?.(\n `At least one client is behind backup (${earliestCurrent} < ${earliestInitial})`,\n );\n } else {\n const deleted = await this.#storer.purgeRecordsBefore(earliestInitial);\n this.#lc.info?.(`Purged ${deleted} changes before ${earliestInitial}`);\n this.#initialWatermarks.delete(earliestInitial);\n }\n } finally {\n if (this.#initialWatermarks.size) {\n // If there are unpurged watermarks to check, schedule the next purge.\n this.#state.setTimeout(() => this.#purgeOldChanges(), CLEANUP_DELAY_MS);\n }\n }\n }\n\n async stop(err?: unknown) {\n this.#state.stop(this.#lc, err);\n this.#stream?.changes.cancel();\n await this.#storer.stop();\n }\n}\n\n// The delay between receiving an initial, backup-based watermark\n// and performing a check of whether to purge records before it.\n// This delay should be long enough to handle situations like the following:\n//\n// 1. `litestream restore` downloads a backup for the `replication-manager`\n// 2. `replication-manager` starts up and runs this `change-streamer`\n// 3. `zero-cache`s that are running on a different replica connect to this\n// `change-streamer` after exponential backoff retries.\n//\n// It is possible for a `zero-cache`[3] to be behind the backup restored [1].\n// This cleanup delay (30 seconds) is thus set to be a value comfortably\n// longer than the max delay for exponential backoff (10 seconds) in\n// `services/running-state.ts`. 
This allows the `zero-cache` [3] to reconnect\n// so that the `change-streamer` can track its progress and know when it has\n// surpassed the initial watermark of the backup [1].\nconst CLEANUP_DELAY_MS = DEFAULT_MAX_RETRY_DELAY_MS * 3;\n"],"names":["ErrorType.WrongReplicaVersion"],"mappings":";;;;;;;;;;;;;;;AA6CA,eAAsB,mBACpB,IACA,OACA,QACA,kBACA,mBACA,UACA,cACA,mBACA,WACA,uBACA,eAAe,YACiB;AAEhC,QAAM,yBAAyB,IAAI,UAAU,KAAK;AAClD,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAGF,QAAM,EAAC,mBAAkB;AACzB,SAAO,IAAI;AAAA,IACT;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAEJ;AA4KA,MAAM,mBAAoD;AAAA,EAC/C;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAEA;AAAA,EACA;AAAA,EACA,yCAAyB,IAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUzB,WAAW,SAAA;AAAA,EAEX,aAAa;AAAA,IACpB;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAAA,EAGF;AAAA,EAEA,YACE,IACA,OACA,QACA,kBACA,mBACA,UACA,gBACA,QACA,WACA,uBACA,eAAe,YACf;AACA,SAAK,KAAK;AACV,SAAK,MAAM,GAAG,YAAY,aAAa,iBAAiB;AACxD,SAAK,SAAS;AACd,SAAK,YAAY;AACjB,SAAK,kBAAkB;AACvB,SAAK,UAAU;AACf,SAAK,UAAU,IAAI;AAAA,MACjB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,CAAA,aAAY,KAAK,SAAS,KAAK,KAAK,CAAC,UAAU,SAAS,CAAC,GAAG,SAAS,CAAC,CAAC,CAAC;AAAA,MACxE,CAAA,QAAO,KAAK,KAAK,GAAG;AAAA,MACpB;AAAA,IAAA;AAEF,SAAK,aAAa,IAAI,UAAA;AACtB,SAAK,aAAa;AAClB,SAAK,SAAS,IAAI,aAAa,KAAK,IAAI,QAAW,YAAY;AAAA,EACjE;AAAA,EAEA,MAAM,MAAM;AACV,SAAK,IAAI,OAAO,wBAAwB;AAIxC,UAAM,KAAK,QAAQ,gBAAA;AAGnB,SAAK,QACF,IAAA,EACA,KAAK,MAAM,KAAK,KAAA,CAAM,EACtB,MAAM,CAAA,MAAK,KAAK,KAAK,CAAC,CAAC;AAE1B,WAAO,KAAK,OAAO,aAAa;AAC9B,UAAI;AACJ,UAAI,YAA2B;AAC/B,UAAI;AACF,cAAM,aAAa,MAAM,KAAK,QAAQ,8BAAA;AACtC,cAAM,SAAS,MAAM,KAAK,QAAQ,YAAY,UAAU;AACxD,aAAK,UAAU;AACf,aAAK,OAAO,aAAA;AACZ,oBAAY;AAEZ,yBAAiB,UAAU,OAAO,SAAS;AACzC,gBAAM,CAAC,MAAM,GAAG,IAAI;AACpB,kBAAQ,MAAA;AAAA,YACN,KAAK;AACH,mBAAK,QAAQ,OAAO,MAAM;AAC1B;AAAA,YACF,KAAK;AACH,oBAAM,KAAK,sBAAsB,GAAG;AACpC;AAAA;AAAA,YACF,KAAK;AACH,0BAAY,OAAO,CAAC,EAAE;AACtB;AAAA,YACF,KAAK;AACH,kBAAI,cAAc,OAAO,CAAC,EAAE,WAAW;AACrC,sBAAM,IAAI;AAAA,kBACR,oBAAoB,OAAO,CAAC,EAAE,SAAS,qCAAqC,SAAS;AAAA,gBAAA;AAAA,cAEzF;AACA,mBAAK,WAAW,IAAI,CAAC;AACrB;AAAA,YACF;AACE,kBAAI,cAAc,MAAM;AACtB,sBAAM,IAAI;AAAA,kBACR,GAAG,IAAI,YAAY,IAAI,GAAG;AAAA,gBAAA;AAAA,cAE9B;AACA;AAAA,UAAA;AAGJ,eAAK,QAAQ,MAAM,CAAC,WAAW,MAAM,CAAC;AACtC,eAAK,WAAW,QAAQ,CAAC,WAAW,MAAM,CAAC;AAE3C,cAAI,SAAS,YAAY,SAAS,YAAY;AAC5C,wBAAY;AAAA,UACd;AAGA,gBAAM,eAAe,KAAK,QAAQ,aAAA;AAClC,cAAI,cAAc;AAChB,kBAAM;AAAA,UACR;AAAA,QACF;AAAA,MACF,SAAS,GAAG;AACV,cAAM;AAAA,MACR,UAAA;AACE,aAAK,SAAS,QAAQ,OAAA;AACtB,aAAK,UAAU;AAAA,MACjB;AAGA,UAAI,WAAW;AACb,aAAK,IAAI,OAAO,oCAAoC,SAAS,EAAE;AAC/D,aAAK,QAAQ,MAAA;AACb,aAAK,WAAW,QAAQ,CAAC,WAAW,CAAC,YAAY,EAAC,KAAK,WAAA,CAAW,CAAC,CAAC;AAAA,MACtE;AAEA,YAAM,KAAK,OAAO,QAAQ,KAAK,KAAK,GAAG;AAAA,IACzC;AACA,SAAK,IAAI,OAAO,wBAAwB;AAAA,EAC1C;AAAA,EAEA,MAAM,sBAAsB,KAA6B;AACvD,SAAK,IAAI,OAAO,4BAA4B,GAAG;AAC/C,UAAM,EAAC,QAAO;AAEd,YAAQ,KAAA;AAAA,MACN,KAAK;AACH,cAAM,kBAAkB,KAAK,WAAW,KAAK,MAAM;AACnD,cAAM;AAAA,UACJ,KAAK;AAAA,UACL;AAAA,UACA,IAAI,WAAW;AAAA,UACf,IAAI;AAAA,QAAA;AAEN,YAAI,KAAK,YAAY;AACnB,eAAK,IAAI,OAAO,8BAA8B;AAC9C,gBAAM,KAAK,KAAK,IAAI,iBAAiB;AAAA,QACvC;AACA;AAAA,MACF;AACE,oBAAe;AAAA,IAAA;AAAA,EAErB;AAAA,EAEA,UAAU,KAAqD;AAC7D,UAAM,EAAC,iBAAiB,IAAI,MAAM,gBAAgB,WAAW,YAAW;AACxE,QAAI,SAAS,WAAW;AACtB,WAAK,SAAS,QAAA;AAAA,IAChB;AACA,UAAM,aAAa,aAAa,OAAmB;AAAA,MACjD,SAAS,MAAM,KAAK,WAAW,OAAO,UAAU;AAAA,IAAA,CACjD;AACD,UAAM,aAAa,IAAI;AAAA,MACrB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAEF,QAAI,mBAAmB,KAAK,iBAAi
B;AAC3C,WAAK,IAAI;AAAA,QACP,2CAA2C,cAAc;AAAA,MAAA;AAE3D,iBAAW;AAAA,QACTA;AAAAA,QACA,8BACE,KAAK,eACP,eAAe,cAAc;AAAA,MAAA;AAAA,IAEjC,OAAO;AACL,WAAK,IAAI,QAAQ,qBAAqB,WAAW,EAAE,EAAE;AAErD,WAAK,WAAW,IAAI,UAAU;AAC9B,WAAK,QAAQ,QAAQ,YAAY,IAAI;AAErC,UAAI,SAAS;AACX,aAAK,gBAAgB,SAAS;AAAA,MAChC;AAAA,IACF;AACA,WAAO,QAAQ,QAAQ,UAAU;AAAA,EACnC;AAAA,EAEA,gBAAgB,WAAmB;AACjC,UAAM,WAAW,KAAK,mBAAmB;AACzC,SAAK,mBAAmB,IAAI,SAAS;AAErC,QAAI,aAAa,GAAG;AAClB,WAAK,OAAO,WAAW,MAAM,KAAK,iBAAA,GAAoB,gBAAgB;AAAA,IACxE;AAAA,EACF;AAAA,EAEA,MAAM,oBAGH;AACD,UAAM,eAAe,MAAM,KAAK,QAAQ,0BAAA;AACxC,WAAO;AAAA,MACL,gBAAgB,KAAK;AAAA,MACrB,cAAc,gBAAgB,KAAK;AAAA,IAAA;AAAA,EAEvC;AAAA,EAEA,MAAM,mBAAkC;AACtC,UAAM,UAAU,CAAC,GAAG,KAAK,kBAAkB;AAC3C,QAAI,QAAQ,WAAW,GAAG;AACxB,WAAK,IAAI,OAAO,4CAA4C;AAC5D;AAAA,IACF;AACA,UAAM,UAAU,CAAC,GAAG,KAAK,WAAW,SAAS;AAC7C,QAAI,QAAQ,WAAW,GAAG;AAGxB,WAAK,IAAI,OAAO,mCAAmC;AACnD;AAAA,IACF;AACA,QAAI;AACF,YAAM,kBAAkB,IAAI,GAAI,OAAmC;AACnE,YAAM,kBAAkB,IAAI,GAAI,OAAmC;AACnE,UAAI,kBAAkB,iBAAiB;AACrC,aAAK,IAAI;AAAA,UACP,yCAAyC,eAAe,MAAM,eAAe;AAAA,QAAA;AAAA,MAEjF,OAAO;AACL,cAAM,UAAU,MAAM,KAAK,QAAQ,mBAAmB,eAAe;AACrE,aAAK,IAAI,OAAO,UAAU,OAAO,mBAAmB,eAAe,EAAE;AACrE,aAAK,mBAAmB,OAAO,eAAe;AAAA,MAChD;AAAA,IACF,UAAA;AACE,UAAI,KAAK,mBAAmB,MAAM;AAEhC,aAAK,OAAO,WAAW,MAAM,KAAK,iBAAA,GAAoB,gBAAgB;AAAA,MACxE;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAM,KAAK,KAAe;AACxB,SAAK,OAAO,KAAK,KAAK,KAAK,GAAG;AAC9B,SAAK,SAAS,QAAQ,OAAA;AACtB,UAAM,KAAK,QAAQ,KAAA;AAAA,EACrB;AACF;AAiBA,MAAM,mBAAmB,6BAA6B;"}
package/out/zero-cache/src/services/change-streamer/change-streamer.d.ts

@@ -45,7 +45,7 @@ export interface ChangeStreamer {
      */
     subscribe(ctx: SubscriberContext): Promise<Source<Downstream>>;
 }
-export declare const PROTOCOL_VERSION =
+export declare const PROTOCOL_VERSION = 5;
 export type SubscriberContext = {
     /**
      * The supported change-streamer protocol version.
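The protocol version declared above now resolves to 5 (the previous value is not visible in this extract). Subscribers advertise the version they speak in SubscriberContext.protocolVersion; a hedged sketch of the kind of compatibility gate a consumer could apply — the check itself is hypothetical, only the PROTOCOL_VERSION export is from the package:

    import {PROTOCOL_VERSION} from './change-streamer.js';

    // Hypothetical gate; the package's actual negotiation logic is not shown
    // in this diff.
    function assertSupportedProtocol(requested: number): void {
      if (requested > PROTOCOL_VERSION) {
        throw new Error(
          `change-streamer protocol v${requested} exceeds supported v${PROTOCOL_VERSION}`,
        );
      }
    }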
@@ -129,40 +129,56 @@ export declare const downstreamSchema: v.UnionType<[v.TupleType<[v.Type<"status"
         commitWatermark: v.Type<string>;
     }, undefined>]>, v.TupleType<[v.Type<"data">, v.UnionType<[v.ObjectType<{
         tag: v.Type<"insert">;
-        relation: v.
-
-
-
-
-
+        relation: v.Type<{
+            rowKey: {
+                type?: "default" | "nothing" | "full" | "index" | undefined;
+                columns: string[];
+            };
+            keyColumns?: string[] | undefined;
+            replicaIdentity?: "default" | "nothing" | "full" | "index" | undefined;
+            schema: string;
+            name: string;
+        }>;
         new: v.Type<Record<string, import("../../../../shared/src/bigint-json.ts").JSONValue>>;
     }, undefined>, v.ObjectType<{
         tag: v.Type<"update">;
-        relation: v.
-
-
-
-
-
+        relation: v.Type<{
+            rowKey: {
+                type?: "default" | "nothing" | "full" | "index" | undefined;
+                columns: string[];
+            };
+            keyColumns?: string[] | undefined;
+            replicaIdentity?: "default" | "nothing" | "full" | "index" | undefined;
+            schema: string;
+            name: string;
+        }>;
         key: v.Type<Record<string, import("../../../../shared/src/bigint-json.ts").JSONValue> | null>;
         new: v.Type<Record<string, import("../../../../shared/src/bigint-json.ts").JSONValue>>;
     }, undefined>, v.ObjectType<{
         tag: v.Type<"delete">;
-        relation: v.
-
-
-
-
-
+        relation: v.Type<{
+            rowKey: {
+                type?: "default" | "nothing" | "full" | "index" | undefined;
+                columns: string[];
+            };
+            keyColumns?: string[] | undefined;
+            replicaIdentity?: "default" | "nothing" | "full" | "index" | undefined;
+            schema: string;
+            name: string;
+        }>;
         key: v.Type<Record<string, import("../../../../shared/src/bigint-json.ts").JSONValue>>;
     }, undefined>, v.ObjectType<{
         tag: v.Type<"truncate">;
-        relations: v.ArrayType<v.
-
-
-
-
-
+        relations: v.ArrayType<v.Type<{
+            rowKey: {
+                type?: "default" | "nothing" | "full" | "index" | undefined;
+                columns: string[];
+            };
+            keyColumns?: string[] | undefined;
+            replicaIdentity?: "default" | "nothing" | "full" | "index" | undefined;
+            schema: string;
+            name: string;
+        }>>;
     }, undefined>, v.ObjectType<{
         tag: v.Type<"create-table">;
         spec: v.ObjectType<Omit<{
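The v5 `relation` shape above nests the row key under rowKey: {columns, type}, keeping the old top-level keyColumns/replicaIdentity as optional fields. A hypothetical `data` message (all values invented) that satisfies the new insert schema:

    const insertMessage = [
      'data',
      {
        tag: 'insert',
        relation: {
          schema: 'public',
          name: 'issue',
          rowKey: {columns: ['id'], type: 'default'},
        },
        new: {id: 'i1', title: 'hello'},
      },
    ] as const;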
@@ -180,6 +196,12 @@ export declare const downstreamSchema: v.UnionType<[v.TupleType<[v.Type<"status"
     }, "schema"> & {
         schema: v.Type<string>;
     }, undefined>;
+    metadata: v.Optional<{
+        rowKey: {
+            type?: "default" | "nothing" | "full" | "index" | undefined;
+            columns: string[];
+        };
+    }>;
 }, undefined>, v.ObjectType<{
     tag: v.Type<"rename-table">;
     old: v.ObjectType<{
@@ -190,6 +212,24 @@ export declare const downstreamSchema: v.UnionType<[v.TupleType<[v.Type<"status"
         schema: v.Type<string>;
         name: v.Type<string>;
     }, undefined>;
+}, undefined>, v.ObjectType<{
+    tag: v.Type<"update-table-metadata">;
+    table: v.ObjectType<{
+        schema: v.Type<string>;
+        name: v.Type<string>;
+    }, undefined>;
+    old: v.ObjectType<{
+        rowKey: v.ObjectType<{
+            columns: v.ArrayType<v.Type<string>>;
+            type: v.Optional<"default" | "nothing" | "full" | "index">;
+        }, undefined>;
+    }, undefined>;
+    new: v.ObjectType<{
+        rowKey: v.ObjectType<{
+            columns: v.ArrayType<v.Type<string>>;
+            type: v.Optional<"default" | "nothing" | "full" | "index">;
+        }, undefined>;
+    }, undefined>;
 }, undefined>, v.ObjectType<{
     tag: v.Type<"add-column">;
     table: v.ObjectType<{
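The new `update-table-metadata` message above carries a table's old and new row keys. A hypothetical instance (values invented), e.g. for a primary key widened from (id) to (orgID, id):

    const updateTableMetadata = {
      tag: 'update-table-metadata',
      table: {schema: 'public', name: 'issue'},
      old: {rowKey: {columns: ['id'], type: 'default'}},
      new: {rowKey: {columns: ['orgID', 'id'], type: 'default'}},
    } as const;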
@@ -208,6 +248,12 @@ export declare const downstreamSchema: v.UnionType<[v.TupleType<[v.Type<"status"
         dflt: v.Optional<string | null>;
     }, undefined>;
     }, undefined>;
+    tableMetadata: v.Optional<{
+        rowKey: {
+            type?: "default" | "nothing" | "full" | "index" | undefined;
+            columns: string[];
+        };
+    }>;
 }, undefined>, v.ObjectType<{
     tag: v.Type<"update-column">;
     table: v.ObjectType<{
package/out/zero-cache/src/services/change-streamer/change-streamer.d.ts.map

@@ -1 +1 @@
-
{"version":3,"file":"change-streamer.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/change-streamer/change-streamer.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,CAAC,MAAM,kCAAkC,CAAC;AACtD,OAAO,KAAK,EAAC,MAAM,EAAC,MAAM,wBAAwB,CAAC;AACnD,OAAO,EAAC,KAAK,MAAM,EAAC,MAAM,2CAA2C,CAAC;AAEtE,OAAO,KAAK,EAAC,cAAc,EAAC,MAAM,6BAA6B,CAAC;AAChE,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,eAAe,CAAC;AAE3C;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAiCG;AACH,MAAM,WAAW,cAAc;IAC7B;;;;OAIG;IACH,SAAS,CAAC,GAAG,EAAE,iBAAiB,GAAG,OAAO,CAAC,MAAM,CAAC,UAAU,CAAC,CAAC,CAAC;CAChE;
+
{"version":3,"file":"change-streamer.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/change-streamer/change-streamer.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,CAAC,MAAM,kCAAkC,CAAC;AACtD,OAAO,KAAK,EAAC,MAAM,EAAC,MAAM,wBAAwB,CAAC;AACnD,OAAO,EAAC,KAAK,MAAM,EAAC,MAAM,2CAA2C,CAAC;AAEtE,OAAO,KAAK,EAAC,cAAc,EAAC,MAAM,6BAA6B,CAAC;AAChE,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,eAAe,CAAC;AAE3C;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAiCG;AACH,MAAM,WAAW,cAAc;IAC7B;;;;OAIG;IACH,SAAS,CAAC,GAAG,EAAE,iBAAiB,GAAG,OAAO,CAAC,MAAM,CAAC,UAAU,CAAC,CAAC,CAAC;CAChE;AAmBD,eAAO,MAAM,gBAAgB,IAAI,CAAC;AAElC,MAAM,MAAM,iBAAiB,GAAG;IAC9B;;OAEG;IACH,eAAe,EAAE,MAAM,CAAC;IAExB;;;OAGG;IACH,MAAM,EAAE,MAAM,GAAG,IAAI,CAAC;IAEtB;;OAEG;IACH,EAAE,EAAE,MAAM,CAAC;IAEX;;;;OAIG;IACH,IAAI,EAAE,cAAc,CAAC;IAErB;;;;OAIG;IACH,cAAc,EAAE,MAAM,CAAC;IAEvB;;;OAGG;IACH,SAAS,EAAE,MAAM,CAAC;IAElB;;;;;OAKG;IACH,OAAO,EAAE,OAAO,CAAC;CAClB,CAAC;AAEF,MAAM,MAAM,WAAW,GAAG;IACxB,MAAM,EAAE,MAAM,CAAC;IAEf;;;;OAIG;IACH,SAAS,EAAE,MAAM,CAAC;CACnB,CAAC;AAEF;;;GAGG;AACH,eAAO,MAAM,YAAY;;aAEvB,CAAC;AAEH,eAAO,MAAM,mBAAmB;;eAA+C,CAAC;AAEhF;;;;;GAKG;AACH,MAAM,MAAM,aAAa,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,mBAAmB,CAAC,CAAC;AAEhE,QAAA,MAAM,uBAAuB;;;aAG3B,CAAC;AAEH,MAAM,MAAM,iBAAiB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,uBAAuB,CAAC,CAAC;AAExE,QAAA,MAAM,WAAW;;;eAAyD,CAAC;AAE3E,eAAO,MAAM,gBAAgB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAI5B,CAAC;AAEF,MAAM,MAAM,KAAK,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,WAAW,CAAC,CAAC;AAEhD;;;;;;;;;;GAUG;AACH,MAAM,MAAM,UAAU,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,gBAAgB,CAAC,CAAC;AAE1D,MAAM,WAAW,qBAAsB,SAAQ,cAAc,EAAE,OAAO;IACpE;;;;OAIG;IACH,eAAe,CAAC,SAAS,EAAE,MAAM,GAAG,IAAI,CAAC;IAEzC,iBAAiB,IAAI,OAAO,CAAC;QAC3B,cAAc,EAAE,MAAM,CAAC;QACvB,YAAY,EAAE,MAAM,CAAC;KACtB,CAAC,CAAC;CACJ"}
package/out/zero-cache/src/services/change-streamer/change-streamer.js

@@ -2,7 +2,7 @@ import "../../../../shared/src/valita.js";
 import "../change-source/protocol/current/data.js";
 import { changeStreamDataSchema } from "../change-source/protocol/current/downstream.js";
 import { object, literal, tuple, string, number, union } from "@badrap/valita";
-const PROTOCOL_VERSION =
+const PROTOCOL_VERSION = 5;
 const statusSchema = object({
   tag: literal("status")
 });
package/out/zero-cache/src/services/change-streamer/change-streamer.js.map

@@ -1 +1 @@
-
{"version":3,"file":"change-streamer.js","sources":["../../../../../../zero-cache/src/services/change-streamer/change-streamer.ts"],"sourcesContent":["import * as v from '../../../../shared/src/valita.ts';\nimport type {Source} from '../../types/streams.ts';\nimport {type Change} from '../change-source/protocol/current/data.ts';\nimport {changeStreamDataSchema} from '../change-source/protocol/current/downstream.ts';\nimport type {ReplicatorMode} from '../replicator/replicator.ts';\nimport type {Service} from '../service.ts';\n\n/**\n * The ChangeStreamer is the component between replicators (\"subscribers\")\n * and a canonical upstream source of changes (e.g. a Postgres logical\n * replication slot). It facilitates multiple subscribers without incurring\n * the associated upstream expense (e.g. PG replication slots are resource\n * intensive) with a \"forward-store-ack\" procedure.\n *\n * * Changes from the upstream source are immediately **forwarded** to\n * connected subscribers to minimize latency.\n *\n * * They are then **stored** in a separate DB to facilitate catchup\n * of connecting subscribers that are behind.\n *\n * * **Acknowledgements** are sent upstream after they are successfully\n * stored.\n *\n * Unlike Postgres replication slots, in which the progress of a static\n * subscriber is tracked in the replication slot, the ChangeStreamer\n * supports a dynamic set of subscribers (i.e.. zero-caches) that can\n * can continually change.\n *\n * However, it is not the case that the ChangeStreamer needs to support\n * arbitrarily old subscribers. Because the replica is continually\n * backed up to a global location and used to initialize new subscriber\n * tasks, an initial subscription request from a subscriber constitutes\n * a signal for how \"behind\" a new subscriber task can be. This is\n * reflected in the {@link SubscriberContext}, which indicates whether\n * the watermark corresponds to an \"initial\" watermark derived from the\n * replica at task startup.\n *\n * The ChangeStreamer uses a combination of this signal with ACK\n * responses from connected subscribers to determine the watermark up\n * to which it is safe to purge old change log entries.\n */\nexport interface ChangeStreamer {\n /**\n * Subscribes to changes based on the supplied subscriber `ctx`,\n * which indicates the watermark at which the subscriber is up to\n * date.\n */\n subscribe(ctx: SubscriberContext): Promise<Source<Downstream>>;\n}\n\n// v1: Client-side support for JSON_FORMAT. Introduced in 0.18.\n// v2: Adds the \"status\" message which is initially used to signal that the\n// subscription is valid (i.e. starting at the requested watermark).\n//
+
{"version":3,"file":"change-streamer.js","sources":["../../../../../../zero-cache/src/services/change-streamer/change-streamer.ts"],"sourcesContent":["import * as v from '../../../../shared/src/valita.ts';\nimport type {Source} from '../../types/streams.ts';\nimport {type Change} from '../change-source/protocol/current/data.ts';\nimport {changeStreamDataSchema} from '../change-source/protocol/current/downstream.ts';\nimport type {ReplicatorMode} from '../replicator/replicator.ts';\nimport type {Service} from '../service.ts';\n\n/**\n * The ChangeStreamer is the component between replicators (\"subscribers\")\n * and a canonical upstream source of changes (e.g. a Postgres logical\n * replication slot). It facilitates multiple subscribers without incurring\n * the associated upstream expense (e.g. PG replication slots are resource\n * intensive) with a \"forward-store-ack\" procedure.\n *\n * * Changes from the upstream source are immediately **forwarded** to\n * connected subscribers to minimize latency.\n *\n * * They are then **stored** in a separate DB to facilitate catchup\n * of connecting subscribers that are behind.\n *\n * * **Acknowledgements** are sent upstream after they are successfully\n * stored.\n *\n * Unlike Postgres replication slots, in which the progress of a static\n * subscriber is tracked in the replication slot, the ChangeStreamer\n * supports a dynamic set of subscribers (i.e.. zero-caches) that can\n * can continually change.\n *\n * However, it is not the case that the ChangeStreamer needs to support\n * arbitrarily old subscribers. Because the replica is continually\n * backed up to a global location and used to initialize new subscriber\n * tasks, an initial subscription request from a subscriber constitutes\n * a signal for how \"behind\" a new subscriber task can be. This is\n * reflected in the {@link SubscriberContext}, which indicates whether\n * the watermark corresponds to an \"initial\" watermark derived from the\n * replica at task startup.\n *\n * The ChangeStreamer uses a combination of this signal with ACK\n * responses from connected subscribers to determine the watermark up\n * to which it is safe to purge old change log entries.\n */\nexport interface ChangeStreamer {\n /**\n * Subscribes to changes based on the supplied subscriber `ctx`,\n * which indicates the watermark at which the subscriber is up to\n * date.\n */\n subscribe(ctx: SubscriberContext): Promise<Source<Downstream>>;\n}\n\n// v1: Client-side support for JSON_FORMAT. Introduced in 0.18.\n// v2: v0.19\n// - Adds the \"status\" message which is initially used to signal that the\n// subscription is valid (i.e. starting at the requested watermark).\n// v3: Adds the \"taskID\" to the subscription context, and support for\n// the BackupMonitor-mediated \"/snapshot\" request.\n// v4: Adds the \"replicaVersion\" and \"minWatermark\" fields to the \"/snapshot\"\n// status request so that a subscriber can verify whether its replica,\n// whether it be restored or existing in a permanent volume, is compatible\n// with the change-streamer.\n// v5: v0.26\n// - Moves relation.keyColumns and relation.replicaIdentity to\n// relation.rowKey: { columns, type }.\n// - Adds `metadata` to `create-table` message\n// - Adds `tableMetadata` to `add-column` message\n// - Adds `table-update-metadata` message\n\nexport const PROTOCOL_VERSION = 5;\n\nexport type SubscriberContext = {\n /**\n * The supported change-streamer protocol version.\n */\n protocolVersion: number;\n\n /**\n * Task ID. 
This is used to link the request with a preceding snapshot\n * reservation.\n */\n taskID: string | null; // TODO: Make required when v3 is min.\n\n /**\n * Subscriber id. This is only used for debugging.\n */\n id: string;\n\n /**\n * The ReplicatorMode of the subscriber. 'backup' indicates that the\n * subscriber is local to the `change-streamer` in the `replication-manager`,\n * while 'serving' indicates that user-facing requests depend on the subscriber.\n */\n mode: ReplicatorMode;\n\n /**\n * The ChangeStreamer will return an Error if the subscriber is\n * on a different replica version (i.e. the initial snapshot associated\n * with the replication slot).\n */\n replicaVersion: string;\n\n /**\n * The watermark up to which the subscriber is up to date.\n * Only changes after the watermark will be streamed.\n */\n watermark: string;\n\n /**\n * Whether this is the first subscription request made by the task,\n * i.e. indicating that the watermark comes from a restored replica\n * backup. The ChangeStreamer uses this to determine which changes\n * are safe to purge from the Storer.\n */\n initial: boolean;\n};\n\nexport type ChangeEntry = {\n change: Change;\n\n /**\n * Note that it is technically possible for multiple changes to have\n * the same watermark, but that of a commit is guaranteed to be final,\n * so subscribers should only store the watermark of commit changes.\n */\n watermark: string;\n};\n\n/**\n * The StatusMessage payload for now is empty, but can be extended to\n * include meta-level information in the future.\n */\nexport const statusSchema = v.object({\n tag: v.literal('status'),\n});\n\nexport const statusMessageSchema = v.tuple([v.literal('status'), statusSchema]);\n\n/**\n * A StatusMessage will be immediately sent on a (v2+) subscription to\n * indicate that the subscription is valid (i.e. starting at the requested\n * watermark). Invalid subscriptions will instead result in a\n * SubscriptionError as the first message.\n */\nexport type StatusMessage = v.Infer<typeof statusMessageSchema>;\n\nconst subscriptionErrorSchema = v.object({\n type: v.number(), // ErrorType\n message: v.string().optional(),\n});\n\nexport type SubscriptionError = v.Infer<typeof subscriptionErrorSchema>;\n\nconst errorSchema = v.tuple([v.literal('error'), subscriptionErrorSchema]);\n\nexport const downstreamSchema = v.union(\n statusMessageSchema,\n changeStreamDataSchema,\n errorSchema,\n);\n\nexport type Error = v.Infer<typeof errorSchema>;\n\n/**\n * A stream of transactions, each starting with a {@link Begin} message,\n * containing one or more {@link Data} messages, and ending with a\n * {@link Commit} or {@link Rollback} message. The 'commit' tuple\n * includes a `watermark` that should be stored with the committed\n * data and used for resuming a subscription (e.g. in the\n * {@link SubscriberContext}).\n *\n * A {@link SubscriptionError} indicates an unrecoverable error that requires\n * manual intervention (e.g. 
configuration / operational error).\n */\nexport type Downstream = v.Infer<typeof downstreamSchema>;\n\nexport interface ChangeStreamerService extends ChangeStreamer, Service {\n /**\n * Notifies the change streamer of a watermark that has been backed up,\n * indicating that changes before the watermark can be purged if active\n * subscribers have progressed beyond the watermark.\n */\n scheduleCleanup(watermark: string): void;\n\n getChangeLogState(): Promise<{\n replicaVersion: string;\n minWatermark: string;\n }>;\n}\n"],"names":["v.object","v.literal","v.tuple","v.number","v.string","v.union"],"mappings":";;;;AAmEO,MAAM,mBAAmB;AA+DzB,MAAM,eAAeA,OAAS;AAAA,EACnC,KAAKC,QAAU,QAAQ;AACzB,CAAC;AAEM,MAAM,sBAAsBC,MAAQ,CAACD,QAAU,QAAQ,GAAG,YAAY,CAAC;AAU9E,MAAM,0BAA0BD,OAAS;AAAA,EACvC,MAAMG,OAAE;AAAA;AAAA,EACR,SAASC,OAAE,EAAS,SAAA;AACtB,CAAC;AAID,MAAM,cAAcF,MAAQ,CAACD,QAAU,OAAO,GAAG,uBAAuB,CAAC;AAElE,MAAM,mBAAmBI;AAAAA,EAC9B;AAAA,EACA;AAAA,EACA;AACF;"}
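The source map above embeds documentation for every SubscriberContext field; a hypothetical value (identifiers and watermarks invented) for a serving replicator resuming from a restored backup:

    import {PROTOCOL_VERSION, type SubscriberContext} from './change-streamer.js';

    const ctx: SubscriberContext = {
      protocolVersion: PROTOCOL_VERSION, // 5
      taskID: 'task-0', // null is still accepted until v3 becomes the minimum
      id: 'zero-cache-0', // debugging only
      mode: 'serving', // 'backup' when co-located with the change-streamer
      replicaVersion: '02e5gc4', // must match the streamer's replica version
      watermark: '0g2e4', // only changes after this point are streamed
      initial: true, // the watermark comes from a restored replica backup
    };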
package/out/zero-cache/src/services/change-streamer/schema/tables.d.ts

@@ -17,6 +17,7 @@ export type ChangeLogEntry = {
 export type ReplicationState = {
     lastWatermark: string;
     owner: string | null;
+    ownerAddress: string | null;
 };
 export declare function createReplicationStateTable(shard: ShardID): string;
 export declare function discoverChangeStreamerAddress(shard: ShardID, sql: PostgresDB): Promise<string | null>;
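The new ownerAddress column records where the current changeLog owner can be reached, and discoverChangeStreamerAddress reads it back. A sketch of the assumed lookup pattern (the fallback behavior is a guess, not from the package; type imports for ShardID/PostgresDB elided):

    import {discoverChangeStreamerAddress} from './schema/tables.js';

    async function findChangeStreamer(shard: ShardID, sql: PostgresDB) {
      const ownerAddress = await discoverChangeStreamerAddress(shard, sql);
      // null: no change-streamer has registered itself as owner yet, so the
      // caller would fall back to its statically configured address.
      return ownerAddress;
    }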
package/out/zero-cache/src/services/change-streamer/schema/tables.d.ts.map

@@ -1 +1 @@
-
{"version":3,"file":"tables.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/change-streamer/schema/tables.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,QAAQ,MAAM,UAAU,CAAC;AAErC,OAAO,EAAC,UAAU,EAAC,MAAM,0CAA0C,CAAC;AAEpE,OAAO,EAA0B,KAAK,UAAU,EAAC,MAAM,sBAAsB,CAAC;AAC9E,OAAO,EAAY,KAAK,OAAO,EAAC,MAAM,0BAA0B,CAAC;AACjE,OAAO,KAAK,EAAC,MAAM,EAAC,MAAM,8CAA8C,CAAC;AACzE,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,8CAA8C,CAAC;AAOpF,eAAO,MAAM,SAAS,QAAQ,CAAC;AAM/B,MAAM,MAAM,cAAc,GAAG;IAG3B,SAAS,EAAE,MAAM,CAAC;IAClB,MAAM,EAAE,MAAM,CAAC;CAChB,CAAC;AAiBF;;;GAGG;AACH,MAAM,MAAM,gBAAgB,GAAG;IAC7B,aAAa,EAAE,MAAM,CAAC;IACtB,KAAK,EAAE,MAAM,GAAG,IAAI,CAAC;
+
{"version":3,"file":"tables.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/change-streamer/schema/tables.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,QAAQ,MAAM,UAAU,CAAC;AAErC,OAAO,EAAC,UAAU,EAAC,MAAM,0CAA0C,CAAC;AAEpE,OAAO,EAA0B,KAAK,UAAU,EAAC,MAAM,sBAAsB,CAAC;AAC9E,OAAO,EAAY,KAAK,OAAO,EAAC,MAAM,0BAA0B,CAAC;AACjE,OAAO,KAAK,EAAC,MAAM,EAAC,MAAM,8CAA8C,CAAC;AACzE,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,8CAA8C,CAAC;AAOpF,eAAO,MAAM,SAAS,QAAQ,CAAC;AAM/B,MAAM,MAAM,cAAc,GAAG;IAG3B,SAAS,EAAE,MAAM,CAAC;IAClB,MAAM,EAAE,MAAM,CAAC;CAChB,CAAC;AAiBF;;;GAGG;AACH,MAAM,MAAM,gBAAgB,GAAG;IAC7B,aAAa,EAAE,MAAM,CAAC;IACtB,KAAK,EAAE,MAAM,GAAG,IAAI,CAAC;IACrB,YAAY,EAAE,MAAM,GAAG,IAAI,CAAC;CAC7B,CAAC;AAEF,wBAAgB,2BAA2B,CAAC,KAAK,EAAE,OAAO,UASzD;AAED,wBAAsB,6BAA6B,CACjD,KAAK,EAAE,OAAO,EACd,GAAG,EAAE,UAAU,GACd,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC,CAIxB;AAED;;;;;GAKG;AACH,MAAM,MAAM,iBAAiB,GAAG;IAC9B,cAAc,EAAE,MAAM,CAAC;IACvB,YAAY,EAAE,SAAS,MAAM,EAAE,CAAC;CACjC,CAAC;AAsBF,wBAAsB,cAAc,CAClC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,QAAQ,CAAC,cAAc,EAC3B,KAAK,EAAE,OAAO,iBAIf;AAED,wBAAsB,iBAAiB,CAAC,GAAG,EAAE,UAAU,EAAE,KAAK,EAAE,OAAO,iBAKtE;AAED,wBAAsB,uBAAuB,CAC3C,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,UAAU,EACd,iBAAiB,EAAE,iBAAiB,EACpC,KAAK,EAAE,OAAO,EACd,SAAS,EAAE,OAAO,iBAiFnB;AAED,qBAAa,eAAgB,SAAQ,UAAU;IAC7C,QAAQ,CAAC,IAAI,qBAAqB;CACnC"}
package/out/zero-cache/src/services/change-streamer/schema/tables.js

@@ -75,7 +75,9 @@ async function ensureReplicationConfig(lc, db, subscriptionState, shard, autoRes
   const { publications, replicaVersion, watermark } = subscriptionState;
   const replicaConfig = { publications, replicaVersion };
   const replicationState = {
-    lastWatermark: replicaVersion
+    lastWatermark: replicaVersion,
+    owner: null,
+    ownerAddress: null
   };
   const schema2 = cdcSchema(shard);
   await db.begin(async (sql) => {
@@ -97,16 +99,15 @@ async function ensureReplicationConfig(lc, db, subscriptionState, shard, autoRes
       );
       stmts.push(
         sql`TRUNCATE TABLE ${sql(schema2)}."changeLog"`,
-        sql`TRUNCATE TABLE ${sql(schema2)}."replicationConfig"
-        sql`TRUNCATE TABLE ${sql(schema2)}."replicationState"`
+        sql`TRUNCATE TABLE ${sql(schema2)}."replicationConfig"`
       );
     }
   }
   if (results.length === 0 || stmts.length > 0) {
     stmts.push(
       sql`INSERT INTO ${sql(schema2)}."replicationConfig" ${sql(replicaConfig)}`,
-      sql`INSERT INTO ${sql(schema2)}."replicationState"
-
+      sql`INSERT INTO ${sql(schema2)}."replicationState" ${sql(replicationState)}
+        ON CONFLICT (lock) DO UPDATE SET ${sql(replicationState)}`
     );
     return Promise.all(stmts);
   }
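The rationale, per the comment embedded in the tables.js source map below: the replicationState table is deliberately not TRUNCATEd on reset. Overwriting its single row with INSERT ... ON CONFLICT (lock) DO UPDATE touches the row that a stale, concurrently running change-streamer may hold a pending transaction on, triggering the serialization failure that aborts it; deleting the row and inserting a fresh one may not. A sketch of the resulting upsert (assumes a postgres.js `sql` instance in scope; schema name illustrative):

    const replicationState = {lastWatermark: '00', owner: null, ownerAddress: null};
    await sql`INSERT INTO ${sql('my_app/cdc')}."replicationState" ${sql(replicationState)}
        ON CONFLICT (lock) DO UPDATE SET ${sql(replicationState)}`;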
package/out/zero-cache/src/services/change-streamer/schema/tables.js.map

@@ -1 +1 @@
-
{"version":3,"file":"tables.js","sources":["../../../../../../../zero-cache/src/services/change-streamer/schema/tables.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {ident} from 'pg-format';\nimport type postgres from 'postgres';\nimport {type PendingQuery, type Row} from 'postgres';\nimport {AbortError} from '../../../../../shared/src/abort-error.ts';\nimport {equals} from '../../../../../shared/src/set-utils.ts';\nimport {disableStatementTimeout, type PostgresDB} from '../../../types/pg.ts';\nimport {cdcSchema, type ShardID} from '../../../types/shards.ts';\nimport type {Change} from '../../change-source/protocol/current/data.ts';\nimport type {SubscriptionState} from '../../replicator/schema/replication-state.ts';\n\n// For readability in the sql statements.\nfunction schema(shard: ShardID) {\n return ident(cdcSchema(shard));\n}\n\nexport const PG_SCHEMA = 'cdc';\n\nfunction createSchema(shard: ShardID) {\n return /*sql*/ `CREATE SCHEMA IF NOT EXISTS ${schema(shard)};`;\n}\n\nexport type ChangeLogEntry = {\n // A strictly monotonically increasing, lexicographically sortable\n // value that uniquely identifies a position in the change stream.\n watermark: string;\n change: Change;\n};\n\nfunction createChangeLogTable(shard: ShardID) {\n // Note: The \"change\" column used to be JSONB, but that was problematic in that\n // it does not handle the NULL unicode character.\n // https://vladimir.varank.in/notes/2021/01/you-dont-insert-unicode-null-character-as-postgres-jsonb/\n return /*sql*/ `\n CREATE TABLE ${schema(shard)}.\"changeLog\" (\n watermark TEXT,\n pos INT8,\n change JSON NOT NULL,\n precommit TEXT, -- Only exists on commit entries. Purely for debugging.\n PRIMARY KEY (watermark, pos)\n );\n`;\n}\n\n/**\n * Tracks the watermark from which to resume the change stream and the\n * current owner (task ID) acting as the single writer to the changeLog.\n */\nexport type ReplicationState = {\n lastWatermark: string;\n owner: string | null;\n};\n\nexport function createReplicationStateTable(shard: ShardID) {\n return /*sql*/ `\n CREATE TABLE ${schema(shard)}.\"replicationState\" (\n \"lastWatermark\" TEXT NOT NULL,\n \"owner\" TEXT,\n \"ownerAddress\" TEXT,\n \"lock\" INTEGER PRIMARY KEY DEFAULT 1 CHECK (lock=1)\n );\n`;\n}\n\nexport async function discoverChangeStreamerAddress(\n shard: ShardID,\n sql: PostgresDB,\n): Promise<string | null> {\n const result = await sql<{ownerAddress: string | null}[]> /*sql*/ `\n SELECT \"ownerAddress\" FROM ${sql(cdcSchema(shard))}.\"replicationState\"`;\n return result[0].ownerAddress;\n}\n\n/**\n * This mirrors the analogously named table in the SQLite replica\n * (`services/replicator/schema/replication-state.ts`), and is used\n * to detect when the replica has been reset and is no longer compatible\n * with the current ChangeLog.\n */\nexport type ReplicationConfig = {\n replicaVersion: string;\n publications: readonly string[];\n};\n\nfunction createReplicationConfigTable(shard: ShardID) {\n return /*sql*/ `\n CREATE TABLE ${schema(shard)}.\"replicationConfig\" (\n \"replicaVersion\" TEXT NOT NULL,\n \"publications\" TEXT[] NOT NULL,\n \"resetRequired\" BOOL,\n \"lock\" INTEGER PRIMARY KEY DEFAULT 1 CHECK (lock=1)\n );\n`;\n}\n\nfunction createTables(shard: ShardID) {\n return (\n createSchema(shard) +\n createChangeLogTable(shard) +\n createReplicationStateTable(shard) +\n createReplicationConfigTable(shard)\n );\n}\n\nexport async function setupCDCTables(\n lc: LogContext,\n db: postgres.TransactionSql,\n 
shard: ShardID,\n) {\n lc.info?.(`Setting up CDC tables`);\n await db.unsafe(createTables(shard));\n}\n\nexport async function markResetRequired(sql: PostgresDB, shard: ShardID) {\n const schema = cdcSchema(shard);\n await sql`\n UPDATE ${sql(schema)}.\"replicationConfig\"\n SET \"resetRequired\" = true`;\n}\n\nexport async function ensureReplicationConfig(\n lc: LogContext,\n db: PostgresDB,\n subscriptionState: SubscriptionState,\n shard: ShardID,\n autoReset: boolean,\n) {\n const {publications, replicaVersion, watermark} = subscriptionState;\n const replicaConfig = {publications, replicaVersion};\n const replicationState:
+
{"version":3,"file":"tables.js","sources":["../../../../../../../zero-cache/src/services/change-streamer/schema/tables.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {ident} from 'pg-format';\nimport type postgres from 'postgres';\nimport {type PendingQuery, type Row} from 'postgres';\nimport {AbortError} from '../../../../../shared/src/abort-error.ts';\nimport {equals} from '../../../../../shared/src/set-utils.ts';\nimport {disableStatementTimeout, type PostgresDB} from '../../../types/pg.ts';\nimport {cdcSchema, type ShardID} from '../../../types/shards.ts';\nimport type {Change} from '../../change-source/protocol/current/data.ts';\nimport type {SubscriptionState} from '../../replicator/schema/replication-state.ts';\n\n// For readability in the sql statements.\nfunction schema(shard: ShardID) {\n return ident(cdcSchema(shard));\n}\n\nexport const PG_SCHEMA = 'cdc';\n\nfunction createSchema(shard: ShardID) {\n return /*sql*/ `CREATE SCHEMA IF NOT EXISTS ${schema(shard)};`;\n}\n\nexport type ChangeLogEntry = {\n // A strictly monotonically increasing, lexicographically sortable\n // value that uniquely identifies a position in the change stream.\n watermark: string;\n change: Change;\n};\n\nfunction createChangeLogTable(shard: ShardID) {\n // Note: The \"change\" column used to be JSONB, but that was problematic in that\n // it does not handle the NULL unicode character.\n // https://vladimir.varank.in/notes/2021/01/you-dont-insert-unicode-null-character-as-postgres-jsonb/\n return /*sql*/ `\n CREATE TABLE ${schema(shard)}.\"changeLog\" (\n watermark TEXT,\n pos INT8,\n change JSON NOT NULL,\n precommit TEXT, -- Only exists on commit entries. Purely for debugging.\n PRIMARY KEY (watermark, pos)\n );\n`;\n}\n\n/**\n * Tracks the watermark from which to resume the change stream and the\n * current owner (task ID) acting as the single writer to the changeLog.\n */\nexport type ReplicationState = {\n lastWatermark: string;\n owner: string | null;\n ownerAddress: string | null;\n};\n\nexport function createReplicationStateTable(shard: ShardID) {\n return /*sql*/ `\n CREATE TABLE ${schema(shard)}.\"replicationState\" (\n \"lastWatermark\" TEXT NOT NULL,\n \"owner\" TEXT,\n \"ownerAddress\" TEXT,\n \"lock\" INTEGER PRIMARY KEY DEFAULT 1 CHECK (lock=1)\n );\n`;\n}\n\nexport async function discoverChangeStreamerAddress(\n shard: ShardID,\n sql: PostgresDB,\n): Promise<string | null> {\n const result = await sql<{ownerAddress: string | null}[]> /*sql*/ `\n SELECT \"ownerAddress\" FROM ${sql(cdcSchema(shard))}.\"replicationState\"`;\n return result[0].ownerAddress;\n}\n\n/**\n * This mirrors the analogously named table in the SQLite replica\n * (`services/replicator/schema/replication-state.ts`), and is used\n * to detect when the replica has been reset and is no longer compatible\n * with the current ChangeLog.\n */\nexport type ReplicationConfig = {\n replicaVersion: string;\n publications: readonly string[];\n};\n\nfunction createReplicationConfigTable(shard: ShardID) {\n return /*sql*/ `\n CREATE TABLE ${schema(shard)}.\"replicationConfig\" (\n \"replicaVersion\" TEXT NOT NULL,\n \"publications\" TEXT[] NOT NULL,\n \"resetRequired\" BOOL,\n \"lock\" INTEGER PRIMARY KEY DEFAULT 1 CHECK (lock=1)\n );\n`;\n}\n\nfunction createTables(shard: ShardID) {\n return (\n createSchema(shard) +\n createChangeLogTable(shard) +\n createReplicationStateTable(shard) +\n createReplicationConfigTable(shard)\n );\n}\n\nexport async function setupCDCTables(\n lc: LogContext,\n db: 
postgres.TransactionSql,\n shard: ShardID,\n) {\n lc.info?.(`Setting up CDC tables`);\n await db.unsafe(createTables(shard));\n}\n\nexport async function markResetRequired(sql: PostgresDB, shard: ShardID) {\n const schema = cdcSchema(shard);\n await sql`\n UPDATE ${sql(schema)}.\"replicationConfig\"\n SET \"resetRequired\" = true`;\n}\n\nexport async function ensureReplicationConfig(\n lc: LogContext,\n db: PostgresDB,\n subscriptionState: SubscriptionState,\n shard: ShardID,\n autoReset: boolean,\n) {\n const {publications, replicaVersion, watermark} = subscriptionState;\n const replicaConfig = {publications, replicaVersion};\n const replicationState: ReplicationState = {\n lastWatermark: replicaVersion,\n owner: null,\n ownerAddress: null,\n };\n const schema = cdcSchema(shard);\n\n await db.begin(async sql => {\n disableStatementTimeout(sql);\n\n const stmts: PendingQuery<Row[]>[] = [];\n const results = await sql<\n {\n replicaVersion: string;\n publications: string[];\n resetRequired: boolean | null;\n }[]\n > /*sql*/ `\n SELECT \"replicaVersion\", \"publications\", \"resetRequired\" \n FROM ${sql(schema)}.\"replicationConfig\"`;\n\n if (results.length) {\n const {replicaVersion, publications} = results[0];\n if (\n replicaVersion !== replicaConfig.replicaVersion ||\n !equals(new Set(publications), new Set(replicaConfig.publications))\n ) {\n if (replicaConfig.replicaVersion !== watermark) {\n throw new AutoResetSignal(\n `Cannot reset change db@${replicaVersion} to ` +\n `service replica@${replicaConfig.replicaVersion} ` +\n `from watermark ${watermark}`,\n );\n }\n lc.info?.(\n `Data in cdc tables @${replicaVersion} is incompatible ` +\n `with replica @${replicaConfig.replicaVersion}. Clearing tables.`,\n );\n // Note: The replicationState table is explicitly not TRUNCATE'd.\n // Any existing row must be overwritten by an UPDATE or\n // INSERT ... ON CONFLICT clause in order to correctly abort\n // any pending transaction by a concurrently running\n // change-streamer. 
Deleting the existing row and creating\n // a new one, on the other hand, may not properly trigger the\n // SERIALIZATION failure necessary to abort the pending tx.\n stmts.push(\n sql`TRUNCATE TABLE ${sql(schema)}.\"changeLog\"`,\n sql`TRUNCATE TABLE ${sql(schema)}.\"replicationConfig\"`,\n );\n }\n }\n // Initialize (or re-initialize TRUNCATED) tables\n if (results.length === 0 || stmts.length > 0) {\n stmts.push(\n sql`INSERT INTO ${sql(schema)}.\"replicationConfig\" ${sql(replicaConfig)}`,\n sql`INSERT INTO ${sql(schema)}.\"replicationState\" ${sql(replicationState)} \n ON CONFLICT (lock) DO UPDATE SET ${sql(replicationState)}`,\n );\n return Promise.all(stmts);\n }\n\n const {resetRequired} = results[0];\n if (resetRequired) {\n if (autoReset) {\n throw new AutoResetSignal('reset required by replication stream');\n }\n lc.error?.(\n '\\n\\n\\n' +\n 'Reset required but --auto-reset is not enabled.\\n' +\n 'This can happen for upstream databases that do not support event triggers.\\n' +\n 'To correct this, see https://zero.rocicorp.dev/docs/connecting-to-postgres#schema-changes' +\n '\\n\\n\\n',\n );\n }\n\n return [];\n });\n}\n\nexport class AutoResetSignal extends AbortError {\n readonly name = 'AutoResetSignal';\n}\n"],"names":["schema","replicaVersion","publications"],"mappings":";;;;;;AAYA,SAAS,OAAO,OAAgB;AAC9B,SAAO,MAAM,UAAU,KAAK,CAAC;AAC/B;AAIA,SAAS,aAAa,OAAgB;AACpC;AAAA;AAAA,IAAe,+BAA+B,OAAO,KAAK,CAAC;AAAA;AAC7D;AASA,SAAS,qBAAqB,OAAgB;AAI5C;AAAA;AAAA,IAAe;AAAA,iBACA,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAQ9B;AAYO,SAAS,4BAA4B,OAAgB;AAC1D;AAAA;AAAA,IAAe;AAAA,iBACA,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAO9B;AAEA,eAAsB,8BACpB,OACA,KACwB;AACxB,QAAM,SAAS,MAAM;AAAA,iCACU,IAAI,UAAU,KAAK,CAAC,CAAC;AACpD,SAAO,OAAO,CAAC,EAAE;AACnB;AAaA,SAAS,6BAA6B,OAAgB;AACpD;AAAA;AAAA,IAAe;AAAA,iBACA,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAO9B;AAEA,SAAS,aAAa,OAAgB;AACpC,SACE,aAAa,KAAK,IAClB,qBAAqB,KAAK,IAC1B,4BAA4B,KAAK,IACjC,6BAA6B,KAAK;AAEtC;AAEA,eAAsB,eACpB,IACA,IACA,OACA;AACA,KAAG,OAAO,uBAAuB;AACjC,QAAM,GAAG,OAAO,aAAa,KAAK,CAAC;AACrC;AAEA,eAAsB,kBAAkB,KAAiB,OAAgB;AACvE,QAAMA,UAAS,UAAU,KAAK;AAC9B,QAAM;AAAA,WACG,IAAIA,OAAM,CAAC;AAAA;AAEtB;AAEA,eAAsB,wBACpB,IACA,IACA,mBACA,OACA,WACA;AACA,QAAM,EAAC,cAAc,gBAAgB,UAAA,IAAa;AAClD,QAAM,gBAAgB,EAAC,cAAc,eAAA;AACrC,QAAM,mBAAqC;AAAA,IACzC,eAAe;AAAA,IACf,OAAO;AAAA,IACP,cAAc;AAAA,EAAA;AAEhB,QAAMA,UAAS,UAAU,KAAK;AAE9B,QAAM,GAAG,MAAM,OAAM,QAAO;AAC1B,4BAAwB,GAAG;AAE3B,UAAM,QAA+B,CAAA;AACrC,UAAM,UAAU,MAAM;AAAA;AAAA,aAQb,IAAIA,OAAM,CAAC;AAEpB,QAAI,QAAQ,QAAQ;AAClB,YAAM,EAAC,gBAAAC,iBAAgB,cAAAC,cAAAA,IAAgB,QAAQ,CAAC;AAChD,UACED,oBAAmB,cAAc,kBACjC,CAAC,OAAO,IAAI,IAAIC,aAAY,GAAG,IAAI,IAAI,cAAc,YAAY,CAAC,GAClE;AACA,YAAI,cAAc,mBAAmB,WAAW;AAC9C,gBAAM,IAAI;AAAA,YACR,0BAA0BD,eAAc,uBACnB,cAAc,cAAc,mBAC7B,SAAS;AAAA,UAAA;AAAA,QAEjC;AACA,WAAG;AAAA,UACD,uBAAuBA,eAAc,kCAClB,cAAc,cAAc;AAAA,QAAA;AASjD,cAAM;AAAA,UACJ,qBAAqB,IAAID,OAAM,CAAC;AAAA,UAChC,qBAAqB,IAAIA,OAAM,CAAC;AAAA,QAAA;AAAA,MAEpC;AAAA,IACF;AAEA,QAAI,QAAQ,WAAW,KAAK,MAAM,SAAS,GAAG;AAC5C,YAAM;AAAA,QACJ,kBAAkB,IAAIA,OAAM,CAAC,wBAAwB,IAAI,aAAa,CAAC;AAAA,QACvE,kBAAkB,IAAIA,OAAM,CAAC,wBAAwB,IAAI,gBAAgB,CAAC;AAAA,iDACjC,IAAI,gBAAgB,CAAC;AAAA,MAAA;AAEhE,aAAO,QAAQ,IAAI,KAAK;AAAA,IAC1B;AAEA,UAAM,EAAC,cAAA,IAAiB,QAAQ,CAAC;AACjC,QAAI,eAAe;AACjB,UAAI,WAAW;AACb,cAAM,IAAI,gBAAgB,sCAAsC;AAAA,MAClE;AACA,SAAG;AAAA,QACD;AAAA,MAAA;AAAA,IAMJ;AAEA,WAAO,CAAA;AAAA,EACT,CAAC;AACH;AAEO,MAAM,wBAAwB,WAAW;AAAA,EACrC,OAAO;AAClB;"}
package/out/zero-cache/src/services/change-streamer/storer.d.ts

@@ -40,7 +40,7 @@ import type { Subscriber } from './subscriber.ts';
 export declare class Storer implements Service {
     #private;
     readonly id = "storer";
-    constructor(lc: LogContext, shard: ShardID, taskID: string, discoveryAddress: string, discoveryProtocol: string, db: PostgresDB, replicaVersion: string, onConsumed: (c: Commit | StatusMessage) => void, onFatal: (err: Error) => void);
+    constructor(lc: LogContext, shard: ShardID, taskID: string, discoveryAddress: string, discoveryProtocol: string, db: PostgresDB, replicaVersion: string, onConsumed: (c: Commit | StatusMessage) => void, onFatal: (err: Error) => void, backPressureThreshold: number);
     assumeOwnership(): Promise<void>;
     getLastWatermarkToStartStream(): Promise<string>;
     getMinWatermarkForCatchup(): Promise<string | null>;
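Read together with the storer.js hunks below, the trailing backPressureThreshold constructor parameter replaces the previously hard-coded QUEUE_SIZE_BACK_PRESSURE_THRESHOLD = 1e5: the threshold is now injected by the caller (ChangeStreamerImpl threads it through, per the change-streamer-service source embedded at the top of this section). Where the configured value ultimately comes from is not visible in this extract.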
package/out/zero-cache/src/services/change-streamer/storer.d.ts.map

@@ -1 +1 @@
-
{"version":3,"file":"storer.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/change-streamer/storer.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAUjD,OAAO,EAA0B,KAAK,UAAU,EAAC,MAAM,mBAAmB,CAAC;AAC3E,OAAO,EAAY,KAAK,OAAO,EAAC,MAAM,uBAAuB,CAAC;AAC9D,OAAO,EAAC,KAAK,MAAM,EAAC,MAAM,iDAAiD,CAAC;AAC5E,OAAO,KAAK,EAAC,aAAa,EAAC,MAAM,6CAA6C,CAAC;AAC/E,OAAO,KAAK,EAAC,cAAc,EAAC,MAAM,6BAA6B,CAAC;AAChE,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,eAAe,CAAC;AAC3C,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,8BAA8B,CAAC;AAQpE,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,iBAAiB,CAAC;
+
{"version":3,"file":"storer.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/change-streamer/storer.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAUjD,OAAO,EAA0B,KAAK,UAAU,EAAC,MAAM,mBAAmB,CAAC;AAC3E,OAAO,EAAY,KAAK,OAAO,EAAC,MAAM,uBAAuB,CAAC;AAC9D,OAAO,EAAC,KAAK,MAAM,EAAC,MAAM,iDAAiD,CAAC;AAC5E,OAAO,KAAK,EAAC,aAAa,EAAC,MAAM,6CAA6C,CAAC;AAC/E,OAAO,KAAK,EAAC,cAAc,EAAC,MAAM,6BAA6B,CAAC;AAChE,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,eAAe,CAAC;AAC3C,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,8BAA8B,CAAC;AAQpE,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,iBAAiB,CAAC;AAsBhD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA6BG;AACH,qBAAa,MAAO,YAAW,OAAO;;IACpC,QAAQ,CAAC,EAAE,YAAY;gBAgBrB,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,OAAO,EACd,MAAM,EAAE,MAAM,EACd,gBAAgB,EAAE,MAAM,EACxB,iBAAiB,EAAE,MAAM,EACzB,EAAE,EAAE,UAAU,EACd,cAAc,EAAE,MAAM,EACtB,UAAU,EAAE,CAAC,CAAC,EAAE,MAAM,GAAG,aAAa,KAAK,IAAI,EAC/C,OAAO,EAAE,CAAC,GAAG,EAAE,KAAK,KAAK,IAAI,EAC7B,qBAAqB,EAAE,MAAM;IAmBzB,eAAe;IAcf,6BAA6B,IAAI,OAAO,CAAC,MAAM,CAAC;IAahD,yBAAyB,IAAI,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC;IAQzD,kBAAkB,CAAC,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC;IAyBtD,KAAK,CAAC,KAAK,EAAE,iBAAiB;IAI9B,KAAK;IAIL,MAAM,CAAC,CAAC,EAAE,aAAa;IAIvB,OAAO,CAAC,UAAU,EAAE,UAAU,EAAE,IAAI,EAAE,cAAc;IAMpD,YAAY,IAAI,OAAO,CAAC,IAAI,CAAC,GAAG,SAAS;IAwCnC,GAAG;IA6PT,IAAI;CAIL"}
package/out/zero-cache/src/services/change-streamer/storer.js

@@ -14,7 +14,6 @@ import "../change-source/protocol/current/downstream.js";
 import "./change-streamer.js";
 import { WatermarkTooOld } from "./error-type-enum.js";
 import { AutoResetSignal, markResetRequired } from "./schema/tables.js";
-const QUEUE_SIZE_BACK_PRESSURE_THRESHOLD = 1e5;
 class Storer {
   id = "storer";
   #lc;
@@ -27,9 +26,10 @@ class Storer {
   #onConsumed;
   #onFatal;
   #queue = new Queue();
+  #backPressureThreshold;
   #running = false;
-  constructor(lc, shard, taskID, discoveryAddress, discoveryProtocol, db, replicaVersion, onConsumed, onFatal) {
-    this.#lc = lc;
+  constructor(lc, shard, taskID, discoveryAddress, discoveryProtocol, db, replicaVersion, onConsumed, onFatal, backPressureThreshold) {
+    this.#lc = lc.withContext("component", "change-log");
     this.#shard = shard;
     this.#taskID = taskID;
     this.#discoveryAddress = discoveryAddress;
@@ -38,6 +38,7 @@ class Storer {
     this.#replicaVersion = replicaVersion;
     this.#onConsumed = onConsumed;
     this.#onFatal = onFatal;
+    this.#backPressureThreshold = backPressureThreshold;
   }
   // For readability in SQL statements.
   #cdc(table) {
@@ -101,9 +102,19 @@ class Storer {
     if (!this.#running) {
       return void 0;
     }
-    if (this.#readyForMore === null && this.#queue.size() >
+    if (this.#readyForMore === null && this.#queue.size() > this.#backPressureThreshold) {
       this.#lc.warn?.(
-        `applying back pressure with ${this.#queue.size()} queued changes
+        `applying back pressure with ${this.#queue.size()} queued changes (threshold: ${this.#backPressureThreshold})
+
+To inspect changeLog backlog in your CVR database:
+SELECT
+  (change->'relation'->>'schema') || '.' || (change->'relation'->>'name') AS table_name,
+  change->>'tag' AS operation,
+  COUNT(*) AS count
+FROM "<app_id>/cdc"."changeLog"
+GROUP BY 1, 2
+ORDER BY 3 DESC
+LIMIT 20;`
       );
       this.#readyForMore = resolver();
     }
@@ -111,7 +122,7 @@ class Storer {
   }
   #maybeReleaseBackPressure() {
     if (this.#readyForMore !== null && // Wait for at least 10% of the threshold to free up.
-    this.#queue.size() <
+    this.#queue.size() < this.#backPressureThreshold * 0.9) {
       this.#lc.info?.(
         `releasing back pressure with ${this.#queue.size()} queued changes`
       );
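How the threshold is consumed upstream, per the ChangeStreamerImpl source embedded at the top of this section: after storing and forwarding each change, the streamer asks the Storer whether it is ready for more and awaits when it is not. A sketch of that consumer side (the `storer` variable is illustrative; the readyForMore() contract is from the embedded source):

    // readyForMore() returns a promise only while back pressure is engaged
    // (queue above backPressureThreshold); it resolves once the queue drains
    // below 90% of the threshold.
    const readyForMore = storer.readyForMore();
    if (readyForMore) {
      await readyForMore; // stall the replication stream until the queue drains
    }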