@rocicorp/zero 0.26.0-canary.0 → 0.26.0-canary.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/out/replicache/src/persist/collect-idb-databases.d.ts +4 -4
- package/out/replicache/src/persist/collect-idb-databases.d.ts.map +1 -1
- package/out/replicache/src/persist/collect-idb-databases.js +22 -19
- package/out/replicache/src/persist/collect-idb-databases.js.map +1 -1
- package/out/replicache/src/persist/refresh.d.ts.map +1 -1
- package/out/replicache/src/persist/refresh.js +0 -8
- package/out/replicache/src/persist/refresh.js.map +1 -1
- package/out/replicache/src/process-scheduler.d.ts +23 -0
- package/out/replicache/src/process-scheduler.d.ts.map +1 -1
- package/out/replicache/src/process-scheduler.js +50 -1
- package/out/replicache/src/process-scheduler.js.map +1 -1
- package/out/replicache/src/replicache-impl.d.ts +8 -0
- package/out/replicache/src/replicache-impl.d.ts.map +1 -1
- package/out/replicache/src/replicache-impl.js +11 -2
- package/out/replicache/src/replicache-impl.js.map +1 -1
- package/out/shared/src/custom-key-map.d.ts +4 -4
- package/out/shared/src/custom-key-map.d.ts.map +1 -1
- package/out/shared/src/custom-key-map.js.map +1 -1
- package/out/shared/src/falsy.d.ts +3 -0
- package/out/shared/src/falsy.d.ts.map +1 -0
- package/out/shared/src/iterables.d.ts +6 -8
- package/out/shared/src/iterables.d.ts.map +1 -1
- package/out/shared/src/iterables.js +13 -7
- package/out/shared/src/iterables.js.map +1 -1
- package/out/shared/src/options.d.ts +1 -0
- package/out/shared/src/options.d.ts.map +1 -1
- package/out/shared/src/options.js +5 -1
- package/out/shared/src/options.js.map +1 -1
- package/out/zero/package.json.js +1 -1
- package/out/zero/src/adapters/drizzle.js +1 -2
- package/out/zero/src/adapters/prisma.d.ts +2 -0
- package/out/zero/src/adapters/prisma.d.ts.map +1 -0
- package/out/zero/src/adapters/prisma.js +6 -0
- package/out/zero/src/adapters/prisma.js.map +1 -0
- package/out/zero/src/pg.js +4 -7
- package/out/zero/src/react.js +3 -1
- package/out/zero/src/react.js.map +1 -1
- package/out/zero/src/server.js +5 -8
- package/out/zero/src/zero-cache-dev.js +7 -3
- package/out/zero/src/zero-cache-dev.js.map +1 -1
- package/out/zero-cache/src/auth/load-permissions.d.ts +3 -2
- package/out/zero-cache/src/auth/load-permissions.d.ts.map +1 -1
- package/out/zero-cache/src/auth/load-permissions.js +14 -8
- package/out/zero-cache/src/auth/load-permissions.js.map +1 -1
- package/out/zero-cache/src/auth/write-authorizer.d.ts +6 -0
- package/out/zero-cache/src/auth/write-authorizer.d.ts.map +1 -1
- package/out/zero-cache/src/auth/write-authorizer.js +16 -3
- package/out/zero-cache/src/auth/write-authorizer.js.map +1 -1
- package/out/zero-cache/src/config/zero-config.d.ts +54 -9
- package/out/zero-cache/src/config/zero-config.d.ts.map +1 -1
- package/out/zero-cache/src/config/zero-config.js +80 -20
- package/out/zero-cache/src/config/zero-config.js.map +1 -1
- package/out/zero-cache/src/custom/fetch.d.ts +3 -0
- package/out/zero-cache/src/custom/fetch.d.ts.map +1 -1
- package/out/zero-cache/src/custom/fetch.js +26 -0
- package/out/zero-cache/src/custom/fetch.js.map +1 -1
- package/out/zero-cache/src/db/lite-tables.js +1 -1
- package/out/zero-cache/src/db/lite-tables.js.map +1 -1
- package/out/zero-cache/src/db/migration-lite.d.ts.map +1 -1
- package/out/zero-cache/src/db/migration-lite.js +9 -3
- package/out/zero-cache/src/db/migration-lite.js.map +1 -1
- package/out/zero-cache/src/db/migration.d.ts.map +1 -1
- package/out/zero-cache/src/db/migration.js +9 -3
- package/out/zero-cache/src/db/migration.js.map +1 -1
- package/out/zero-cache/src/db/specs.d.ts +4 -3
- package/out/zero-cache/src/db/specs.d.ts.map +1 -1
- package/out/zero-cache/src/db/specs.js +4 -1
- package/out/zero-cache/src/db/specs.js.map +1 -1
- package/out/zero-cache/src/db/transaction-pool.d.ts.map +1 -1
- package/out/zero-cache/src/db/transaction-pool.js +9 -3
- package/out/zero-cache/src/db/transaction-pool.js.map +1 -1
- package/out/zero-cache/src/observability/events.d.ts.map +1 -1
- package/out/zero-cache/src/observability/events.js +15 -5
- package/out/zero-cache/src/observability/events.js.map +1 -1
- package/out/zero-cache/src/server/change-streamer.d.ts.map +1 -1
- package/out/zero-cache/src/server/change-streamer.js +10 -2
- package/out/zero-cache/src/server/change-streamer.js.map +1 -1
- package/out/zero-cache/src/server/inspector-delegate.d.ts +1 -1
- package/out/zero-cache/src/server/inspector-delegate.d.ts.map +1 -1
- package/out/zero-cache/src/server/inspector-delegate.js +11 -30
- package/out/zero-cache/src/server/inspector-delegate.js.map +1 -1
- package/out/zero-cache/src/server/main.js +1 -1
- package/out/zero-cache/src/server/main.js.map +1 -1
- package/out/zero-cache/src/server/priority-op.d.ts +8 -0
- package/out/zero-cache/src/server/priority-op.d.ts.map +1 -0
- package/out/zero-cache/src/server/priority-op.js +29 -0
- package/out/zero-cache/src/server/priority-op.js.map +1 -0
- package/out/zero-cache/src/server/syncer.d.ts.map +1 -1
- package/out/zero-cache/src/server/syncer.js +10 -10
- package/out/zero-cache/src/server/syncer.js.map +1 -1
- package/out/zero-cache/src/services/analyze.js +1 -1
- package/out/zero-cache/src/services/analyze.js.map +1 -1
- package/out/zero-cache/src/services/change-source/custom/change-source.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/custom/change-source.js +4 -7
- package/out/zero-cache/src/services/change-source/custom/change-source.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/change-source.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/change-source.js +68 -13
- package/out/zero-cache/src/services/change-source/pg/change-source.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/initial-sync.js +7 -2
- package/out/zero-cache/src/services/change-source/pg/initial-sync.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/logical-replication/stream.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/logical-replication/stream.js +7 -4
- package/out/zero-cache/src/services/change-source/pg/logical-replication/stream.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts +125 -180
- package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/ddl.js +1 -10
- package/out/zero-cache/src/services/change-source/pg/schema/ddl.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/init.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/init.js +26 -12
- package/out/zero-cache/src/services/change-source/pg/schema/init.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/published.d.ts +36 -90
- package/out/zero-cache/src/services/change-source/pg/schema/published.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/published.js +51 -14
- package/out/zero-cache/src/services/change-source/pg/schema/published.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/shard.d.ts +31 -36
- package/out/zero-cache/src/services/change-source/pg/schema/shard.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/shard.js +25 -17
- package/out/zero-cache/src/services/change-source/pg/schema/shard.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/validation.d.ts +2 -2
- package/out/zero-cache/src/services/change-source/pg/schema/validation.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/validation.js +2 -4
- package/out/zero-cache/src/services/change-source/pg/schema/validation.js.map +1 -1
- package/out/zero-cache/src/services/change-source/protocol/current/data.d.ts +158 -53
- package/out/zero-cache/src/services/change-source/protocol/current/data.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/protocol/current/data.js +55 -10
- package/out/zero-cache/src/services/change-source/protocol/current/data.js.map +1 -1
- package/out/zero-cache/src/services/change-source/protocol/current/downstream.d.ts +210 -72
- package/out/zero-cache/src/services/change-source/protocol/current/downstream.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/protocol/current.js +4 -2
- package/out/zero-cache/src/services/change-source/replica-schema.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/replica-schema.js +20 -4
- package/out/zero-cache/src/services/change-source/replica-schema.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.js +6 -4
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer.d.ts +71 -25
- package/out/zero-cache/src/services/change-streamer/change-streamer.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer.js +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/schema/tables.d.ts +1 -0
- package/out/zero-cache/src/services/change-streamer/schema/tables.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/schema/tables.js +6 -5
- package/out/zero-cache/src/services/change-streamer/schema/tables.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/storer.d.ts +1 -1
- package/out/zero-cache/src/services/change-streamer/storer.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/storer.js +17 -6
- package/out/zero-cache/src/services/change-streamer/storer.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/subscriber.d.ts +2 -0
- package/out/zero-cache/src/services/change-streamer/subscriber.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/subscriber.js +14 -1
- package/out/zero-cache/src/services/change-streamer/subscriber.js.map +1 -1
- package/out/zero-cache/src/services/heapz.d.ts.map +1 -1
- package/out/zero-cache/src/services/heapz.js +1 -0
- package/out/zero-cache/src/services/heapz.js.map +1 -1
- package/out/zero-cache/src/services/life-cycle.d.ts +1 -1
- package/out/zero-cache/src/services/life-cycle.d.ts.map +1 -1
- package/out/zero-cache/src/services/life-cycle.js.map +1 -1
- package/out/zero-cache/src/services/litestream/commands.d.ts.map +1 -1
- package/out/zero-cache/src/services/litestream/commands.js +3 -1
- package/out/zero-cache/src/services/litestream/commands.js.map +1 -1
- package/out/zero-cache/src/services/litestream/config.yml +1 -0
- package/out/zero-cache/src/services/mutagen/error.d.ts.map +1 -1
- package/out/zero-cache/src/services/mutagen/error.js +4 -1
- package/out/zero-cache/src/services/mutagen/error.js.map +1 -1
- package/out/zero-cache/src/services/mutagen/mutagen.d.ts +4 -4
- package/out/zero-cache/src/services/mutagen/mutagen.d.ts.map +1 -1
- package/out/zero-cache/src/services/mutagen/mutagen.js +10 -24
- package/out/zero-cache/src/services/mutagen/mutagen.js.map +1 -1
- package/out/zero-cache/src/services/mutagen/pusher.d.ts +8 -6
- package/out/zero-cache/src/services/mutagen/pusher.d.ts.map +1 -1
- package/out/zero-cache/src/services/mutagen/pusher.js +130 -19
- package/out/zero-cache/src/services/mutagen/pusher.js.map +1 -1
- package/out/zero-cache/src/services/replicator/change-processor.d.ts.map +1 -1
- package/out/zero-cache/src/services/replicator/change-processor.js +24 -31
- package/out/zero-cache/src/services/replicator/change-processor.js.map +1 -1
- package/out/zero-cache/src/services/replicator/schema/change-log.d.ts +4 -4
- package/out/zero-cache/src/services/replicator/schema/change-log.d.ts.map +1 -1
- package/out/zero-cache/src/services/replicator/schema/change-log.js +38 -36
- package/out/zero-cache/src/services/replicator/schema/change-log.js.map +1 -1
- package/out/zero-cache/src/services/{change-source → replicator/schema}/column-metadata.d.ts +3 -3
- package/out/zero-cache/src/services/replicator/schema/column-metadata.d.ts.map +1 -0
- package/out/zero-cache/src/services/{change-source → replicator/schema}/column-metadata.js +3 -3
- package/out/zero-cache/src/services/replicator/schema/column-metadata.js.map +1 -0
- package/out/zero-cache/src/services/replicator/schema/replication-state.d.ts.map +1 -1
- package/out/zero-cache/src/services/replicator/schema/replication-state.js +3 -1
- package/out/zero-cache/src/services/replicator/schema/replication-state.js.map +1 -1
- package/out/zero-cache/src/services/run-ast.js +1 -1
- package/out/zero-cache/src/services/run-ast.js.map +1 -1
- package/out/zero-cache/src/services/statz.d.ts.map +1 -1
- package/out/zero-cache/src/services/statz.js +1 -0
- package/out/zero-cache/src/services/statz.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/client-handler.d.ts +5 -6
- package/out/zero-cache/src/services/view-syncer/client-handler.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/client-handler.js +5 -23
- package/out/zero-cache/src/services/view-syncer/client-handler.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr-store.d.ts +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr-store.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr-store.js +65 -44
- package/out/zero-cache/src/services/view-syncer/cvr-store.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr.d.ts +0 -1
- package/out/zero-cache/src/services/view-syncer/cvr.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr.js +23 -6
- package/out/zero-cache/src/services/view-syncer/cvr.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts +14 -22
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.js +46 -67
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/row-record-cache.d.ts +1 -1
- package/out/zero-cache/src/services/view-syncer/row-record-cache.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/row-record-cache.js +22 -11
- package/out/zero-cache/src/services/view-syncer/row-record-cache.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/snapshotter.d.ts +0 -2
- package/out/zero-cache/src/services/view-syncer/snapshotter.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/snapshotter.js +3 -11
- package/out/zero-cache/src/services/view-syncer/snapshotter.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts +6 -4
- package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/view-syncer.js +216 -243
- package/out/zero-cache/src/services/view-syncer/view-syncer.js.map +1 -1
- package/out/zero-cache/src/types/lexi-version.d.ts.map +1 -1
- package/out/zero-cache/src/types/lexi-version.js +4 -1
- package/out/zero-cache/src/types/lexi-version.js.map +1 -1
- package/out/zero-cache/src/types/lite.d.ts.map +1 -1
- package/out/zero-cache/src/types/lite.js +8 -2
- package/out/zero-cache/src/types/lite.js.map +1 -1
- package/out/zero-cache/src/types/shards.js +1 -1
- package/out/zero-cache/src/types/shards.js.map +1 -1
- package/out/zero-cache/src/types/sql.d.ts +5 -0
- package/out/zero-cache/src/types/sql.d.ts.map +1 -1
- package/out/zero-cache/src/types/sql.js +5 -1
- package/out/zero-cache/src/types/sql.js.map +1 -1
- package/out/zero-cache/src/types/subscription.js +1 -1
- package/out/zero-cache/src/types/subscription.js.map +1 -1
- package/out/zero-cache/src/workers/connect-params.d.ts +1 -1
- package/out/zero-cache/src/workers/connect-params.d.ts.map +1 -1
- package/out/zero-cache/src/workers/connect-params.js +2 -3
- package/out/zero-cache/src/workers/connect-params.js.map +1 -1
- package/out/zero-cache/src/workers/replicator.d.ts.map +1 -1
- package/out/zero-cache/src/workers/replicator.js +2 -5
- package/out/zero-cache/src/workers/replicator.js.map +1 -1
- package/out/zero-cache/src/workers/syncer-ws-message-handler.d.ts.map +1 -1
- package/out/zero-cache/src/workers/syncer-ws-message-handler.js +15 -10
- package/out/zero-cache/src/workers/syncer-ws-message-handler.js.map +1 -1
- package/out/zero-cache/src/workers/syncer.d.ts.map +1 -1
- package/out/zero-cache/src/workers/syncer.js +17 -10
- package/out/zero-cache/src/workers/syncer.js.map +1 -1
- package/out/zero-client/src/client/connection-manager.d.ts +8 -0
- package/out/zero-client/src/client/connection-manager.d.ts.map +1 -1
- package/out/zero-client/src/client/connection-manager.js +33 -0
- package/out/zero-client/src/client/connection-manager.js.map +1 -1
- package/out/zero-client/src/client/connection.d.ts.map +1 -1
- package/out/zero-client/src/client/connection.js +6 -3
- package/out/zero-client/src/client/connection.js.map +1 -1
- package/out/zero-client/src/client/context.js +1 -0
- package/out/zero-client/src/client/context.js.map +1 -1
- package/out/zero-client/src/client/error.js +1 -1
- package/out/zero-client/src/client/error.js.map +1 -1
- package/out/zero-client/src/client/mutator-proxy.d.ts.map +1 -1
- package/out/zero-client/src/client/mutator-proxy.js +15 -1
- package/out/zero-client/src/client/mutator-proxy.js.map +1 -1
- package/out/zero-client/src/client/options.d.ts +11 -1
- package/out/zero-client/src/client/options.d.ts.map +1 -1
- package/out/zero-client/src/client/options.js.map +1 -1
- package/out/zero-client/src/client/query-manager.d.ts +4 -0
- package/out/zero-client/src/client/query-manager.d.ts.map +1 -1
- package/out/zero-client/src/client/query-manager.js +7 -0
- package/out/zero-client/src/client/query-manager.js.map +1 -1
- package/out/zero-client/src/client/version.js +1 -1
- package/out/zero-client/src/client/zero.d.ts +5 -5
- package/out/zero-client/src/client/zero.d.ts.map +1 -1
- package/out/zero-client/src/client/zero.js +53 -8
- package/out/zero-client/src/client/zero.js.map +1 -1
- package/out/zero-client/src/mod.d.ts +1 -0
- package/out/zero-client/src/mod.d.ts.map +1 -1
- package/out/zero-protocol/src/connect.d.ts +4 -0
- package/out/zero-protocol/src/connect.d.ts.map +1 -1
- package/out/zero-protocol/src/connect.js +3 -1
- package/out/zero-protocol/src/connect.js.map +1 -1
- package/out/zero-protocol/src/protocol-version.d.ts +1 -1
- package/out/zero-protocol/src/protocol-version.d.ts.map +1 -1
- package/out/zero-protocol/src/protocol-version.js +1 -1
- package/out/zero-protocol/src/protocol-version.js.map +1 -1
- package/out/zero-protocol/src/push.d.ts +16 -0
- package/out/zero-protocol/src/push.d.ts.map +1 -1
- package/out/zero-protocol/src/push.js +25 -1
- package/out/zero-protocol/src/push.js.map +1 -1
- package/out/zero-protocol/src/up.d.ts +2 -0
- package/out/zero-protocol/src/up.d.ts.map +1 -1
- package/out/zero-react/src/mod.d.ts +3 -1
- package/out/zero-react/src/mod.d.ts.map +1 -1
- package/out/zero-react/src/paging-reducer.d.ts +61 -0
- package/out/zero-react/src/paging-reducer.d.ts.map +1 -0
- package/out/zero-react/src/paging-reducer.js +77 -0
- package/out/zero-react/src/paging-reducer.js.map +1 -0
- package/out/zero-react/src/use-query.d.ts +11 -1
- package/out/zero-react/src/use-query.d.ts.map +1 -1
- package/out/zero-react/src/use-query.js +13 -11
- package/out/zero-react/src/use-query.js.map +1 -1
- package/out/zero-react/src/use-rows.d.ts +39 -0
- package/out/zero-react/src/use-rows.d.ts.map +1 -0
- package/out/zero-react/src/use-rows.js +130 -0
- package/out/zero-react/src/use-rows.js.map +1 -0
- package/out/zero-react/src/use-zero-virtualizer.d.ts +122 -0
- package/out/zero-react/src/use-zero-virtualizer.d.ts.map +1 -0
- package/out/zero-react/src/use-zero-virtualizer.js +342 -0
- package/out/zero-react/src/use-zero-virtualizer.js.map +1 -0
- package/out/zero-react/src/zero-provider.js +1 -1
- package/out/zero-react/src/zero-provider.js.map +1 -1
- package/out/zero-server/src/adapters/drizzle.d.ts +18 -18
- package/out/zero-server/src/adapters/drizzle.d.ts.map +1 -1
- package/out/zero-server/src/adapters/drizzle.js +8 -22
- package/out/zero-server/src/adapters/drizzle.js.map +1 -1
- package/out/zero-server/src/adapters/pg.d.ts +19 -13
- package/out/zero-server/src/adapters/pg.d.ts.map +1 -1
- package/out/zero-server/src/adapters/pg.js.map +1 -1
- package/out/zero-server/src/adapters/postgresjs.d.ts +19 -13
- package/out/zero-server/src/adapters/postgresjs.d.ts.map +1 -1
- package/out/zero-server/src/adapters/postgresjs.js.map +1 -1
- package/out/zero-server/src/adapters/prisma.d.ts +66 -0
- package/out/zero-server/src/adapters/prisma.d.ts.map +1 -0
- package/out/zero-server/src/adapters/prisma.js +63 -0
- package/out/zero-server/src/adapters/prisma.js.map +1 -0
- package/out/zero-server/src/custom.js +1 -15
- package/out/zero-server/src/custom.js.map +1 -1
- package/out/zero-server/src/mod.d.ts +9 -8
- package/out/zero-server/src/mod.d.ts.map +1 -1
- package/out/zero-server/src/process-mutations.d.ts +2 -1
- package/out/zero-server/src/process-mutations.d.ts.map +1 -1
- package/out/zero-server/src/process-mutations.js +39 -4
- package/out/zero-server/src/process-mutations.js.map +1 -1
- package/out/zero-server/src/push-processor.js +1 -1
- package/out/zero-server/src/push-processor.js.map +1 -1
- package/out/zero-server/src/schema.d.ts.map +1 -1
- package/out/zero-server/src/schema.js +4 -1
- package/out/zero-server/src/schema.js.map +1 -1
- package/out/zero-server/src/zql-database.d.ts.map +1 -1
- package/out/zero-server/src/zql-database.js +18 -0
- package/out/zero-server/src/zql-database.js.map +1 -1
- package/out/zero-solid/src/mod.d.ts +1 -1
- package/out/zero-solid/src/mod.d.ts.map +1 -1
- package/out/zero-solid/src/solid-view.js +1 -0
- package/out/zero-solid/src/solid-view.js.map +1 -1
- package/out/zero-solid/src/use-query.d.ts +10 -1
- package/out/zero-solid/src/use-query.d.ts.map +1 -1
- package/out/zero-solid/src/use-query.js +22 -5
- package/out/zero-solid/src/use-query.js.map +1 -1
- package/out/zero-solid/src/use-zero.js +1 -1
- package/out/zero-solid/src/use-zero.js.map +1 -1
- package/out/zql/src/ivm/constraint.d.ts.map +1 -1
- package/out/zql/src/ivm/constraint.js +4 -1
- package/out/zql/src/ivm/constraint.js.map +1 -1
- package/out/zql/src/ivm/exists.d.ts.map +1 -1
- package/out/zql/src/ivm/exists.js +4 -1
- package/out/zql/src/ivm/exists.js.map +1 -1
- package/out/zql/src/ivm/join-utils.d.ts.map +1 -1
- package/out/zql/src/ivm/join-utils.js +8 -2
- package/out/zql/src/ivm/join-utils.js.map +1 -1
- package/out/zql/src/ivm/memory-source.d.ts.map +1 -1
- package/out/zql/src/ivm/memory-source.js +12 -3
- package/out/zql/src/ivm/memory-source.js.map +1 -1
- package/out/zql/src/ivm/push-accumulated.d.ts.map +1 -1
- package/out/zql/src/ivm/push-accumulated.js +25 -2
- package/out/zql/src/ivm/push-accumulated.js.map +1 -1
- package/out/zql/src/ivm/stream.d.ts.map +1 -1
- package/out/zql/src/ivm/stream.js +1 -1
- package/out/zql/src/ivm/stream.js.map +1 -1
- package/out/zql/src/ivm/take.d.ts.map +1 -1
- package/out/zql/src/ivm/take.js +24 -6
- package/out/zql/src/ivm/take.js.map +1 -1
- package/out/zql/src/ivm/union-fan-in.d.ts.map +1 -1
- package/out/zql/src/ivm/union-fan-in.js +12 -3
- package/out/zql/src/ivm/union-fan-in.js.map +1 -1
- package/out/zql/src/mutate/mutator.js +4 -4
- package/out/zql/src/mutate/mutator.js.map +1 -1
- package/out/zql/src/query/create-builder.js +3 -5
- package/out/zql/src/query/create-builder.js.map +1 -1
- package/out/zql/src/query/query-registry.js +4 -4
- package/out/zql/src/query/query-registry.js.map +1 -1
- package/out/zqlite/src/table-source.d.ts.map +1 -1
- package/out/zqlite/src/table-source.js +1 -2
- package/out/zqlite/src/table-source.js.map +1 -1
- package/package.json +8 -4
- package/out/zero-cache/src/services/change-source/column-metadata.d.ts.map +0 -1
- package/out/zero-cache/src/services/change-source/column-metadata.js.map +0 -1
- package/out/zero-cache/src/types/schema-versions.d.ts +0 -12
- package/out/zero-cache/src/types/schema-versions.d.ts.map +0 -1
- package/out/zero-cache/src/types/schema-versions.js +0 -28
- package/out/zero-cache/src/types/schema-versions.js.map +0 -1
package/out/zero-cache/src/services/change-source/pg/schema/init.js.map

@@ -1 +1 @@
-{"version":3,"file":"init.js","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/init.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {assert} from '../../../../../../shared/src/asserts.ts';\nimport * as v from '../../../../../../shared/src/valita.ts';\nimport {\n getVersionHistory,\n runSchemaMigrations,\n type IncrementalMigrationMap,\n type Migration,\n} from '../../../../db/migration.ts';\nimport type {PostgresDB} from '../../../../types/pg.ts';\nimport {upstreamSchema, type ShardConfig} from '../../../../types/shards.ts';\nimport {AutoResetSignal} from '../../../change-streamer/schema/tables.ts';\nimport {decommissionShard} from '../decommission.ts';\nimport {publishedSchema} from './published.ts';\nimport {\n getMutationsTableDefinition,\n legacyReplicationSlot,\n metadataPublicationName,\n setupTablesAndReplication,\n setupTriggers,\n} from './shard.ts';\nimport {id} from '../../../../types/sql.ts';\n\n/**\n * Ensures that a shard is set up for initial sync.\n */\nexport async function ensureShardSchema(\n lc: LogContext,\n db: PostgresDB,\n shard: ShardConfig,\n): Promise<void> {\n const initialSetup: Migration = {\n migrateSchema: (lc, tx) => setupTablesAndReplication(lc, tx, shard),\n minSafeVersion: 1,\n };\n await runSchemaMigrations(\n lc,\n `upstream-shard-${shard.appID}`,\n upstreamSchema(shard),\n db,\n initialSetup,\n // The incremental migration of any existing replicas will be replaced by\n // the incoming replica being synced, so the replicaVersion here is\n // unnecessary.\n getIncrementalMigrations(shard, 'obsolete'),\n );\n}\n\n/**\n * Updates the schema for an existing shard.\n */\nexport async function updateShardSchema(\n lc: LogContext,\n db: PostgresDB,\n shard: ShardConfig,\n replicaVersion: string,\n): Promise<void> {\n await runSchemaMigrations(\n lc,\n `upstream-shard-${shard.appID}`,\n upstreamSchema(shard),\n db,\n {\n // If the expected existing shard is absent, throw an\n // AutoResetSignal to backtrack and initial sync.\n migrateSchema: () => {\n throw new AutoResetSignal(\n `upstream shard ${upstreamSchema(shard)} is not initialized`,\n );\n },\n },\n getIncrementalMigrations(shard, replicaVersion),\n );\n\n // The decommission check is run in updateShardSchema so that it happens\n // after initial sync, and not when the shard schema is initially set up.\n await decommissionLegacyShard(lc, db, shard);\n}\n\nfunction getIncrementalMigrations(\n shard: ShardConfig,\n replicaVersion?: string,\n): IncrementalMigrationMap {\n const shardConfigTable = `${upstreamSchema(shard)}.shardConfig`;\n\n return {\n 4: {\n migrateSchema: () => {\n throw new AutoResetSignal('resetting to upgrade shard schema');\n },\n minSafeVersion: 3,\n },\n\n // v5 changes the upstream schema organization from \"zero_{SHARD_ID}\" to\n // the \"{APP_ID}_0\". An incremental migration indicates that the previous\n // SHARD_ID was \"0\" and the new APP_ID is \"zero\" (i.e. the default values\n // for those options). In this case, the upstream format is identical, and\n // no migration is necessary. However, the version is bumped to v5 to\n // indicate that it was created with the {APP_ID} configuration and should\n // not be decommissioned as a legacy shard.\n 5: {},\n\n 6: {\n migrateSchema: async (lc, sql) => {\n assert(\n replicaVersion,\n `replicaVersion is always passed for incremental migrations`,\n );\n await Promise.all([\n sql`\n ALTER TABLE ${sql(shardConfigTable)} ADD \"replicaVersion\" TEXT`,\n sql`\n UPDATE ${sql(shardConfigTable)} SET ${sql({replicaVersion})}`,\n ]);\n lc.info?.(\n `Recorded replicaVersion ${replicaVersion} in upstream shardConfig`,\n );\n },\n },\n\n // Updates the DDL event trigger protocol to v2, and adds support for\n // ALTER SCHEMA x RENAME TO y\n 7: {\n migrateSchema: async (lc, sql) => {\n const [{publications}] = await sql<{publications: string[]}[]>`\n SELECT publications FROM ${sql(shardConfigTable)}`;\n await setupTriggers(lc, sql, {...shard, publications});\n lc.info?.(`Upgraded to v2 event triggers`);\n },\n },\n\n // Adds support for non-disruptive resyncs, which tracks multiple\n // replicas with different slot names.\n 8: {\n migrateSchema: async (lc, sql) => {\n const legacyShardConfigSchema = v.object({\n replicaVersion: v.string().nullable(),\n initialSchema: publishedSchema.nullable(),\n });\n const result = await sql`\n SELECT \"replicaVersion\", \"initialSchema\" FROM ${sql(shardConfigTable)}`;\n assert(result.length === 1);\n const {replicaVersion, initialSchema} = v.parse(\n result[0],\n legacyShardConfigSchema,\n 'passthrough',\n );\n\n await Promise.all([\n sql`\n CREATE TABLE ${sql(upstreamSchema(shard))}.replicas (\n \"slot\" TEXT PRIMARY KEY,\n \"version\" TEXT NOT NULL,\n \"initialSchema\" JSON NOT NULL\n );\n `,\n sql`\n INSERT INTO ${sql(upstreamSchema(shard))}.replicas ${sql({\n slot: legacyReplicationSlot(shard),\n version: replicaVersion,\n initialSchema,\n })}\n `,\n sql`\n ALTER TABLE ${sql(shardConfigTable)} DROP \"replicaVersion\", DROP \"initialSchema\"\n `,\n ]);\n lc.info?.(`Upgraded schema to support non-disruptive resyncs`);\n },\n },\n\n // Fixes field ordering of compound indexes. This incremental migration\n // only fixes indexes resulting from new schema changes. A full resync is\n // required to fix existing indexes.\n 9: {\n migrateSchema: async (lc, sql) => {\n const [{publications}] = await sql<{publications: string[]}[]>`\n SELECT publications FROM ${sql(shardConfigTable)}`;\n await setupTriggers(lc, sql, {...shard, publications});\n lc.info?.(`Upgraded DDL event triggers`);\n },\n },\n\n // Adds the `mutations` table used to track mutation results.\n 10: {\n migrateSchema: async (lc, sql) => {\n await sql.unsafe(/*sql*/ `\n ${getMutationsTableDefinition(upstreamSchema(shard))}\n ALTER PUBLICATION ${id(metadataPublicationName(shard.appID, shard.shardNum))} ADD TABLE ${id(upstreamSchema(shard))}.\"mutations\";\n `);\n lc.info?.('Upgraded schema with new mutations table');\n },\n },\n };\n}\n\nexport async function decommissionLegacyShard(\n lc: LogContext,\n db: PostgresDB,\n shard: ShardConfig,\n) {\n if (shard.appID !== 'zero') {\n // When migration from non-default shard ids, e.g. \"zero_prod\" => \"prod_0\",\n // clean up the old \"zero_prod\" shard if it is pre-v5. Note that the v5\n // check is important to guard against cleaning up a **new** \"zero_0\" app\n // that coexists with the current App (with app-id === \"0\").\n const versionHistory = await getVersionHistory(db, `zero_${shard.appID}`);\n if (versionHistory !== null && versionHistory.schemaVersion < 5) {\n await decommissionShard(lc, db, 'zero', shard.appID);\n }\n }\n}\n"],"names":["lc","v.object","v.string","replicaVersion","v.parse"],"mappings":";;;;;;;;;;AA0BA,eAAsB,kBACpB,IACA,IACA,OACe;AACf,QAAM,eAA0B;AAAA,IAC9B,eAAe,CAACA,KAAI,OAAO,0BAA0BA,KAAI,IAAI,KAAK;AAAA,IAClE,gBAAgB;AAAA,EAAA;AAElB,QAAM;AAAA,IACJ;AAAA,IACA,kBAAkB,MAAM,KAAK;AAAA,IAC7B,eAAe,KAAK;AAAA,IACpB;AAAA,IACA;AAAA;AAAA;AAAA;AAAA,IAIA,yBAAyB,OAAO,UAAU;AAAA,EAAA;AAE9C;AAKA,eAAsB,kBACpB,IACA,IACA,OACA,gBACe;AACf,QAAM;AAAA,IACJ;AAAA,IACA,kBAAkB,MAAM,KAAK;AAAA,IAC7B,eAAe,KAAK;AAAA,IACpB;AAAA,IACA;AAAA;AAAA;AAAA,MAGE,eAAe,MAAM;AACnB,cAAM,IAAI;AAAA,UACR,kBAAkB,eAAe,KAAK,CAAC;AAAA,QAAA;AAAA,MAE3C;AAAA,IAAA;AAAA,IAEF,yBAAyB,OAAO,cAAc;AAAA,EAAA;AAKhD,QAAM,wBAAwB,IAAI,IAAI,KAAK;AAC7C;AAEA,SAAS,yBACP,OACA,gBACyB;AACzB,QAAM,mBAAmB,GAAG,eAAe,KAAK,CAAC;AAEjD,SAAO;AAAA,IACL,GAAG;AAAA,MACD,eAAe,MAAM;AACnB,cAAM,IAAI,gBAAgB,mCAAmC;AAAA,MAC/D;AAAA,MACA,gBAAgB;AAAA,IAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAUlB,GAAG,CAAA;AAAA,IAEH,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC;AAAA,UACE;AAAA,UACA;AAAA,QAAA;AAEF,cAAM,QAAQ,IAAI;AAAA,UAChB;AAAA,wBACc,IAAI,gBAAgB,CAAC;AAAA,UACnC;AAAA,mBACS,IAAI,gBAAgB,CAAC,QAAQ,IAAI,EAAC,eAAA,CAAe,CAAC;AAAA,QAAA,CAC5D;AACD,WAAG;AAAA,UACD,2BAA2B,cAAc;AAAA,QAAA;AAAA,MAE7C;AAAA,IAAA;AAAA;AAAA;AAAA,IAKF,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,CAAC,EAAC,cAAa,IAAI,MAAM;AAAA,qCACF,IAAI,gBAAgB,CAAC;AAClD,cAAM,cAAc,IAAI,KAAK,EAAC,GAAG,OAAO,cAAa;AACrD,WAAG,OAAO,+BAA+B;AAAA,MAC3C;AAAA,IAAA;AAAA;AAAA;AAAA,IAKF,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,0BAA0BC,OAAS;AAAA,UACvC,gBAAgBC,OAAE,EAAS,SAAA;AAAA,UAC3B,eAAe,gBAAgB,SAAA;AAAA,QAAS,CACzC;AACD,cAAM,SAAS,MAAM;AAAA,0DAC6B,IAAI,gBAAgB,CAAC;AACvE,eAAO,OAAO,WAAW,CAAC;AAC1B,cAAM,EAAC,gBAAAC,iBAAgB,cAAA,IAAiBC;AAAAA,UACtC,OAAO,CAAC;AAAA,UACR;AAAA,UACA;AAAA,QAAA;AAGF,cAAM,QAAQ,IAAI;AAAA,UAChB;AAAA,yBACe,IAAI,eAAe,KAAK,CAAC,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,UAMzC;AAAA,wBACc,IAAI,eAAe,KAAK,CAAC,CAAC,aAAa,IAAI;AAAA,YACvD,MAAM,sBAAsB,KAAK;AAAA,YACjC,SAASD;AAAAA,YACT;AAAA,UAAA,CACD,CAAC;AAAA;AAAA,UAEF;AAAA,wBACc,IAAI,gBAAgB,CAAC;AAAA;AAAA,QAAA,CAEpC;AACD,WAAG,OAAO,mDAAmD;AAAA,MAC/D;AAAA,IAAA;AAAA;AAAA;AAAA;AAAA,IAMF,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,CAAC,EAAC,cAAa,IAAI,MAAM;AAAA,qCACF,IAAI,gBAAgB,CAAC;AAClD,cAAM,cAAc,IAAI,KAAK,EAAC,GAAG,OAAO,cAAa;AACrD,WAAG,OAAO,6BAA6B;AAAA,MACzC;AAAA,IAAA;AAAA;AAAA,IAIF,IAAI;AAAA,MACF,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,IAAI;AAAA;AAAA,UAAe;AAAA,YACrB,4BAA4B,eAAe,KAAK,CAAC,CAAC;AAAA,8BAChC,GAAG,wBAAwB,MAAM,OAAO,MAAM,QAAQ,CAAC,CAAC,cAAc,GAAG,eAAe,KAAK,CAAC,CAAC;AAAA;AAAA,QAAA;AAErH,WAAG,OAAO,0CAA0C;AAAA,MACtD;AAAA,IAAA;AAAA,EACF;AAEJ;AAEA,eAAsB,wBACpB,IACA,IACA,OACA;AACA,MAAI,MAAM,UAAU,QAAQ;AAK1B,UAAM,iBAAiB,MAAM,kBAAkB,IAAI,QAAQ,MAAM,KAAK,EAAE;AACxE,QAAI,mBAAmB,QAAQ,eAAe,gBAAgB,GAAG;AAC/D,YAAM,kBAAkB,IAAI,IAAI,QAAQ,MAAM,KAAK;AAAA,IACrD;AAAA,EACF;AACF;"}
+{"version":3,"file":"init.js","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/init.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {assert} from '../../../../../../shared/src/asserts.ts';\nimport * as v from '../../../../../../shared/src/valita.ts';\nimport {\n getVersionHistory,\n runSchemaMigrations,\n type IncrementalMigrationMap,\n type Migration,\n} from '../../../../db/migration.ts';\nimport type {PostgresDB} from '../../../../types/pg.ts';\nimport {\n appSchema,\n upstreamSchema,\n type ShardConfig,\n} from '../../../../types/shards.ts';\nimport {id} from '../../../../types/sql.ts';\nimport {AutoResetSignal} from '../../../change-streamer/schema/tables.ts';\nimport {decommissionShard} from '../decommission.ts';\nimport {publishedSchema} from './published.ts';\nimport {\n getMutationsTableDefinition,\n legacyReplicationSlot,\n metadataPublicationName,\n setupTablesAndReplication,\n setupTriggers,\n} from './shard.ts';\n\n/**\n * Ensures that a shard is set up for initial sync.\n */\nexport async function ensureShardSchema(\n lc: LogContext,\n db: PostgresDB,\n shard: ShardConfig,\n): Promise<void> {\n const initialSetup: Migration = {\n migrateSchema: (lc, tx) => setupTablesAndReplication(lc, tx, shard),\n minSafeVersion: 1,\n };\n await runSchemaMigrations(\n lc,\n `upstream-shard-${shard.appID}`,\n upstreamSchema(shard),\n db,\n initialSetup,\n // The incremental migration of any existing replicas will be replaced by\n // the incoming replica being synced, so the replicaVersion here is\n // unnecessary.\n getIncrementalMigrations(shard, 'obsolete'),\n );\n}\n\n/**\n * Updates the schema for an existing shard.\n */\nexport async function updateShardSchema(\n lc: LogContext,\n db: PostgresDB,\n shard: ShardConfig,\n replicaVersion: string,\n): Promise<void> {\n await runSchemaMigrations(\n lc,\n `upstream-shard-${shard.appID}`,\n upstreamSchema(shard),\n db,\n {\n // If the expected existing shard is absent, throw an\n // AutoResetSignal to backtrack and initial sync.\n migrateSchema: () => {\n throw new AutoResetSignal(\n `upstream shard ${upstreamSchema(shard)} is not initialized`,\n );\n },\n },\n getIncrementalMigrations(shard, replicaVersion),\n );\n\n // The decommission check is run in updateShardSchema so that it happens\n // after initial sync, and not when the shard schema is initially set up.\n await decommissionLegacyShard(lc, db, shard);\n}\n\nfunction getIncrementalMigrations(\n shard: ShardConfig,\n replicaVersion?: string,\n): IncrementalMigrationMap {\n const shardConfigTable = `${upstreamSchema(shard)}.shardConfig`;\n\n return {\n 4: {\n migrateSchema: () => {\n throw new AutoResetSignal('resetting to upgrade shard schema');\n },\n minSafeVersion: 3,\n },\n\n // v5 changes the upstream schema organization from \"zero_{SHARD_ID}\" to\n // the \"{APP_ID}_0\". An incremental migration indicates that the previous\n // SHARD_ID was \"0\" and the new APP_ID is \"zero\" (i.e. the default values\n // for those options). In this case, the upstream format is identical, and\n // no migration is necessary. However, the version is bumped to v5 to\n // indicate that it was created with the {APP_ID} configuration and should\n // not be decommissioned as a legacy shard.\n 5: {},\n\n 6: {\n migrateSchema: async (lc, sql) => {\n assert(\n replicaVersion,\n `replicaVersion is always passed for incremental migrations`,\n );\n await Promise.all([\n sql`\n ALTER TABLE ${sql(shardConfigTable)} ADD \"replicaVersion\" TEXT`,\n sql`\n UPDATE ${sql(shardConfigTable)} SET ${sql({replicaVersion})}`,\n ]);\n lc.info?.(\n `Recorded replicaVersion ${replicaVersion} in upstream shardConfig`,\n );\n },\n },\n\n // Updates the DDL event trigger protocol to v2, and adds support for\n // ALTER SCHEMA x RENAME TO y\n 7: {\n migrateSchema: async (lc, sql) => {\n const [{publications}] = await sql<{publications: string[]}[]>`\n SELECT publications FROM ${sql(shardConfigTable)}`;\n await setupTriggers(lc, sql, {...shard, publications});\n lc.info?.(`Upgraded to v2 event triggers`);\n },\n },\n\n // Adds support for non-disruptive resyncs, which tracks multiple\n // replicas with different slot names.\n 8: {\n migrateSchema: async (lc, sql) => {\n const legacyShardConfigSchema = v.object({\n replicaVersion: v.string().nullable(),\n initialSchema: publishedSchema.nullable(),\n });\n const result = await sql`\n SELECT \"replicaVersion\", \"initialSchema\" FROM ${sql(shardConfigTable)}`;\n assert(\n result.length === 1,\n () => `Expected exactly one shardConfig row, got ${result.length}`,\n );\n const {replicaVersion, initialSchema} = v.parse(\n result[0],\n legacyShardConfigSchema,\n 'passthrough',\n );\n\n await Promise.all([\n sql`\n CREATE TABLE ${sql(upstreamSchema(shard))}.replicas (\n \"slot\" TEXT PRIMARY KEY,\n \"version\" TEXT NOT NULL,\n \"initialSchema\" JSON NOT NULL\n );\n `,\n sql`\n INSERT INTO ${sql(upstreamSchema(shard))}.replicas ${sql({\n slot: legacyReplicationSlot(shard),\n version: replicaVersion,\n initialSchema,\n })}\n `,\n sql`\n ALTER TABLE ${sql(shardConfigTable)} DROP \"replicaVersion\", DROP \"initialSchema\"\n `,\n ]);\n lc.info?.(`Upgraded schema to support non-disruptive resyncs`);\n },\n },\n\n // Fixes field ordering of compound indexes. This incremental migration\n // only fixes indexes resulting from new schema changes. A full resync is\n // required to fix existing indexes.\n //\n // The migration has been subsumed by the identical logic for migrating\n // to v12 (i.e. a trigger upgrade).\n 9: {},\n\n // Adds the `mutations` table used to track mutation results.\n 10: {\n migrateSchema: async (lc, sql) => {\n await sql.unsafe(/*sql*/ `\n ${getMutationsTableDefinition(upstreamSchema(shard))}\n ALTER PUBLICATION ${id(metadataPublicationName(shard.appID, shard.shardNum))} ADD TABLE ${id(upstreamSchema(shard))}.\"mutations\";\n `);\n lc.info?.('Upgraded schema with new mutations table');\n },\n },\n\n 11: {\n migrateSchema: async (lc, sql) => {\n await sql`DROP TABLE IF EXISTS ${sql(appSchema(shard))}.\"schemaVersions\"`;\n lc.info?.(`Dropped legacy schemaVersions table`);\n },\n },\n\n // Upgrade DDL trigger to query schemaOID, needed information for auto-backfill.\n 12: {\n migrateSchema: async (lc, sql) => {\n const [{publications}] = await sql<{publications: string[]}[]>`\n SELECT publications FROM ${sql(shardConfigTable)}`;\n await setupTriggers(lc, sql, {...shard, publications});\n lc.info?.(`Upgraded DDL event triggers`);\n },\n },\n };\n}\n\nexport async function decommissionLegacyShard(\n lc: LogContext,\n db: PostgresDB,\n shard: ShardConfig,\n) {\n if (shard.appID !== 'zero') {\n // When migration from non-default shard ids, e.g. \"zero_prod\" => \"prod_0\",\n // clean up the old \"zero_prod\" shard if it is pre-v5. Note that the v5\n // check is important to guard against cleaning up a **new** \"zero_0\" app\n // that coexists with the current App (with app-id === \"0\").\n const versionHistory = await getVersionHistory(db, `zero_${shard.appID}`);\n if (versionHistory !== null && versionHistory.schemaVersion < 5) {\n await decommissionShard(lc, db, 'zero', shard.appID);\n }\n }\n}\n"],"names":["lc","v.object","v.string","replicaVersion","v.parse"],"mappings":";;;;;;;;;;AA8BA,eAAsB,kBACpB,IACA,IACA,OACe;AACf,QAAM,eAA0B;AAAA,IAC9B,eAAe,CAACA,KAAI,OAAO,0BAA0BA,KAAI,IAAI,KAAK;AAAA,IAClE,gBAAgB;AAAA,EAAA;AAElB,QAAM;AAAA,IACJ;AAAA,IACA,kBAAkB,MAAM,KAAK;AAAA,IAC7B,eAAe,KAAK;AAAA,IACpB;AAAA,IACA;AAAA;AAAA;AAAA;AAAA,IAIA,yBAAyB,OAAO,UAAU;AAAA,EAAA;AAE9C;AAKA,eAAsB,kBACpB,IACA,IACA,OACA,gBACe;AACf,QAAM;AAAA,IACJ;AAAA,IACA,kBAAkB,MAAM,KAAK;AAAA,IAC7B,eAAe,KAAK;AAAA,IACpB;AAAA,IACA;AAAA;AAAA;AAAA,MAGE,eAAe,MAAM;AACnB,cAAM,IAAI;AAAA,UACR,kBAAkB,eAAe,KAAK,CAAC;AAAA,QAAA;AAAA,MAE3C;AAAA,IAAA;AAAA,IAEF,yBAAyB,OAAO,cAAc;AAAA,EAAA;AAKhD,QAAM,wBAAwB,IAAI,IAAI,KAAK;AAC7C;AAEA,SAAS,yBACP,OACA,gBACyB;AACzB,QAAM,mBAAmB,GAAG,eAAe,KAAK,CAAC;AAEjD,SAAO;AAAA,IACL,GAAG;AAAA,MACD,eAAe,MAAM;AACnB,cAAM,IAAI,gBAAgB,mCAAmC;AAAA,MAC/D;AAAA,MACA,gBAAgB;AAAA,IAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAUlB,GAAG,CAAA;AAAA,IAEH,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC;AAAA,UACE;AAAA,UACA;AAAA,QAAA;AAEF,cAAM,QAAQ,IAAI;AAAA,UAChB;AAAA,wBACc,IAAI,gBAAgB,CAAC;AAAA,UACnC;AAAA,mBACS,IAAI,gBAAgB,CAAC,QAAQ,IAAI,EAAC,eAAA,CAAe,CAAC;AAAA,QAAA,CAC5D;AACD,WAAG;AAAA,UACD,2BAA2B,cAAc;AAAA,QAAA;AAAA,MAE7C;AAAA,IAAA;AAAA;AAAA;AAAA,IAKF,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,CAAC,EAAC,cAAa,IAAI,MAAM;AAAA,qCACF,IAAI,gBAAgB,CAAC;AAClD,cAAM,cAAc,IAAI,KAAK,EAAC,GAAG,OAAO,cAAa;AACrD,WAAG,OAAO,+BAA+B;AAAA,MAC3C;AAAA,IAAA;AAAA;AAAA;AAAA,IAKF,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,0BAA0BC,OAAS;AAAA,UACvC,gBAAgBC,OAAE,EAAS,SAAA;AAAA,UAC3B,eAAe,gBAAgB,SAAA;AAAA,QAAS,CACzC;AACD,cAAM,SAAS,MAAM;AAAA,0DAC6B,IAAI,gBAAgB,CAAC;AACvE;AAAA,UACE,OAAO,WAAW;AAAA,UAClB,MAAM,6CAA6C,OAAO,MAAM;AAAA,QAAA;AAElE,cAAM,EAAC,gBAAAC,iBAAgB,cAAA,IAAiBC;AAAAA,UACtC,OAAO,CAAC;AAAA,UACR;AAAA,UACA;AAAA,QAAA;AAGF,cAAM,QAAQ,IAAI;AAAA,UAChB;AAAA,yBACe,IAAI,eAAe,KAAK,CAAC,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,UAMzC;AAAA,wBACc,IAAI,eAAe,KAAK,CAAC,CAAC,aAAa,IAAI;AAAA,YACvD,MAAM,sBAAsB,KAAK;AAAA,YACjC,SAASD;AAAAA,YACT;AAAA,UAAA,CACD,CAAC;AAAA;AAAA,UAEF;AAAA,wBACc,IAAI,gBAAgB,CAAC;AAAA;AAAA,QAAA,CAEpC;AACD,WAAG,OAAO,mDAAmD;AAAA,MAC/D;AAAA,IAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IASF,GAAG,CAAA;AAAA;AAAA,IAGH,IAAI;AAAA,MACF,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,IAAI;AAAA;AAAA,UAAe;AAAA,YACrB,4BAA4B,eAAe,KAAK,CAAC,CAAC;AAAA,8BAChC,GAAG,wBAAwB,MAAM,OAAO,MAAM,QAAQ,CAAC,CAAC,cAAc,GAAG,eAAe,KAAK,CAAC,CAAC;AAAA;AAAA,QAAA;AAErH,WAAG,OAAO,0CAA0C;AAAA,MACtD;AAAA,IAAA;AAAA,IAGF,IAAI;AAAA,MACF,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,2BAA2B,IAAI,UAAU,KAAK,CAAC,CAAC;AACtD,WAAG,OAAO,qCAAqC;AAAA,MACjD;AAAA,IAAA;AAAA;AAAA,IAIF,IAAI;AAAA,MACF,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,CAAC,EAAC,cAAa,IAAI,MAAM;AAAA,qCACF,IAAI,gBAAgB,CAAC;AAClD,cAAM,cAAc,IAAI,KAAK,EAAC,GAAG,OAAO,cAAa;AACrD,WAAG,OAAO,6BAA6B;AAAA,MACzC;AAAA,IAAA;AAAA,EACF;AAEJ;AAEA,eAAsB,wBACpB,IACA,IACA,OACA;AACA,MAAI,MAAM,UAAU,QAAQ;AAK1B,UAAM,iBAAiB,MAAM,kBAAkB,IAAI,QAAQ,MAAM,KAAK,EAAE;AACxE,QAAI,mBAAmB,QAAQ,eAAe,gBAAgB,GAAG;AAC/D,YAAM,kBAAkB,IAAI,IAAI,QAAQ,MAAM,KAAK;AAAA,IACrD;AAAA,EACF;AACF;"}
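The init.ts source embedded in this map shows the shard-schema migration map gaining two entries in this release: v11 drops the legacy "schemaVersions" table, and v12 re-runs setupTriggers so the DDL event triggers also capture schemaOID (needed for auto-backfill), subsuming the old v9 trigger upgrade, whose entry becomes a no-op. As rough orientation, the sketch below shows how a numbered IncrementalMigrationMap of this shape is typically applied; the Sql type and applyMigrations runner are simplified stand-ins, not the package's actual runSchemaMigrations API.

// Hedged sketch: applying a numbered migration map like the one in init.ts.
// `Sql` and `applyMigrations` are illustrative stand-ins, not zero-cache APIs.
type Sql = (query: string) => Promise<void>;
type Migration = {
  migrateSchema?: (sql: Sql) => Promise<void>;
  minSafeVersion?: number;
};

async function applyMigrations(
  sql: Sql,
  current: number, // schema version currently recorded for the shard
  migrations: Record<number, Migration>,
): Promise<number> {
  // Keys are the versions being migrated *to*; apply them in ascending
  // order. An empty entry (e.g. `9: {}` above) just bumps the version
  // without doing any work, which is how v9 is retired once v12 subsumes
  // its trigger upgrade.
  const pending = Object.keys(migrations)
    .map(Number)
    .filter(version => version > current)
    .sort((a, b) => a - b);
  for (const version of pending) {
    await migrations[version].migrateSchema?.(sql);
    current = version;
  }
  return current;
}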
package/out/zero-cache/src/services/change-source/pg/schema/published.d.ts

@@ -2,24 +2,26 @@ import type postgres from 'postgres';
 import * as v from '../../../../../../shared/src/valita.ts';
 export declare function publishedTableQuery(publications: readonly string[]): string;
 export declare function indexDefinitionsQuery(publications: readonly string[]): string;
-export declare const publishedSchema: v.ObjectType<Omit<{
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+export declare const publishedSchema: v.Type<{
+indexes: {
+isReplicaIdentity?: boolean | undefined;
+isPrimaryKey?: boolean | undefined;
+isImmediate?: boolean | undefined;
+name: string;
+columns: Record<string, "ASC" | "DESC">;
+schema: string;
+tableName: string;
+unique: boolean;
+}[];
+tables: {
+replicaIdentityColumns: string[];
+primaryKey?: string[] | undefined;
+schemaOID?: number | undefined;
+replicaIdentity?: "n" | "d" | "f" | "i" | undefined;
+name: string;
+schema: string;
+oid: number;
+columns: Record<string, {
 pgTypeClass?: "e" | "d" | "b" | "c" | "p" | "r" | "m" | undefined;
 elemPgTypeClass?: "e" | "d" | "b" | "c" | "p" | "r" | "m" | null | undefined;
 characterMaximumLength?: number | null | undefined;
@@ -28,80 +30,24 @@ export declare const publishedSchema: v.ObjectType<Omit<{
 pos: number;
 dataType: string;
 typeOID: number;
-}
-
-publications: v.Type<Record<string, {
+}>;
+publications: Record<string, {
 rowFilter: string | null;
-}
-}
-}
-indexes: v.ArrayType<v.ObjectType<Omit<Omit<{
-name: v.Type<string>;
-tableName: v.Type<string>;
-unique: v.Type<boolean>;
-columns: v.Type<Record<string, "ASC" | "DESC">>;
-}, "schema"> & {
-schema: v.Type<string>;
-}, "isReplicaIdentity" | "isImmediate"> & {
-isReplicaIdentity: v.Optional<boolean>;
-isImmediate: v.Optional<boolean>;
-}, undefined>>;
-}, undefined>;
+}>;
+}[];
+}>;
 export type PublishedSchema = v.Infer<typeof publishedSchema>;
-
-
-
-
-
-
-
-
-
-
-
-}>>;
-primaryKey: v.Optional<string[]>;
-}, "schema"> & {
-schema: v.Type<string>;
-}, "columns" | "publications" | "oid" | "replicaIdentity"> & {
-oid: v.Type<number>;
-columns: v.Type<Record<string, {
-pgTypeClass?: "e" | "d" | "b" | "c" | "p" | "r" | "m" | undefined;
-elemPgTypeClass?: "e" | "d" | "b" | "c" | "p" | "r" | "m" | null | undefined;
-characterMaximumLength?: number | null | undefined;
-notNull?: boolean | null | undefined;
-dflt?: string | null | undefined;
-pos: number;
-dataType: string;
-typeOID: number;
-}>>;
-replicaIdentity: v.Optional<"n" | "d" | "f" | "i">;
-publications: v.Type<Record<string, {
-rowFilter: string | null;
-}>>;
-}, undefined>>;
-}, "indexes"> & {
-indexes: v.ArrayType<v.ObjectType<Omit<Omit<{
-name: v.Type<string>;
-tableName: v.Type<string>;
-unique: v.Type<boolean>;
-columns: v.Type<Record<string, "ASC" | "DESC">>;
-}, "schema"> & {
-schema: v.Type<string>;
-}, "isReplicaIdentity" | "isImmediate"> & {
-isReplicaIdentity: v.Optional<boolean>;
-isImmediate: v.Optional<boolean>;
-}, undefined>>;
-}, "publications"> & {
-publications: v.ArrayType<v.ObjectType<{
-pubname: v.Type<string>;
-pubinsert: v.Type<boolean>;
-pubupdate: v.Type<boolean>;
-pubdelete: v.Type<boolean>;
-pubtruncate: v.Type<boolean>;
-}, undefined>>;
-}, undefined>;
-export type PublicationInfo = v.Infer<typeof publicationInfoSchema>;
+export type PublishedTableWithReplicaIdentity = PublishedSchema['tables'][number];
+declare const publicationsResultSchema: v.ArrayType<v.ObjectType<{
+pubname: v.Type<string>;
+pubinsert: v.Type<boolean>;
+pubupdate: v.Type<boolean>;
+pubdelete: v.Type<boolean>;
+pubtruncate: v.Type<boolean>;
+}, undefined>>;
+export type PublicationInfo = PublishedSchema & {
+publications: v.Infer<typeof publicationsResultSchema>;
+};
 /**
  * Retrieves published tables and columns.
  */
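The reshaped declarations above are the visible effect of published.ts post-processing its valita schema with .map() (see the published.js hunks below): publishedSchema flattens from nested v.ObjectType machinery to a plain v.Type, every published table now carries a precomputed replicaIdentityColumns array plus an optional schemaOID, indexes gain an isPrimaryKey flag, and PublicationInfo becomes an intersection type. A hedged consumption sketch, assuming a PublicationInfo value obtained elsewhere (findKeyColumns is a hypothetical helper, not part of the package):

// Hedged sketch against the new declarations; `info` would come from
// getPublicationInfo(). findKeyColumns is a hypothetical helper.
import type {PublicationInfo} from './published.ts';

function findKeyColumns(
  info: PublicationInfo,
  schema: string,
  name: string,
): string[] {
  const table = info.tables.find(t => t.schema === schema && t.name === name);
  // With 0.26.0-canary.3 the row-identity columns are precomputed on the
  // table itself, so callers no longer cross-reference info.indexes.
  // An empty array corresponds to REPLICA IDENTITY NOTHING ('n').
  return table?.replicaIdentityColumns ?? [];
}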
package/out/zero-cache/src/services/change-source/pg/schema/published.d.ts.map

@@ -1 +1 @@
-{"version":3,"file":"published.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/published.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,QAAQ,MAAM,UAAU,CAAC;AAErC,OAAO,KAAK,CAAC,MAAM,wCAAwC,CAAC;AAG5D,wBAAgB,mBAAmB,CAAC,YAAY,EAAE,SAAS,MAAM,EAAE,
+{"version":3,"file":"published.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/published.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,QAAQ,MAAM,UAAU,CAAC;AAErC,OAAO,KAAK,CAAC,MAAM,wCAAwC,CAAC;AAG5D,wBAAgB,mBAAmB,CAAC,YAAY,EAAE,SAAS,MAAM,EAAE,UAmFlE;AAED,wBAAgB,qBAAqB,CAAC,YAAY,EAAE,SAAS,MAAM,EAAE,UA6EpE;AAED,eAAO,MAAM,eAAe;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;EAsCvB,CAAC;AAEN,MAAM,MAAM,eAAe,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,eAAe,CAAC,CAAC;AAE9D,MAAM,MAAM,iCAAiC,GAC3C,eAAe,CAAC,QAAQ,CAAC,CAAC,MAAM,CAAC,CAAC;AAUpC,QAAA,MAAM,wBAAwB;;;;;;cAA6B,CAAC;AAE5D,MAAM,MAAM,eAAe,GAAG,eAAe,GAAG;IAC9C,YAAY,EAAE,CAAC,CAAC,KAAK,CAAC,OAAO,wBAAwB,CAAC,CAAC;CACxD,CAAC;AAEF;;GAEG;AACH,wBAAsB,kBAAkB,CACtC,GAAG,EAAE,QAAQ,CAAC,GAAG,EACjB,YAAY,EAAE,MAAM,EAAE,GACrB,OAAO,CAAC,eAAe,CAAC,CAsD1B"}
package/out/zero-cache/src/services/change-source/pg/schema/published.js

@@ -1,7 +1,7 @@
 import { literal } from "pg-format";
 import { equals } from "../../../../../../shared/src/set-utils.js";
 import { parse } from "../../../../../../shared/src/valita.js";
-import {
+import { publishedIndexSpec, publishedTableSpec } from "../../../../db/specs.js";
 import { object, array, boolean, string } from "@badrap/valita";
 function publishedTableQuery(publications) {
 return (
@@ -9,7 +9,8 @@ function publishedTableQuery(publications) {
 `
 WITH published_columns AS (SELECT
 pc.oid::int8 AS "oid",
-nspname AS "schema", 
+nspname AS "schema",
+pc.relnamespace::int8 AS "schemaOID" ,
 pc.relname AS "name",
 pc.relreplident AS "replicaIdentity",
 attnum AS "pos",
@@ -43,6 +44,7 @@ ORDER BY nspname, pc.relname),
 tables AS (SELECT json_build_object(
 'oid', "oid",
 'schema', "schema",
+'schemaOID', "schemaOID",
 'name', "name",
 'replicaIdentity', "replicaIdentity",
 'columns', json_object_agg(
@@ -76,7 +78,7 @@ tables AS (SELECT json_build_object(
 "publication",
 jsonb_build_object('rowFilter', "rowFilter")
 )
-) AS "table" FROM published_columns GROUP BY "schema", "name", "oid", "replicaIdentity")
+) AS "table" FROM published_columns GROUP BY "schema", "schemaOID", "name", "oid", "replicaIdentity")
 
 SELECT COALESCE(json_agg("table"), '[]'::json) as "tables" FROM tables
 `
@@ -93,6 +95,7 @@ function indexDefinitionsQuery(publications) {
 index_column.name as "col",
 CASE WHEN pg_index.indoption[index_column.pos-1] & 1 = 1 THEN 'DESC' ELSE 'ASC' END as "dir",
 pg_index.indisunique as "unique",
+pg_index.indisprimary as "isPrimaryKey",
 pg_index.indisreplident as "isReplicaIdentity",
 pg_index.indimmediate as "isImmediate"
 FROM pg_indexes
@@ -133,21 +136,51 @@ function indexDefinitionsQuery(publications) {
 'tableName', "tableName",
 'name', "name",
 'unique', "unique",
+'isPrimaryKey', "isPrimaryKey",
 'isReplicaIdentity', "isReplicaIdentity",
 'isImmediate', "isImmediate",
 'columns', json_object_agg("col", "dir")
 ) AS index FROM indexed_columns
-GROUP BY "schema", "tableName", "name", "unique", "isReplicaIdentity", "isImmediate")
+GROUP BY "schema", "tableName", "name", "unique",
+"isPrimaryKey", "isReplicaIdentity", "isImmediate")
 
 SELECT COALESCE(json_agg("index"), '[]'::json) as "indexes" FROM indexes
 `
 );
 }
-const
-
-
-
-
+const publishedSchema = object({
+tables: array(publishedTableSpec),
+indexes: array(publishedIndexSpec)
+}).map(({ tables, indexes }) => ({
+indexes,
+// Denormalize the schema such that each `table` includes the
+// `replicaIdentityColumns` corresponding to the table's
+// replica identity and associated primary key or index.
+tables: tables.map((table) => {
+const replicaIdentityColumns = [];
+switch (table.replicaIdentity) {
+case "d":
+replicaIdentityColumns.push(...table.primaryKey ?? []);
+break;
+case "i":
+replicaIdentityColumns.push(
+...Object.keys(
+indexes.find(
+(ind) => ind.schema === table.schema && ind.tableName === table.name && ind.isReplicaIdentity
+)?.columns ?? {}
+)
+);
+break;
+case "f":
+replicaIdentityColumns.push(...Object.keys(table.columns));
+break;
+}
+return {
+...table,
+replicaIdentityColumns
+};
+})
+}));
 const publicationSchema = object({
 pubname: string(),
 pubinsert: boolean(),
@@ -156,9 +189,6 @@ const publicationSchema = object({
 pubtruncate: boolean()
 });
 const publicationsResultSchema = array(publicationSchema);
-publishedSchema.extend({
-publications: publicationsResultSchema
-});
 async function getPublicationInfo(sql, publications) {
 const result = await sql.unsafe(
 /*sql*/
@@ -200,8 +230,15 @@ async function getPublicationInfo(sql, publications) {
 }
 return {
 publications: parse(result[1], publicationsResultSchema),
-...parse(
-
+...parse(
+{
+...result[2][0],
+// tables
+...result[3][0]
+// indexes
+},
+publishedSchema
+)
 };
 }
 export {
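The .map() transform added above is the substantive change in this file: it derives each table's replicaIdentityColumns from the table's PostgreSQL replica identity setting at parse time. Restated as a standalone function for readability; the trimmed TableSpec/IndexSpec shapes below are illustrative, limited to the fields the diff actually reads.

// Hedged restatement of the denormalization logic added above.
type IndexSpec = {
  schema: string;
  tableName: string;
  isReplicaIdentity?: boolean;
  columns: Record<string, 'ASC' | 'DESC'>;
};
type TableSpec = {
  schema: string;
  name: string;
  replicaIdentity?: 'n' | 'd' | 'f' | 'i';
  primaryKey?: string[];
  columns: Record<string, unknown>;
};

function replicaIdentityColumns(
  table: TableSpec,
  indexes: IndexSpec[],
): string[] {
  switch (table.replicaIdentity) {
    case 'd': // DEFAULT: the primary key, if any
      return [...(table.primaryKey ?? [])];
    case 'i': {
      // USING INDEX: columns of the index flagged pg_index.indisreplident
      const index = indexes.find(
        ind =>
          ind.schema === table.schema &&
          ind.tableName === table.name &&
          ind.isReplicaIdentity,
      );
      return Object.keys(index?.columns ?? {});
    }
    case 'f': // FULL: every column identifies the row
      return Object.keys(table.columns);
    default: // 'n' (NOTHING) or unset: no identity columns
      return [];
  }
}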
package/out/zero-cache/src/services/change-source/pg/schema/published.js.map

@@ -1 +1 @@
-{"version":3,"file":"published.js","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/published.ts"],"sourcesContent":["import {literal} from 'pg-format';\nimport type postgres from 'postgres';\nimport {equals} from '../../../../../../shared/src/set-utils.ts';\nimport * as v from '../../../../../../shared/src/valita.ts';\nimport {publishedIndexSpec, publishedTableSpec} from '../../../../db/specs.ts';\n\nexport function publishedTableQuery(publications: readonly string[]) {\n // Notes:\n // * There's a bug in PG15 in which generated columns are incorrectly\n // included in pg_publication_tables.attnames, (even though the generated\n // column values are not be included in the replication stream).\n // The WHERE condition `attgenerated = ''` fixes this by explicitly excluding\n // generated columns from the list.\n return /*sql*/ `\nWITH published_columns AS (SELECT \n pc.oid::int8 AS \"oid\",\n nspname AS \"schema\", \n pc.relname AS \"name\", \n pc.relreplident AS \"replicaIdentity\",\n attnum AS \"pos\", \n attname AS \"col\", \n pt.typname AS \"type\", \n atttypid::int8 AS \"typeOID\", \n pt.typtype,\n elem_pt.typtype AS \"elemTyptype\",\n NULLIF(atttypmod, -1) AS \"maxLen\", \n attndims \"arrayDims\", \n attnotnull AS \"notNull\",\n pg_get_expr(pd.adbin, pd.adrelid) as \"dflt\",\n NULLIF(ARRAY_POSITION(conkey, attnum), -1) AS \"keyPos\", \n pb.rowfilter as \"rowFilter\",\n pb.pubname as \"publication\"\nFROM pg_attribute\nJOIN pg_class pc ON pc.oid = attrelid\nJOIN pg_namespace pns ON pns.oid = relnamespace\nJOIN pg_type pt ON atttypid = pt.oid\nLEFT JOIN pg_type elem_pt ON elem_pt.oid = pt.typelem\nJOIN pg_publication_tables as pb ON \n pb.schemaname = nspname AND \n pb.tablename = pc.relname AND\n attname = ANY(pb.attnames)\nLEFT JOIN pg_constraint pk ON pk.contype = 'p' AND pk.connamespace = relnamespace AND pk.conrelid = attrelid\nLEFT JOIN pg_attrdef pd ON pd.adrelid = attrelid AND pd.adnum = attnum\nWHERE pb.pubname IN (${literal(publications)}) AND \n (current_setting('server_version_num')::int >= 160000 OR attgenerated = '')\nORDER BY nspname, pc.relname),\n\ntables AS (SELECT json_build_object(\n 'oid', \"oid\",\n 'schema', \"schema\", \n 'name', \"name\", \n 'replicaIdentity', \"replicaIdentity\",\n 'columns', json_object_agg(\n DISTINCT\n col,\n jsonb_build_object(\n 'pos', \"pos\",\n 'dataType', CASE WHEN \"arrayDims\" = 0 \n THEN \"type\" \n ELSE substring(\"type\" from 2) || repeat('[]', \"arrayDims\") END,\n 'pgTypeClass', \"typtype\",\n 'elemPgTypeClass', \"elemTyptype\",\n 'typeOID', \"typeOID\",\n -- https://stackoverflow.com/a/52376230\n 'characterMaximumLength', CASE WHEN \"typeOID\" = 1043 OR \"typeOID\" = 1042 \n THEN \"maxLen\" - 4 \n ELSE \"maxLen\" END,\n 'notNull', \"notNull\",\n 'dflt', \"dflt\"\n )\n ),\n 'primaryKey', ARRAY( SELECT json_object_keys(\n json_strip_nulls(\n json_object_agg(\n DISTINCT \"col\", \"keyPos\" ORDER BY \"keyPos\"\n )\n )\n )),\n 'publications', json_object_agg(\n DISTINCT \n \"publication\", \n jsonb_build_object('rowFilter', \"rowFilter\")\n )\n) AS \"table\" FROM published_columns GROUP BY \"schema\", \"name\", \"oid\", \"replicaIdentity\")\n\nSELECT COALESCE(json_agg(\"table\"), '[]'::json) as \"tables\" FROM tables\n `;\n}\n\nexport function indexDefinitionsQuery(publications: readonly string[]) {\n // Note: pg_attribute contains column names for tables and for indexes.\n // However, the latter does not get updated when a column in a table is\n // renamed.\n //\n // https://www.postgresql.org/message-id/5860814f-c91d-4ab0-b771-ded90d7b9c55%40www.fastmail.com\n //\n // To address this, the pg_attribute rows are looked up for the index's\n // table rather than the index itself, using the pg_index.indkey array\n // to determine the set and order of columns to include.\n //\n // Notes:\n // * The first bit of indoption is 1 for DESC and 0 for ASC:\n // https://github.com/postgres/postgres/blob/4e1fad37872e49a711adad5d9870516e5c71a375/src/include/catalog/pg_index.h#L89\n // * pg_index.indkey is an int2vector which is 0-based instead of 1-based.\n // * The additional check for attgenerated is required for the aforementioned\n // (in publishedTableQuery) bug in PG15 in which generated columns are\n // incorrectly included in pg_publication_tables.attnames\n return /*sql*/ `\n WITH indexed_columns AS (SELECT\n pg_indexes.schemaname as \"schema\",\n pg_indexes.tablename as \"tableName\",\n pg_indexes.indexname as \"name\",\n index_column.name as \"col\",\n CASE WHEN pg_index.indoption[index_column.pos-1] & 1 = 1 THEN 'DESC' ELSE 'ASC' END as \"dir\",\n pg_index.indisunique as \"unique\",\n pg_index.indisreplident as \"isReplicaIdentity\",\n pg_index.indimmediate as \"isImmediate\"\n FROM pg_indexes\n JOIN pg_namespace ON pg_indexes.schemaname = pg_namespace.nspname\n JOIN pg_class pc ON\n pc.relname = pg_indexes.indexname\n AND pc.relnamespace = pg_namespace.oid\n JOIN pg_publication_tables as pb ON \n pb.schemaname = pg_indexes.schemaname AND \n pb.tablename = pg_indexes.tablename\n JOIN pg_index ON pg_index.indexrelid = pc.oid\n JOIN LATERAL (\n SELECT array_agg(attname) as attnames, array_agg(attgenerated != '') as generated FROM pg_attribute\n WHERE attrelid = pg_index.indrelid\n AND attnum = ANY( (pg_index.indkey::smallint[] )[:pg_index.indnkeyatts - 1] )\n ) as indexed ON true\n JOIN LATERAL (\n SELECT pg_attribute.attname as name, col.index_pos as pos\n FROM UNNEST( (pg_index.indkey::smallint[])[:pg_index.indnkeyatts - 1] ) \n WITH ORDINALITY as col(table_pos, index_pos)\n JOIN pg_attribute ON attrelid = pg_index.indrelid AND attnum = col.table_pos\n ) AS index_column ON true\n LEFT JOIN pg_constraint ON pg_constraint.conindid = pc.oid\n WHERE pb.pubname IN (${literal(publications)})\n AND pg_index.indexprs IS NULL\n AND pg_index.indpred IS NULL\n AND (pg_constraint.contype IS NULL OR pg_constraint.contype IN ('p', 'u'))\n AND indexed.attnames <@ pb.attnames\n AND (current_setting('server_version_num')::int >= 160000 OR false = ALL(indexed.generated))\n ORDER BY\n pg_indexes.schemaname,\n pg_indexes.tablename,\n pg_indexes.indexname,\n index_column.pos ASC),\n \n indexes AS (SELECT json_build_object(\n 'schema', \"schema\",\n 'tableName', \"tableName\",\n 'name', \"name\",\n 'unique', \"unique\",\n 'isReplicaIdentity', \"isReplicaIdentity\",\n 'isImmediate', \"isImmediate\",\n 'columns', json_object_agg(\"col\", \"dir\")\n ) AS index FROM indexed_columns \n GROUP BY \"schema\", \"tableName\", \"name\", \"unique\", \"isReplicaIdentity\", \"isImmediate\")\n\n SELECT COALESCE(json_agg(\"index\"), '[]'::json) as \"indexes\" FROM indexes\n `;\n}\n\nconst publishedTablesSchema = v.object({tables: v.array(publishedTableSpec)});\nconst publishedIndexesSchema = v.object({indexes: v.array(publishedIndexSpec)});\n\nexport const publishedSchema = publishedTablesSchema.extend(\n publishedIndexesSchema.shape,\n);\n\nexport type PublishedSchema = v.Infer<typeof publishedSchema>;\n\nconst publicationSchema = v.object({\n pubname: v.string(),\n pubinsert: v.boolean(),\n 
pubupdate: v.boolean(),\n pubdelete: v.boolean(),\n pubtruncate: v.boolean(),\n});\n\nconst publicationsResultSchema = v.array(publicationSchema);\n\nconst publicationInfoSchema = publishedSchema.extend({\n publications: publicationsResultSchema,\n});\n\nexport type PublicationInfo = v.Infer<typeof publicationInfoSchema>;\n\n/**\n * Retrieves published tables and columns.\n */\nexport async function getPublicationInfo(\n sql: postgres.Sql,\n publications: string[],\n): Promise<PublicationInfo> {\n const result = await sql.unsafe(/*sql*/ `\n SELECT \n schemaname AS \"schema\",\n tablename AS \"table\", \n json_object_agg(pubname, attnames) AS \"publications\"\n FROM pg_publication_tables pb\n WHERE pb.pubname IN (${literal(publications)})\n GROUP BY schemaname, tablename;\n\n SELECT ${Object.keys(publicationSchema.shape).join(\n ',',\n )} FROM pg_publication pb\n WHERE pb.pubname IN (${literal(publications)})\n ORDER BY pubname;\n\n ${publishedTableQuery(publications)};\n\n ${indexDefinitionsQuery(publications)};\n`);\n\n // The first query is used to check that tables in multiple publications\n // always publish the same set of columns.\n const publishedColumns = result[0] as {\n schema: string;\n table: string;\n publications: Record<string, string[]>;\n }[];\n for (const {table, publications} of publishedColumns) {\n let expected: Set<string>;\n Object.entries(publications).forEach(([_, columns], i) => {\n const cols = new Set(columns);\n if (i === 0) {\n expected = cols;\n } else if (!equals(expected, cols)) {\n throw new Error(\n `Table ${table} is exported with different columns: [${[\n ...expected,\n ]}] vs [${[...cols]}]`,\n );\n }\n });\n }\n\n return {\n publications: v.parse(result[1], publicationsResultSchema),\n ...v.parse(result[2][0], publishedTablesSchema),\n ...v.parse(result[3][0], publishedIndexesSchema),\n 
};\n}\n"],"names":["v.object","v.array","v.string","v.boolean","publications","v.parse"],"mappings":";;;;;AAMO,SAAS,oBAAoB,cAAiC;AAOnE;AAAA;AAAA,IAAe;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,uBA8BM,QAAQ,YAAY,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AA4C5C;AAEO,SAAS,sBAAsB,cAAiC;AAkBrE;AAAA;AAAA,IAAe;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,2BA+BU,QAAQ,YAAY,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAyBhD;AAEA,MAAM,wBAAwBA,OAAS,EAAC,QAAQC,MAAQ,kBAAkB,EAAA,CAAE;AAC5E,MAAM,yBAAyBD,OAAS,EAAC,SAASC,MAAQ,kBAAkB,EAAA,CAAE;AAEvE,MAAM,kBAAkB,sBAAsB;AAAA,EACnD,uBAAuB;AACzB;AAIA,MAAM,oBAAoBD,OAAS;AAAA,EACjC,SAASE,OAAE;AAAA,EACX,WAAWC,QAAE;AAAA,EACb,WAAWA,QAAE;AAAA,EACb,WAAWA,QAAE;AAAA,EACb,aAAaA,QAAE;AACjB,CAAC;AAED,MAAM,2BAA2BF,MAAQ,iBAAiB;AAE5B,gBAAgB,OAAO;AAAA,EACnD,cAAc;AAChB,CAAC;AAOD,eAAsB,mBACpB,KACA,cAC0B;AAC1B,QAAM,SAAS,MAAM,IAAI;AAAA;AAAA,IAAe;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,2BAMf,QAAQ,YAAY,CAAC;AAAA;AAAA;AAAA,WAGrC,OAAO,KAAK,kBAAkB,KAAK,EAAE;AAAA,MAC5C;AAAA,IAAA,CACD;AAAA,2BACwB,QAAQ,YAAY,CAAC;AAAA;AAAA;AAAA,IAG5C,oBAAoB,YAAY,CAAC;AAAA;AAAA,IAEjC,sBAAsB,YAAY,CAAC;AAAA;AAAA,EAAA;AAKrC,QAAM,mBAAmB,OAAO,CAAC;AAKjC,aAAW,EAAC,OAAO,cAAAG,cAAAA,KAAiB,kBAAkB;AACpD,QAAI;AACJ,WAAO,QAAQA,aAAY,EAAE,QAAQ,CAAC,CAAC,GAAG,OAAO,GAAG,MAAM;AACxD,YAAM,OAAO,IAAI,IAAI,OAAO;AAC5B,UAAI,MAAM,GAAG;AACX,mBAAW;AAAA,MACb,WAAW,CAAC,OAAO,UAAU,IAAI,GAAG;AAClC,cAAM,IAAI;AAAA,UACR,SAAS,KAAK,yCAAyC;AAAA,YACrD,GAAG;AAAA,UAAA,CACJ,SAAS,CAAC,GAAG,IAAI,CAAC;AAAA,QAAA;AAAA,MAEvB;AAAA,IACF,CAAC;AAAA,EACH;AAEA,SAAO;AAAA,IACL,cAAcC,MAAQ,OAAO,CAAC,GAAG,wBAAwB;AAAA,IACzD,GAAGA,MAAQ,OAAO,CAAC,EAAE,CAAC,GAAG,qBAAqB;AAAA,IAC9C,GAAGA,MAAQ,OAAO,CAAC,EAAE,CAAC,GAAG,sBAAsB;AAAA,EAAA;AAEnD;"}
+
{"version":3,"file":"published.js","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/published.ts"],"sourcesContent":["import {literal} from 'pg-format';\nimport type postgres from 'postgres';\nimport {equals} from '../../../../../../shared/src/set-utils.ts';\nimport * as v from '../../../../../../shared/src/valita.ts';\nimport {publishedIndexSpec, publishedTableSpec} from '../../../../db/specs.ts';\n\nexport function publishedTableQuery(publications: readonly string[]) {\n // Notes:\n // * There's a bug in PG15 in which generated columns are incorrectly\n // included in pg_publication_tables.attnames, (even though the generated\n // column values are not be included in the replication stream).\n // The WHERE condition `attgenerated = ''` fixes this by explicitly excluding\n // generated columns from the list.\n return /*sql*/ `\nWITH published_columns AS (SELECT \n pc.oid::int8 AS \"oid\",\n nspname AS \"schema\",\n pc.relnamespace::int8 AS \"schemaOID\" ,\n pc.relname AS \"name\", \n pc.relreplident AS \"replicaIdentity\",\n attnum AS \"pos\", \n attname AS \"col\", \n pt.typname AS \"type\", \n atttypid::int8 AS \"typeOID\", \n pt.typtype,\n elem_pt.typtype AS \"elemTyptype\",\n NULLIF(atttypmod, -1) AS \"maxLen\", \n attndims \"arrayDims\", \n attnotnull AS \"notNull\",\n pg_get_expr(pd.adbin, pd.adrelid) as \"dflt\",\n NULLIF(ARRAY_POSITION(conkey, attnum), -1) AS \"keyPos\", \n pb.rowfilter as \"rowFilter\",\n pb.pubname as \"publication\"\nFROM pg_attribute\nJOIN pg_class pc ON pc.oid = attrelid\nJOIN pg_namespace pns ON pns.oid = relnamespace\nJOIN pg_type pt ON atttypid = pt.oid\nLEFT JOIN pg_type elem_pt ON elem_pt.oid = pt.typelem\nJOIN pg_publication_tables as pb ON \n pb.schemaname = nspname AND \n pb.tablename = pc.relname AND\n attname = ANY(pb.attnames)\nLEFT JOIN pg_constraint pk ON pk.contype = 'p' AND pk.connamespace = relnamespace AND pk.conrelid = attrelid\nLEFT JOIN pg_attrdef pd ON pd.adrelid = attrelid AND pd.adnum = attnum\nWHERE pb.pubname IN (${literal(publications)}) AND \n (current_setting('server_version_num')::int >= 160000 OR attgenerated = '')\nORDER BY nspname, pc.relname),\n\ntables AS (SELECT json_build_object(\n 'oid', \"oid\",\n 'schema', \"schema\", \n 'schemaOID', \"schemaOID\",\n 'name', \"name\", \n 'replicaIdentity', \"replicaIdentity\",\n 'columns', json_object_agg(\n DISTINCT\n col,\n jsonb_build_object(\n 'pos', \"pos\",\n 'dataType', CASE WHEN \"arrayDims\" = 0 \n THEN \"type\" \n ELSE substring(\"type\" from 2) || repeat('[]', \"arrayDims\") END,\n 'pgTypeClass', \"typtype\",\n 'elemPgTypeClass', \"elemTyptype\",\n 'typeOID', \"typeOID\",\n -- https://stackoverflow.com/a/52376230\n 'characterMaximumLength', CASE WHEN \"typeOID\" = 1043 OR \"typeOID\" = 1042 \n THEN \"maxLen\" - 4 \n ELSE \"maxLen\" END,\n 'notNull', \"notNull\",\n 'dflt', \"dflt\"\n )\n ),\n 'primaryKey', ARRAY( SELECT json_object_keys(\n json_strip_nulls(\n json_object_agg(\n DISTINCT \"col\", \"keyPos\" ORDER BY \"keyPos\"\n )\n )\n )),\n 'publications', json_object_agg(\n DISTINCT \n \"publication\", \n jsonb_build_object('rowFilter', \"rowFilter\")\n )\n) AS \"table\" FROM published_columns GROUP BY \"schema\", \"schemaOID\", \"name\", \"oid\", \"replicaIdentity\")\n\nSELECT COALESCE(json_agg(\"table\"), '[]'::json) as \"tables\" FROM tables\n `;\n}\n\nexport function indexDefinitionsQuery(publications: readonly string[]) {\n // Note: pg_attribute contains column names for tables and for indexes.\n // However, the latter does not get 
updated when a column in a table is\n // renamed.\n //\n // https://www.postgresql.org/message-id/5860814f-c91d-4ab0-b771-ded90d7b9c55%40www.fastmail.com\n //\n // To address this, the pg_attribute rows are looked up for the index's\n // table rather than the index itself, using the pg_index.indkey array\n // to determine the set and order of columns to include.\n //\n // Notes:\n // * The first bit of indoption is 1 for DESC and 0 for ASC:\n // https://github.com/postgres/postgres/blob/4e1fad37872e49a711adad5d9870516e5c71a375/src/include/catalog/pg_index.h#L89\n // * pg_index.indkey is an int2vector which is 0-based instead of 1-based.\n // * The additional check for attgenerated is required for the aforementioned\n // (in publishedTableQuery) bug in PG15 in which generated columns are\n // incorrectly included in pg_publication_tables.attnames\n return /*sql*/ `\n WITH indexed_columns AS (SELECT\n pg_indexes.schemaname as \"schema\",\n pg_indexes.tablename as \"tableName\",\n pg_indexes.indexname as \"name\",\n index_column.name as \"col\",\n CASE WHEN pg_index.indoption[index_column.pos-1] & 1 = 1 THEN 'DESC' ELSE 'ASC' END as \"dir\",\n pg_index.indisunique as \"unique\",\n pg_index.indisprimary as \"isPrimaryKey\",\n pg_index.indisreplident as \"isReplicaIdentity\",\n pg_index.indimmediate as \"isImmediate\"\n FROM pg_indexes\n JOIN pg_namespace ON pg_indexes.schemaname = pg_namespace.nspname\n JOIN pg_class pc ON\n pc.relname = pg_indexes.indexname\n AND pc.relnamespace = pg_namespace.oid\n JOIN pg_publication_tables as pb ON \n pb.schemaname = pg_indexes.schemaname AND \n pb.tablename = pg_indexes.tablename\n JOIN pg_index ON pg_index.indexrelid = pc.oid\n JOIN LATERAL (\n SELECT array_agg(attname) as attnames, array_agg(attgenerated != '') as generated FROM pg_attribute\n WHERE attrelid = pg_index.indrelid\n AND attnum = ANY( (pg_index.indkey::smallint[] )[:pg_index.indnkeyatts - 1] )\n ) as indexed ON true\n JOIN LATERAL (\n SELECT pg_attribute.attname as name, col.index_pos as pos\n FROM UNNEST( (pg_index.indkey::smallint[])[:pg_index.indnkeyatts - 1] ) \n WITH ORDINALITY as col(table_pos, index_pos)\n JOIN pg_attribute ON attrelid = pg_index.indrelid AND attnum = col.table_pos\n ) AS index_column ON true\n LEFT JOIN pg_constraint ON pg_constraint.conindid = pc.oid\n WHERE pb.pubname IN (${literal(publications)})\n AND pg_index.indexprs IS NULL\n AND pg_index.indpred IS NULL\n AND (pg_constraint.contype IS NULL OR pg_constraint.contype IN ('p', 'u'))\n AND indexed.attnames <@ pb.attnames\n AND (current_setting('server_version_num')::int >= 160000 OR false = ALL(indexed.generated))\n ORDER BY\n pg_indexes.schemaname,\n pg_indexes.tablename,\n pg_indexes.indexname,\n index_column.pos ASC),\n \n indexes AS (SELECT json_build_object(\n 'schema', \"schema\",\n 'tableName', \"tableName\",\n 'name', \"name\",\n 'unique', \"unique\",\n 'isPrimaryKey', \"isPrimaryKey\",\n 'isReplicaIdentity', \"isReplicaIdentity\",\n 'isImmediate', \"isImmediate\",\n 'columns', json_object_agg(\"col\", \"dir\")\n ) AS index FROM indexed_columns \n GROUP BY \"schema\", \"tableName\", \"name\", \"unique\", \n \"isPrimaryKey\", \"isReplicaIdentity\", \"isImmediate\")\n\n SELECT COALESCE(json_agg(\"index\"), '[]'::json) as \"indexes\" FROM indexes\n `;\n}\n\nexport const publishedSchema = v\n .object({\n tables: v.array(publishedTableSpec),\n indexes: v.array(publishedIndexSpec),\n })\n .map(({tables, indexes}) => ({\n indexes,\n\n // Denormalize the schema such that each `table` includes the\n // 
`replicaIdentityColumns` corresponding to the table's\n // replica identity and associated primary key or index.\n tables: tables.map(table => {\n const replicaIdentityColumns: string[] = [];\n switch (table.replicaIdentity) {\n case 'd':\n replicaIdentityColumns.push(...(table.primaryKey ?? []));\n break;\n case 'i':\n replicaIdentityColumns.push(\n ...Object.keys(\n indexes.find(\n ind =>\n ind.schema === table.schema &&\n ind.tableName === table.name &&\n ind.isReplicaIdentity,\n )?.columns ?? {},\n ),\n );\n break;\n case 'f':\n replicaIdentityColumns.push(...Object.keys(table.columns));\n break;\n }\n return {\n ...table,\n replicaIdentityColumns,\n };\n }),\n }));\n\nexport type PublishedSchema = v.Infer<typeof publishedSchema>;\n\nexport type PublishedTableWithReplicaIdentity =\n PublishedSchema['tables'][number];\n\nconst publicationSchema = v.object({\n pubname: v.string(),\n pubinsert: v.boolean(),\n pubupdate: v.boolean(),\n pubdelete: v.boolean(),\n pubtruncate: v.boolean(),\n});\n\nconst publicationsResultSchema = v.array(publicationSchema);\n\nexport type PublicationInfo = PublishedSchema & {\n publications: v.Infer<typeof publicationsResultSchema>;\n};\n\n/**\n * Retrieves published tables and columns.\n */\nexport async function getPublicationInfo(\n sql: postgres.Sql,\n publications: string[],\n): Promise<PublicationInfo> {\n const result = await sql.unsafe(/*sql*/ `\n SELECT \n schemaname AS \"schema\",\n tablename AS \"table\", \n json_object_agg(pubname, attnames) AS \"publications\"\n FROM pg_publication_tables pb\n WHERE pb.pubname IN (${literal(publications)})\n GROUP BY schemaname, tablename;\n\n SELECT ${Object.keys(publicationSchema.shape).join(\n ',',\n )} FROM pg_publication pb\n WHERE pb.pubname IN (${literal(publications)})\n ORDER BY pubname;\n\n ${publishedTableQuery(publications)};\n\n ${indexDefinitionsQuery(publications)};\n`);\n\n // The first query is used to check that tables in multiple publications\n // always publish the same set of columns.\n const publishedColumns = result[0] as {\n schema: string;\n table: string;\n publications: Record<string, string[]>;\n }[];\n for (const {table, publications} of publishedColumns) {\n let expected: Set<string>;\n Object.entries(publications).forEach(([_, columns], i) => {\n const cols = new Set(columns);\n if (i === 0) {\n expected = cols;\n } else if (!equals(expected, cols)) {\n throw new Error(\n `Table ${table} is exported with different columns: [${[\n ...expected,\n ]}] vs [${[...cols]}]`,\n );\n }\n });\n }\n\n return {\n publications: v.parse(result[1], publicationsResultSchema),\n ...v.parse(\n {\n ...result[2][0], // tables\n ...result[3][0], // indexes\n },\n publishedSchema,\n ),\n 
};\n}\n"],"names":["v.object","v.array","v.string","v.boolean","publications","v.parse"],"mappings":";;;;;AAMO,SAAS,oBAAoB,cAAiC;AAOnE;AAAA;AAAA,IAAe;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,uBA+BM,QAAQ,YAAY,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AA6C5C;AAEO,SAAS,sBAAsB,cAAiC;AAkBrE;AAAA;AAAA,IAAe;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,2BAgCU,QAAQ,YAAY,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AA2BhD;AAEO,MAAM,kBAAkBA,OACrB;AAAA,EACN,QAAQC,MAAQ,kBAAkB;AAAA,EAClC,SAASA,MAAQ,kBAAkB;AACrC,CAAC,EACA,IAAI,CAAC,EAAC,QAAQ,eAAc;AAAA,EAC3B;AAAA;AAAA;AAAA;AAAA,EAKA,QAAQ,OAAO,IAAI,CAAA,UAAS;AAC1B,UAAM,yBAAmC,CAAA;AACzC,YAAQ,MAAM,iBAAA;AAAA,MACZ,KAAK;AACH,+BAAuB,KAAK,GAAI,MAAM,cAAc,CAAA,CAAG;AACvD;AAAA,MACF,KAAK;AACH,+BAAuB;AAAA,UACrB,GAAG,OAAO;AAAA,YACR,QAAQ;AAAA,cACN,CAAA,QACE,IAAI,WAAW,MAAM,UACrB,IAAI,cAAc,MAAM,QACxB,IAAI;AAAA,YAAA,GACL,WAAW,CAAA;AAAA,UAAC;AAAA,QACjB;AAEF;AAAA,MACF,KAAK;AACH,+BAAuB,KAAK,GAAG,OAAO,KAAK,MAAM,OAAO,CAAC;AACzD;AAAA,IAAA;AAEJ,WAAO;AAAA,MACL,GAAG;AAAA,MACH;AAAA,IAAA;AAAA,EAEJ,CAAC;AACH,EAAE;AAOJ,MAAM,oBAAoBD,OAAS;AAAA,EACjC,SAASE,OAAE;AAAA,EACX,WAAWC,QAAE;AAAA,EACb,WAAWA,QAAE;AAAA,EACb,WAAWA,QAAE;AAAA,EACb,aAAaA,QAAE;AACjB,CAAC;AAED,MAAM,2BAA2BF,MAAQ,iBAAiB;AAS1D,eAAsB,mBACpB,KACA,cAC0B;AAC1B,QAAM,SAAS,MAAM,IAAI;AAAA;AAAA,IAAe;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,2BAMf,QAAQ,YAAY,CAAC;AAAA;AAAA;AAAA,WAGrC,OAAO,KAAK,kBAAkB,KAAK,EAAE;AAAA,MAC5C;AAAA,IAAA,CACD;AAAA,2BACwB,QAAQ,YAAY,CAAC;AAAA;AAAA;AAAA,IAG5C,oBAAoB,YAAY,CAAC;AAAA;AAAA,IAEjC,sBAAsB,YAAY,CAAC;AAAA;AAAA,EAAA;AAKrC,QAAM,mBAAmB,OAAO,CAAC;AAKjC,aAAW,EAAC,OAAO,cAAAG,cAAAA,KAAiB,kBAAkB;AACpD,QAAI;AACJ,WAAO,QAAQA,aAAY,EAAE,QAAQ,CAAC,CAAC,GAAG,OAAO,GAAG,MAAM;AACxD,YAAM,OAAO,IAAI,IAAI,OAAO;AAC5B,UAAI,MAAM,GAAG;AACX,mBAAW;AAAA,MACb,WAAW,CAAC,OAAO,UAAU,IAAI,GAAG;AAClC,cAAM,IAAI;AAAA,UACR,SAAS,KAAK,yCAAyC;AAAA,YACrD,GAAG;AAAA,UAAA,CACJ,SAAS,CAAC,GAAG,IAAI,CAAC;AAAA,QAAA;AAAA,MAEvB;AAAA,IACF,CAAC;AAAA,EACH;AAEA,SAAO;AAAA,IACL,cAAcC,MAAQ,OAAO,CAAC,GAAG,wBAAwB;AAAA,IACzD,GAAGA;AAAAA,MACD;AAAA,QACE,GAAG,OAAO,CAAC,EAAE,CAAC;AAAA;AAAA,QACd,GAAG,OAAO,CAAC,EAAE,CAAC;AAAA;AAAA,MAAA;AAAA,MAEhB;AAAA,IAAA;AAAA,EACF;AAEJ;"}

@@ -3,6 +3,12 @@ import * as v from '../../../../../../shared/src/valita.ts';
 import type { PostgresDB, PostgresTransaction } from '../../../../types/pg.ts';
 import type { AppID, ShardConfig, ShardID } from '../../../../types/shards.ts';
 import { type PublicationInfo, type PublishedSchema } from './published.ts';
+/**
+ * Validates that a publication name is a valid PostgreSQL identifier.
+ * This provides defense-in-depth against SQL injection when publication
+ * names are used in replication commands.
+ */
+export declare function validatePublicationName(name: string): void;
 export declare function internalPublicationPrefix({ appID }: AppID): string;
 export declare function legacyReplicationSlot({ appID, shardNum }: ShardID): string;
 export declare function replicationSlotPrefix(shard: ShardID): string;
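The doc comment is worth dwelling on: publication names end up spliced into replication DDL (`CREATE PUBLICATION`, `DROP PUBLICATION`), which cannot take bind parameters, so a syntactic allow-list complements the identifier quoting used elsewhere in this file. A sketch of the idea, using a hypothetical `createPublicationSQL` helper that is not part of the package:

```ts
// Hypothetical helper for illustration only; the package builds its DDL in
// shardSetup() below. Because the name is interpolated into statement text,
// validating it first guarantees the splice contains only [A-Za-z0-9_].
function createPublicationSQL(name: string): string {
  validatePublicationName(name); // throws on quotes, spaces, semicolons, etc.
  return `CREATE PUBLICATION "${name}" FOR ALL TABLES;`;
}
```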
@@ -38,24 +44,26 @@ declare const replicaSchema: v.ObjectType<Omit<{
 }, "version" | "slot" | "initialSchema"> & {
     slot: v.Type<string>;
     version: v.Type<string>;
-    initialSchema: v.
+    initialSchema: v.Type<{
+        indexes: {
+            isReplicaIdentity?: boolean | undefined;
+            isPrimaryKey?: boolean | undefined;
+            isImmediate?: boolean | undefined;
+            name: string;
+            columns: Record<string, "ASC" | "DESC">;
+            schema: string;
+            tableName: string;
+            unique: boolean;
+        }[];
+        tables: {
+            replicaIdentityColumns: string[];
+            primaryKey?: string[] | undefined;
+            schemaOID?: number | undefined;
+            replicaIdentity?: "n" | "d" | "f" | "i" | undefined;
+            name: string;
+            schema: string;
+            oid: number;
+            columns: Record<string, {
                 pgTypeClass?: "e" | "d" | "b" | "c" | "p" | "r" | "m" | undefined;
                 elemPgTypeClass?: "e" | "d" | "b" | "c" | "p" | "r" | "m" | null | undefined;
                 characterMaximumLength?: number | null | undefined;
@@ -64,25 +72,12 @@ declare const replicaSchema: v.ObjectType<Omit<{
                 pos: number;
                 dataType: string;
                 typeOID: number;
-            }
-            publications: v.Type<Record<string, {
+            }>;
+            publications: Record<string, {
                 rowFilter: string | null;
-            }
-            }
-            }
-    indexes: v.ArrayType<v.ObjectType<Omit<Omit<{
-        name: v.Type<string>;
-        tableName: v.Type<string>;
-        unique: v.Type<boolean>;
-        columns: v.Type<Record<string, "ASC" | "DESC">>;
-    }, "schema"> & {
-        schema: v.Type<string>;
-    }, "isReplicaIdentity" | "isImmediate"> & {
-        isReplicaIdentity: v.Optional<boolean>;
-        isImmediate: v.Optional<boolean>;
-    }, undefined>>;
-}, undefined>;
+            }>;
+        }[];
+    }>;
 }, undefined>;
 export type Replica = v.Infer<typeof replicaSchema>;
 export declare function addReplica(sql: PostgresDB, shard: ShardID, slot: string, replicaVersion: string, { tables, indexes }: PublishedSchema): Promise<void>;
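The reshaped `initialSchema` type above falls out of the change visible in the `published.js` source map earlier: `publishedSchema` is now built with valita's `.map()` rather than `.extend()`. `.map()` transforms the parsed output, so the resulting schema is an opaque `v.Type<...>` of the mapped value instead of a structured `v.ObjectType<...>`. A minimal sketch of the pattern, assuming only `@badrap/valita` (which `shard.js` below already imports):

```ts
import * as v from '@badrap/valita';

// .map() post-processes the parsed value; the result is a plain v.Type of
// the mapped output, which is why the .d.ts above loses the ObjectType shape.
const schema = v
  .object({n: v.number()})
  .map(({n}) => ({n, doubled: n * 2}));

const parsed = schema.parse({n: 21}); // {n: 21, doubled: 42}
```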

@@ -1 +1 @@
-
{"version":3,"file":"shard.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/shard.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAIjD,OAAO,KAAK,CAAC,MAAM,wCAAwC,CAAC;AAE5D,OAAO,KAAK,EAAC,UAAU,EAAE,mBAAmB,EAAC,MAAM,yBAAyB,CAAC;AAC7E,OAAO,KAAK,EAAC,KAAK,EAAE,WAAW,EAAE,OAAO,EAAC,MAAM,6BAA6B,CAAC;AAI7E,OAAO,EAGL,KAAK,eAAe,EACpB,KAAK,eAAe,EACrB,MAAM,gBAAgB,CAAC;
+
{"version":3,"file":"shard.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/shard.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAIjD,OAAO,KAAK,CAAC,MAAM,wCAAwC,CAAC;AAE5D,OAAO,KAAK,EAAC,UAAU,EAAE,mBAAmB,EAAC,MAAM,yBAAyB,CAAC;AAC7E,OAAO,KAAK,EAAC,KAAK,EAAE,WAAW,EAAE,OAAO,EAAC,MAAM,6BAA6B,CAAC;AAI7E,OAAO,EAGL,KAAK,eAAe,EACpB,KAAK,eAAe,EACrB,MAAM,gBAAgB,CAAC;AASxB;;;;GAIG;AACH,wBAAgB,uBAAuB,CAAC,IAAI,EAAE,MAAM,GAAG,IAAI,CAY1D;AAED,wBAAgB,yBAAyB,CAAC,EAAC,KAAK,EAAC,EAAE,KAAK,UAEvD;AAED,wBAAgB,qBAAqB,CAAC,EAAC,KAAK,EAAE,QAAQ,EAAC,EAAE,OAAO,UAE/D;AAED,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,OAAO,UAGnD;AAED;;;GAGG;AACH,wBAAgB,yBAAyB,CAAC,KAAK,EAAE,OAAO,UAIvD;AAED,wBAAgB,kBAAkB,CAAC,KAAK,EAAE,OAAO,UAEhD;AAMD,wBAAgB,uBAAuB,CACrC,KAAK,EAAE,MAAM,EACb,OAAO,EAAE,MAAM,GAAG,MAAM,UAGzB;AAoCD,wBAAsB,kBAAkB,CAAC,EAAE,EAAE,UAAU,EAAE,KAAK,EAAE,KAAK,iBAEpE;AAED,wBAAgB,yBAAyB,CAAC,MAAM,EAAE,MAAM,UASvD;AAED;;;;;;;GAOG;AACH,wBAAgB,2BAA2B,CAAC,MAAM,EAAE,MAAM,UASzD;AAED,eAAO,MAAM,kBAAkB,gBAAgB,CAAC;AAEhD,wBAAgB,UAAU,CACxB,WAAW,EAAE,WAAW,EACxB,mBAAmB,EAAE,MAAM,GAC1B,MAAM,CA0CR;AAED,wBAAgB,SAAS,CAAC,KAAK,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,GAAG,MAAM,GAAG,MAAM,CAYzE;AAED,QAAA,MAAM,yBAAyB;;;aAG7B,CAAC;AAEH,MAAM,MAAM,mBAAmB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,yBAAyB,CAAC,CAAC;AAE5E,QAAA,MAAM,aAAa;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;aAIjB,CAAC;AAEH,MAAM,MAAM,OAAO,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,aAAa,CAAC,CAAC;AAcpD,wBAAsB,UAAU,CAC9B,GAAG,EAAE,UAAU,EACf,KAAK,EAAE,OAAO,EACd,IAAI,EAAE,MAAM,EACZ,cAAc,EAAE,MAAM,EACtB,EAAC,MAAM,EAAE,OAAO,EAAC,EAAE,eAAe,iBAOnC;AAED,wBAAsB,mBAAmB,CACvC,EAAE,EAAE,UAAU,EACd,GAAG,EAAE,UAAU,EACf,KAAK,EAAE,OAAO,EACd,cAAc,EAAE,MAAM,GACrB,OAAO,CAAC,OAAO,GAAG,IAAI,CAAC,CAczB;AAED,wBAAsB,sBAAsB,CAC1C,GAAG,EAAE,UAAU,EACf,KAAK,EAAE,OAAO,GACb,OAAO,CAAC,mBAAmB,CAAC,CAU9B;AAED;;;GAGG;AACH,wBAAsB,yBAAyB,CAC7C,EAAE,EAAE,UAAU,EACd,GAAG,EAAE,mBAAmB,EACxB,SAAS,EAAE,WAAW,iBAyDvB;AAED,wBAAsB,aAAa,CACjC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,mBAAmB,EACvB,KAAK,EAAE,WAAW,iBAqBnB;AAED,wBAAgB,oBAAoB,CAClC,EAAE,EAAE,UAAU,EACd,SAAS,EAAE,eAAe,QAkB3B;AAED,KAAK,iBAAiB,GAAG;IACvB,KAAK,CAAC,EAAE,EAAE,UAAU,EAAE,EAAE,EAAE,UAAU,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;CACtD,CAAC;AAEF,wBAAgB,4CAA4C,CAC1D,IAAI,EAAE,eAAe,GACpB,iBAAiB,GAAG,SAAS,CA+C/B"}

@@ -10,6 +10,19 @@ import { createEventTriggerStatements } from "./ddl.js";
 import { publishedSchema, getPublicationInfo } from "./published.js";
 import { validate } from "./validation.js";
 import { object, boolean, array, string } from "@badrap/valita";
+const VALID_PUBLICATION_NAME = /^[a-zA-Z_][a-zA-Z0-9_]*$/;
+function validatePublicationName(name) {
+  if (!VALID_PUBLICATION_NAME.test(name)) {
+    throw new Error(
+      `Invalid publication name "${name}". Publication names must start with a letter or underscore and contain only letters, digits, and underscores.`
+    );
+  }
+  if (name.length > 63) {
+    throw new Error(
+      `Publication name "${name}" exceeds PostgreSQL's 63-character identifier limit.`
+    );
+  }
+}
 function internalPublicationPrefix({ appID }) {
   return `_${appID}_`;
 }
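For reference, how the new guard behaves on sample names (all names here are made up):

```ts
validatePublicationName('zero_all'); // passes
validatePublicationName('my_pub_2'); // passes

try {
  validatePublicationName('my-pub'); // hyphen: fails the identifier regex
} catch (e) {
  console.error((e as Error).message);
}

try {
  validatePublicationName('p'.repeat(64)); // passes the regex, exceeds 63 chars
} catch (e) {
  console.error((e as Error).message);
}
```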
@@ -39,19 +52,6 @@ function globalSetup(appID) {
     `
     CREATE SCHEMA IF NOT EXISTS ${app};
 
-    CREATE TABLE IF NOT EXISTS ${app}."schemaVersions" (
-      "minSupportedVersion" INT4,
-      "maxSupportedVersion" INT4,
-
-      -- Ensure that there is only a single row in the table.
-      -- Application code can be agnostic to this column, and
-      -- simply invoke UPDATE statements on the version columns.
-      "lock" BOOL PRIMARY KEY DEFAULT true CHECK (lock)
-    );
-
-    INSERT INTO ${app}."schemaVersions" ("lock", "minSupportedVersion", "maxSupportedVersion")
-      VALUES (true, 1, 1) ON CONFLICT DO NOTHING;
-
     CREATE TABLE IF NOT EXISTS ${app}.permissions (
       "permissions" JSONB,
       "hash" TEXT,
@@ -113,7 +113,10 @@ function shardSetup(shardConfig, metadataPublication) {
   const app = id(appSchema(shardConfig));
   const shard = id(upstreamSchema(shardConfig));
   const pubs = [...shardConfig.publications].sort();
-  assert(
+  assert(
+    pubs.includes(metadataPublication),
+    () => `Publications must include ${metadataPublication}`
+  );
   return (
     /*sql*/
     `
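Note the second argument: the message is now a thunk, so the interpolated string is only built when the assertion fails. A sketch of an `assert` with that shape (the package's actual helper lives in its shared utilities; this restatement is an assumption):

```ts
// Minimal assert accepting a lazy message, mirroring the call sites above.
function assert(
  condition: unknown,
  message?: string | (() => string),
): asserts condition {
  if (!condition) {
    throw new Error(typeof message === 'function' ? message() : message);
  }
}

declare const pubs: string[];
declare const metadataPublication: string;

// The template string is never constructed on the happy path.
assert(
  pubs.includes(metadataPublication),
  () => `Publications must include ${metadataPublication}`,
);
```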
@@ -124,7 +127,7 @@ function shardSetup(shardConfig, metadataPublication) {
 
     DROP PUBLICATION IF EXISTS ${id(metadataPublication)};
     CREATE PUBLICATION ${id(metadataPublication)}
-      FOR TABLE ${app}."
+      FOR TABLE ${app}."permissions", TABLE ${shard}."clients", ${shard}."mutations";
 
     CREATE TABLE ${shard}."${SHARD_CONFIG_TABLE}" (
       "publications" TEXT[] NOT NULL,
@@ -203,12 +206,16 @@ async function getInternalShardConfig(sql, shard) {
     SELECT "publications", "ddlDetection"
     FROM ${sql(upstreamSchema(shard))}."shardConfig";
   `;
-  assert(
+  assert(
+    result.length === 1,
+    () => `Expected exactly one shardConfig row, got ${result.length}`
+  );
   return parse(result[0], internalShardConfigSchema, "passthrough");
 }
 async function setupTablesAndReplication(lc, sql, requested) {
   const { publications } = requested;
   for (const pub of publications) {
+    validatePublicationName(pub);
     if (pub.startsWith("_")) {
       throw new Error(
         `Publication names starting with "_" are reserved for internal use.
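Worth noting the ordering: `validatePublicationName` runs before the reserved-prefix check, so a malformed name fails fast with the identifier error, while a well-formed `_`-prefixed name still reaches the reservation error. A small walk-through (sample names invented):

```ts
for (const pub of ['zero_data', '_zero_internal', 'not a name']) {
  try {
    validatePublicationName(pub); // syntactic guard (new in this release)
    if (pub.startsWith('_')) {
      // reservation check, unchanged from before
      throw new Error('Publication names starting with "_" are reserved for internal use.');
    }
    console.log(`${pub}: accepted`);
  } catch (e) {
    console.log(`${pub}: rejected: ${(e as Error).message}`);
  }
}
// zero_data: accepted
// _zero_internal: rejected (reserved prefix)
// not a name: rejected (invalid identifier)
```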
@@ -277,7 +284,7 @@ function validatePublications(lc, published) {
       );
     }
   });
-  published.tables.forEach((table) => validate(lc, table
+  published.tables.forEach((table) => validate(lc, table));
 }
 function replicaIdentitiesForTablesWithoutPrimaryKeys(pubs) {
   const replicaIdentities = [];
@@ -330,6 +337,7 @@ export {
   setupTablesAndReplication,
   setupTriggers,
   shardSetup,
+  validatePublicationName,
   validatePublications
 };
 //# sourceMappingURL=shard.js.map