@rocicorp/zero 0.26.0-canary.2 → 0.26.0-canary.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/out/replicache/src/persist/collect-idb-databases.d.ts +4 -4
- package/out/replicache/src/persist/collect-idb-databases.d.ts.map +1 -1
- package/out/replicache/src/persist/collect-idb-databases.js +22 -19
- package/out/replicache/src/persist/collect-idb-databases.js.map +1 -1
- package/out/replicache/src/persist/refresh.d.ts.map +1 -1
- package/out/replicache/src/persist/refresh.js +0 -8
- package/out/replicache/src/persist/refresh.js.map +1 -1
- package/out/replicache/src/process-scheduler.d.ts +23 -0
- package/out/replicache/src/process-scheduler.d.ts.map +1 -1
- package/out/replicache/src/process-scheduler.js +50 -1
- package/out/replicache/src/process-scheduler.js.map +1 -1
- package/out/replicache/src/replicache-impl.d.ts +8 -0
- package/out/replicache/src/replicache-impl.d.ts.map +1 -1
- package/out/replicache/src/replicache-impl.js +11 -2
- package/out/replicache/src/replicache-impl.js.map +1 -1
- package/out/shared/src/falsy.d.ts +3 -0
- package/out/shared/src/falsy.d.ts.map +1 -0
- package/out/zero/package.json.js +1 -1
- package/out/zero/src/adapters/drizzle.js +1 -2
- package/out/zero/src/adapters/prisma.d.ts +2 -0
- package/out/zero/src/adapters/prisma.d.ts.map +1 -0
- package/out/zero/src/adapters/prisma.js +6 -0
- package/out/zero/src/adapters/prisma.js.map +1 -0
- package/out/zero/src/pg.js +4 -7
- package/out/zero/src/react.js +3 -1
- package/out/zero/src/react.js.map +1 -1
- package/out/zero/src/server.js +5 -8
- package/out/zero-cache/src/auth/load-permissions.d.ts +3 -2
- package/out/zero-cache/src/auth/load-permissions.d.ts.map +1 -1
- package/out/zero-cache/src/auth/load-permissions.js +14 -8
- package/out/zero-cache/src/auth/load-permissions.js.map +1 -1
- package/out/zero-cache/src/auth/write-authorizer.d.ts +6 -0
- package/out/zero-cache/src/auth/write-authorizer.d.ts.map +1 -1
- package/out/zero-cache/src/auth/write-authorizer.js +16 -3
- package/out/zero-cache/src/auth/write-authorizer.js.map +1 -1
- package/out/zero-cache/src/config/zero-config.d.ts +44 -8
- package/out/zero-cache/src/config/zero-config.d.ts.map +1 -1
- package/out/zero-cache/src/config/zero-config.js +53 -13
- package/out/zero-cache/src/config/zero-config.js.map +1 -1
- package/out/zero-cache/src/custom/fetch.d.ts +3 -0
- package/out/zero-cache/src/custom/fetch.d.ts.map +1 -1
- package/out/zero-cache/src/custom/fetch.js +26 -0
- package/out/zero-cache/src/custom/fetch.js.map +1 -1
- package/out/zero-cache/src/db/lite-tables.js +1 -1
- package/out/zero-cache/src/db/lite-tables.js.map +1 -1
- package/out/zero-cache/src/db/migration-lite.d.ts.map +1 -1
- package/out/zero-cache/src/db/migration-lite.js +9 -3
- package/out/zero-cache/src/db/migration-lite.js.map +1 -1
- package/out/zero-cache/src/db/migration.d.ts.map +1 -1
- package/out/zero-cache/src/db/migration.js +9 -3
- package/out/zero-cache/src/db/migration.js.map +1 -1
- package/out/zero-cache/src/db/specs.d.ts +4 -3
- package/out/zero-cache/src/db/specs.d.ts.map +1 -1
- package/out/zero-cache/src/db/specs.js +4 -1
- package/out/zero-cache/src/db/specs.js.map +1 -1
- package/out/zero-cache/src/db/transaction-pool.d.ts.map +1 -1
- package/out/zero-cache/src/db/transaction-pool.js +9 -3
- package/out/zero-cache/src/db/transaction-pool.js.map +1 -1
- package/out/zero-cache/src/server/inspector-delegate.d.ts +1 -1
- package/out/zero-cache/src/server/inspector-delegate.d.ts.map +1 -1
- package/out/zero-cache/src/server/inspector-delegate.js +11 -30
- package/out/zero-cache/src/server/inspector-delegate.js.map +1 -1
- package/out/zero-cache/src/server/main.js +1 -1
- package/out/zero-cache/src/server/main.js.map +1 -1
- package/out/zero-cache/src/server/priority-op.d.ts +8 -0
- package/out/zero-cache/src/server/priority-op.d.ts.map +1 -0
- package/out/zero-cache/src/server/priority-op.js +29 -0
- package/out/zero-cache/src/server/priority-op.js.map +1 -0
- package/out/zero-cache/src/server/syncer.d.ts +0 -1
- package/out/zero-cache/src/server/syncer.d.ts.map +1 -1
- package/out/zero-cache/src/server/syncer.js +3 -21
- package/out/zero-cache/src/server/syncer.js.map +1 -1
- package/out/zero-cache/src/services/analyze.js +1 -1
- package/out/zero-cache/src/services/analyze.js.map +1 -1
- package/out/zero-cache/src/services/change-source/custom/change-source.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/custom/change-source.js +4 -3
- package/out/zero-cache/src/services/change-source/custom/change-source.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/change-source.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/change-source.js +68 -13
- package/out/zero-cache/src/services/change-source/pg/change-source.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/initial-sync.js +7 -2
- package/out/zero-cache/src/services/change-source/pg/initial-sync.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/logical-replication/stream.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/logical-replication/stream.js +7 -4
- package/out/zero-cache/src/services/change-source/pg/logical-replication/stream.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts +125 -180
- package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/init.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/init.js +18 -10
- package/out/zero-cache/src/services/change-source/pg/schema/init.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/published.d.ts +36 -90
- package/out/zero-cache/src/services/change-source/pg/schema/published.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/published.js +51 -14
- package/out/zero-cache/src/services/change-source/pg/schema/published.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/shard.d.ts +31 -36
- package/out/zero-cache/src/services/change-source/pg/schema/shard.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/shard.js +24 -3
- package/out/zero-cache/src/services/change-source/pg/schema/shard.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/validation.d.ts +2 -2
- package/out/zero-cache/src/services/change-source/pg/schema/validation.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/validation.js +2 -4
- package/out/zero-cache/src/services/change-source/pg/schema/validation.js.map +1 -1
- package/out/zero-cache/src/services/change-source/protocol/current/data.d.ts +158 -53
- package/out/zero-cache/src/services/change-source/protocol/current/data.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/protocol/current/data.js +55 -10
- package/out/zero-cache/src/services/change-source/protocol/current/data.js.map +1 -1
- package/out/zero-cache/src/services/change-source/protocol/current/downstream.d.ts +210 -72
- package/out/zero-cache/src/services/change-source/protocol/current/downstream.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/protocol/current.js +4 -2
- package/out/zero-cache/src/services/change-source/replica-schema.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/replica-schema.js +19 -10
- package/out/zero-cache/src/services/change-source/replica-schema.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.js +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer.d.ts +71 -25
- package/out/zero-cache/src/services/change-streamer/change-streamer.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer.js +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/schema/tables.d.ts +1 -0
- package/out/zero-cache/src/services/change-streamer/schema/tables.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/schema/tables.js +6 -5
- package/out/zero-cache/src/services/change-streamer/schema/tables.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/storer.js +1 -1
- package/out/zero-cache/src/services/change-streamer/storer.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/subscriber.d.ts +2 -0
- package/out/zero-cache/src/services/change-streamer/subscriber.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/subscriber.js +14 -1
- package/out/zero-cache/src/services/change-streamer/subscriber.js.map +1 -1
- package/out/zero-cache/src/services/heapz.d.ts.map +1 -1
- package/out/zero-cache/src/services/heapz.js +1 -0
- package/out/zero-cache/src/services/heapz.js.map +1 -1
- package/out/zero-cache/src/services/mutagen/error.d.ts.map +1 -1
- package/out/zero-cache/src/services/mutagen/error.js +4 -1
- package/out/zero-cache/src/services/mutagen/error.js.map +1 -1
- package/out/zero-cache/src/services/mutagen/mutagen.d.ts.map +1 -1
- package/out/zero-cache/src/services/mutagen/mutagen.js +1 -0
- package/out/zero-cache/src/services/mutagen/mutagen.js.map +1 -1
- package/out/zero-cache/src/services/mutagen/pusher.d.ts +7 -4
- package/out/zero-cache/src/services/mutagen/pusher.d.ts.map +1 -1
- package/out/zero-cache/src/services/mutagen/pusher.js +80 -8
- package/out/zero-cache/src/services/mutagen/pusher.js.map +1 -1
- package/out/zero-cache/src/services/replicator/change-processor.d.ts.map +1 -1
- package/out/zero-cache/src/services/replicator/change-processor.js +21 -29
- package/out/zero-cache/src/services/replicator/change-processor.js.map +1 -1
- package/out/zero-cache/src/services/replicator/schema/change-log.d.ts +1 -2
- package/out/zero-cache/src/services/replicator/schema/change-log.d.ts.map +1 -1
- package/out/zero-cache/src/services/replicator/schema/change-log.js +2 -5
- package/out/zero-cache/src/services/replicator/schema/change-log.js.map +1 -1
- package/out/zero-cache/src/services/{change-source → replicator/schema}/column-metadata.d.ts +3 -3
- package/out/zero-cache/src/services/replicator/schema/column-metadata.d.ts.map +1 -0
- package/out/zero-cache/src/services/{change-source → replicator/schema}/column-metadata.js +3 -3
- package/out/zero-cache/src/services/replicator/schema/column-metadata.js.map +1 -0
- package/out/zero-cache/src/services/replicator/schema/replication-state.d.ts.map +1 -1
- package/out/zero-cache/src/services/replicator/schema/replication-state.js +3 -1
- package/out/zero-cache/src/services/replicator/schema/replication-state.js.map +1 -1
- package/out/zero-cache/src/services/run-ast.js +1 -1
- package/out/zero-cache/src/services/run-ast.js.map +1 -1
- package/out/zero-cache/src/services/statz.d.ts.map +1 -1
- package/out/zero-cache/src/services/statz.js +1 -0
- package/out/zero-cache/src/services/statz.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr-store.d.ts +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr-store.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr-store.js +59 -40
- package/out/zero-cache/src/services/view-syncer/cvr-store.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr.d.ts +0 -1
- package/out/zero-cache/src/services/view-syncer/cvr.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr.js +23 -6
- package/out/zero-cache/src/services/view-syncer/cvr.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts +13 -14
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.js +44 -56
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/row-record-cache.d.ts +1 -1
- package/out/zero-cache/src/services/view-syncer/row-record-cache.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/row-record-cache.js +22 -11
- package/out/zero-cache/src/services/view-syncer/row-record-cache.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/snapshotter.js +1 -1
- package/out/zero-cache/src/services/view-syncer/snapshotter.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts +6 -3
- package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/view-syncer.js +192 -217
- package/out/zero-cache/src/services/view-syncer/view-syncer.js.map +1 -1
- package/out/zero-cache/src/types/lexi-version.d.ts.map +1 -1
- package/out/zero-cache/src/types/lexi-version.js +4 -1
- package/out/zero-cache/src/types/lexi-version.js.map +1 -1
- package/out/zero-cache/src/types/lite.d.ts.map +1 -1
- package/out/zero-cache/src/types/lite.js +8 -2
- package/out/zero-cache/src/types/lite.js.map +1 -1
- package/out/zero-cache/src/types/shards.js +1 -1
- package/out/zero-cache/src/types/shards.js.map +1 -1
- package/out/zero-cache/src/types/sql.d.ts +5 -0
- package/out/zero-cache/src/types/sql.d.ts.map +1 -1
- package/out/zero-cache/src/types/sql.js +5 -1
- package/out/zero-cache/src/types/sql.js.map +1 -1
- package/out/zero-cache/src/types/subscription.js +1 -1
- package/out/zero-cache/src/types/subscription.js.map +1 -1
- package/out/zero-cache/src/workers/connect-params.d.ts +1 -0
- package/out/zero-cache/src/workers/connect-params.d.ts.map +1 -1
- package/out/zero-cache/src/workers/connect-params.js +2 -1
- package/out/zero-cache/src/workers/connect-params.js.map +1 -1
- package/out/zero-cache/src/workers/syncer-ws-message-handler.d.ts.map +1 -1
- package/out/zero-cache/src/workers/syncer-ws-message-handler.js +14 -6
- package/out/zero-cache/src/workers/syncer-ws-message-handler.js.map +1 -1
- package/out/zero-cache/src/workers/syncer.d.ts.map +1 -1
- package/out/zero-cache/src/workers/syncer.js +17 -10
- package/out/zero-cache/src/workers/syncer.js.map +1 -1
- package/out/zero-client/src/client/connection-manager.d.ts +8 -0
- package/out/zero-client/src/client/connection-manager.d.ts.map +1 -1
- package/out/zero-client/src/client/connection-manager.js +33 -0
- package/out/zero-client/src/client/connection-manager.js.map +1 -1
- package/out/zero-client/src/client/connection.d.ts.map +1 -1
- package/out/zero-client/src/client/connection.js +6 -3
- package/out/zero-client/src/client/connection.js.map +1 -1
- package/out/zero-client/src/client/error.js +1 -1
- package/out/zero-client/src/client/error.js.map +1 -1
- package/out/zero-client/src/client/mutator-proxy.d.ts.map +1 -1
- package/out/zero-client/src/client/mutator-proxy.js +15 -1
- package/out/zero-client/src/client/mutator-proxy.js.map +1 -1
- package/out/zero-client/src/client/options.d.ts +10 -0
- package/out/zero-client/src/client/options.d.ts.map +1 -1
- package/out/zero-client/src/client/options.js.map +1 -1
- package/out/zero-client/src/client/query-manager.d.ts +4 -0
- package/out/zero-client/src/client/query-manager.d.ts.map +1 -1
- package/out/zero-client/src/client/query-manager.js +7 -0
- package/out/zero-client/src/client/query-manager.js.map +1 -1
- package/out/zero-client/src/client/version.js +1 -1
- package/out/zero-client/src/client/zero.d.ts +3 -1
- package/out/zero-client/src/client/zero.d.ts.map +1 -1
- package/out/zero-client/src/client/zero.js +52 -7
- package/out/zero-client/src/client/zero.js.map +1 -1
- package/out/zero-client/src/mod.d.ts +1 -0
- package/out/zero-client/src/mod.d.ts.map +1 -1
- package/out/zero-protocol/src/connect.d.ts +4 -0
- package/out/zero-protocol/src/connect.d.ts.map +1 -1
- package/out/zero-protocol/src/connect.js +3 -1
- package/out/zero-protocol/src/connect.js.map +1 -1
- package/out/zero-protocol/src/protocol-version.d.ts +1 -1
- package/out/zero-protocol/src/protocol-version.d.ts.map +1 -1
- package/out/zero-protocol/src/protocol-version.js +1 -1
- package/out/zero-protocol/src/protocol-version.js.map +1 -1
- package/out/zero-protocol/src/push.d.ts +11 -2
- package/out/zero-protocol/src/push.d.ts.map +1 -1
- package/out/zero-protocol/src/push.js +22 -6
- package/out/zero-protocol/src/push.js.map +1 -1
- package/out/zero-protocol/src/up.d.ts +2 -0
- package/out/zero-protocol/src/up.d.ts.map +1 -1
- package/out/zero-react/src/mod.d.ts +3 -1
- package/out/zero-react/src/mod.d.ts.map +1 -1
- package/out/zero-react/src/paging-reducer.d.ts +61 -0
- package/out/zero-react/src/paging-reducer.d.ts.map +1 -0
- package/out/zero-react/src/paging-reducer.js +77 -0
- package/out/zero-react/src/paging-reducer.js.map +1 -0
- package/out/zero-react/src/use-query.d.ts +11 -1
- package/out/zero-react/src/use-query.d.ts.map +1 -1
- package/out/zero-react/src/use-query.js +13 -11
- package/out/zero-react/src/use-query.js.map +1 -1
- package/out/zero-react/src/use-rows.d.ts +39 -0
- package/out/zero-react/src/use-rows.d.ts.map +1 -0
- package/out/zero-react/src/use-rows.js +130 -0
- package/out/zero-react/src/use-rows.js.map +1 -0
- package/out/zero-react/src/use-zero-virtualizer.d.ts +122 -0
- package/out/zero-react/src/use-zero-virtualizer.d.ts.map +1 -0
- package/out/zero-react/src/use-zero-virtualizer.js +342 -0
- package/out/zero-react/src/use-zero-virtualizer.js.map +1 -0
- package/out/zero-react/src/zero-provider.js +1 -1
- package/out/zero-react/src/zero-provider.js.map +1 -1
- package/out/zero-server/src/adapters/drizzle.d.ts +18 -18
- package/out/zero-server/src/adapters/drizzle.d.ts.map +1 -1
- package/out/zero-server/src/adapters/drizzle.js +8 -22
- package/out/zero-server/src/adapters/drizzle.js.map +1 -1
- package/out/zero-server/src/adapters/pg.d.ts +19 -13
- package/out/zero-server/src/adapters/pg.d.ts.map +1 -1
- package/out/zero-server/src/adapters/pg.js.map +1 -1
- package/out/zero-server/src/adapters/postgresjs.d.ts +19 -13
- package/out/zero-server/src/adapters/postgresjs.d.ts.map +1 -1
- package/out/zero-server/src/adapters/postgresjs.js.map +1 -1
- package/out/zero-server/src/adapters/prisma.d.ts +66 -0
- package/out/zero-server/src/adapters/prisma.d.ts.map +1 -0
- package/out/zero-server/src/adapters/prisma.js +63 -0
- package/out/zero-server/src/adapters/prisma.js.map +1 -0
- package/out/zero-server/src/custom.js +1 -15
- package/out/zero-server/src/custom.js.map +1 -1
- package/out/zero-server/src/mod.d.ts +9 -8
- package/out/zero-server/src/mod.d.ts.map +1 -1
- package/out/zero-server/src/process-mutations.d.ts +2 -2
- package/out/zero-server/src/process-mutations.d.ts.map +1 -1
- package/out/zero-server/src/process-mutations.js +4 -8
- package/out/zero-server/src/process-mutations.js.map +1 -1
- package/out/zero-server/src/push-processor.js +1 -1
- package/out/zero-server/src/push-processor.js.map +1 -1
- package/out/zero-server/src/schema.d.ts.map +1 -1
- package/out/zero-server/src/schema.js +4 -1
- package/out/zero-server/src/schema.js.map +1 -1
- package/out/zero-server/src/zql-database.d.ts.map +1 -1
- package/out/zero-server/src/zql-database.js +17 -8
- package/out/zero-server/src/zql-database.js.map +1 -1
- package/out/zero-solid/src/mod.d.ts +1 -1
- package/out/zero-solid/src/mod.d.ts.map +1 -1
- package/out/zero-solid/src/use-query.d.ts +10 -1
- package/out/zero-solid/src/use-query.d.ts.map +1 -1
- package/out/zero-solid/src/use-query.js +21 -5
- package/out/zero-solid/src/use-query.js.map +1 -1
- package/out/zero-solid/src/use-zero.js +1 -1
- package/out/zero-solid/src/use-zero.js.map +1 -1
- package/out/zql/src/ivm/constraint.d.ts.map +1 -1
- package/out/zql/src/ivm/constraint.js +4 -1
- package/out/zql/src/ivm/constraint.js.map +1 -1
- package/out/zql/src/ivm/exists.d.ts.map +1 -1
- package/out/zql/src/ivm/exists.js +4 -1
- package/out/zql/src/ivm/exists.js.map +1 -1
- package/out/zql/src/ivm/join-utils.d.ts.map +1 -1
- package/out/zql/src/ivm/join-utils.js +8 -2
- package/out/zql/src/ivm/join-utils.js.map +1 -1
- package/out/zql/src/ivm/memory-source.d.ts.map +1 -1
- package/out/zql/src/ivm/memory-source.js +12 -3
- package/out/zql/src/ivm/memory-source.js.map +1 -1
- package/out/zql/src/ivm/push-accumulated.d.ts.map +1 -1
- package/out/zql/src/ivm/push-accumulated.js +25 -2
- package/out/zql/src/ivm/push-accumulated.js.map +1 -1
- package/out/zql/src/ivm/take.d.ts.map +1 -1
- package/out/zql/src/ivm/take.js +24 -6
- package/out/zql/src/ivm/take.js.map +1 -1
- package/out/zql/src/ivm/union-fan-in.d.ts.map +1 -1
- package/out/zql/src/ivm/union-fan-in.js +12 -3
- package/out/zql/src/ivm/union-fan-in.js.map +1 -1
- package/out/zqlite/src/table-source.d.ts.map +1 -1
- package/out/zqlite/src/table-source.js +1 -2
- package/out/zqlite/src/table-source.js.map +1 -1
- package/package.json +6 -2
- package/out/zero-cache/src/services/change-source/column-metadata.d.ts.map +0 -1
- package/out/zero-cache/src/services/change-source/column-metadata.js.map +0 -1
package/out/zero-cache/src/db/migration.js.map

@@ -1 +1 @@
- [single-line generated source map, collapsed: mappings plus the embedded migration.ts source]
+ [single-line generated source map, collapsed: the embedded migration.ts source now attaches failure messages to its asserts, e.g. `Migration did not reach target version: expected ${dest}, got ${versions.dataVersion}`, `Final dataVersion (${versions.dataVersion}) does not match codeVersion (${codeVersion})`, and 'newVersion must be positive']
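The change recorded in this map (visible in its embedded `sourcesContent`) is that migration.ts's bare `assert(...)` calls now carry failure messages, two of them as lazy thunks so the template string is only built when the assertion actually fails. A minimal sketch of the pattern, with an assumed helper shape standing in for the real `assert` in `shared/src/asserts.ts`:

```ts
// Assumed helper shape; the real assert lives in shared/src/asserts.ts.
// Accepting a thunk defers building the message until a failure occurs.
function assert(
  condition: unknown,
  message?: string | (() => string),
): asserts condition {
  if (!condition) {
    const msg = typeof message === 'function' ? message() : message;
    throw new Error(msg ?? 'Assertion failed');
  }
}

declare const dest: number;
declare const versions: {dataVersion: number};

// Before: assert(versions.dataVersion === dest);
// After, as it appears in the updated sourcesContent:
assert(
  versions.dataVersion === dest,
  () =>
    `Migration did not reach target version: expected ${dest}, got ${versions.dataVersion}`,
);
```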
package/out/zero-cache/src/db/specs.d.ts

@@ -3,7 +3,6 @@ import * as v from '../../../shared/src/valita.ts';
 import type { PrimaryKey } from '../../../zero-protocol/src/primary-key.ts';
 import type { SchemaValue } from '../../../zero-schema/src/table-schema.ts';
 export declare const pgTypeClassSchema: v.Type<"e" | "d" | "b" | "c" | "p" | "r" | "m">;
-export declare const pgReplicaIdentitySchema: v.Type<"n" | "d" | "f" | "i">;
 export declare const columnSpec: v.ObjectType<{
     pos: v.Type<number>;
     dataType: v.Type<string>;
@@ -56,8 +55,9 @@ export declare const publishedTableSpec: v.ObjectType<Omit<Omit<{
     primaryKey: v.Optional<string[]>;
 }, "schema"> & {
     schema: v.Type<string>;
-}, "columns" | "publications" | "oid" | "replicaIdentity"> & {
+}, "columns" | "publications" | "oid" | "schemaOID" | "replicaIdentity"> & {
     oid: v.Type<number>;
+    schemaOID: v.Optional<number>;
     columns: v.Type<Record<string, {
         pgTypeClass?: "e" | "d" | "b" | "c" | "p" | "r" | "m" | undefined;
         elemPgTypeClass?: "e" | "d" | "b" | "c" | "p" | "r" | "m" | null | undefined;
@@ -124,8 +124,9 @@ export declare const publishedIndexSpec: v.ObjectType<Omit<Omit<{
     columns: v.Type<Record<string, "ASC" | "DESC">>;
 }, "schema"> & {
     schema: v.Type<string>;
-}, "isReplicaIdentity" | "isImmediate"> & {
+}, "isReplicaIdentity" | "isPrimaryKey" | "isImmediate"> & {
     isReplicaIdentity: v.Optional<boolean>;
+    isPrimaryKey: v.Optional<boolean>;
     isImmediate: v.Optional<boolean>;
 }, undefined>;
 export type PublishedIndexSpec = DeepReadonly<v.Infer<typeof publishedIndexSpec>>;
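Taken together, these declaration changes add an optional `schemaOID` to `PublishedTableSpec`, an optional `isPrimaryKey` to `PublishedIndexSpec`, and remove `pgReplicaIdentitySchema` from the module's public surface. A sketch of literals conforming to the updated types; the import path follows this package's layout, and all values (names, OIDs) are purely illustrative:

```ts
import type {PublishedIndexSpec, PublishedTableSpec} from './specs.ts';

const table: PublishedTableSpec = {
  schema: 'public',
  name: 'issue',
  oid: 16384, // illustrative
  schemaOID: 2200, // new optional field; presumably the containing schema's OID
  columns: {id: {pos: 1, dataType: 'text', typeOID: 25}},
  publications: {zero_all: {rowFilter: null}}, // publication name is made up
};

const index: PublishedIndexSpec = {
  schema: 'public',
  tableName: 'issue',
  name: 'issue_pkey',
  unique: true,
  columns: {id: 'ASC'},
  isPrimaryKey: true, // new optional flag
};
```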
package/out/zero-cache/src/db/specs.d.ts.map

@@ -1 +1 @@
- [single-line generated declaration map, collapsed]
+ [single-line generated declaration map, collapsed: mappings regenerated for the updated declarations]
package/out/zero-cache/src/db/specs.js

@@ -42,6 +42,9 @@ const tableSpec = liteTableSpec.extend({
 });
 const publishedTableSpec = tableSpec.extend({
   oid: number(),
+  // Always present for new instances (e.g. from DDL triggers), but
+  // may from `initialSchema` object stored in the `replicas` table.
+  schemaOID: number().optional(),
   columns: record(publishedColumnSpec),
   replicaIdentity: pgReplicaIdentitySchema.optional(),
   publications: record(object({ rowFilter: string().nullable() }))
@@ -58,6 +61,7 @@ const indexSpec = liteIndexSpec.extend({
 });
 const publishedIndexSpec = indexSpec.extend({
   isReplicaIdentity: boolean().optional(),
+  isPrimaryKey: boolean().optional(),
   isImmediate: boolean().optional()
 });
 export {
@@ -66,7 +70,6 @@ export {
   indexSpec,
   liteIndexSpec,
   liteTableSpec,
-  pgReplicaIdentitySchema,
   pgTypeClassSchema,
   publishedIndexSpec,
   publishedTableSpec,
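The inline comment explains why the new fields are optional: specs produced by live DDL triggers carry them, while specs revived from an `initialSchema` stored in the `replicas` table may predate them and must still parse. A sketch of that round-trip using the valita-style `v.parse` seen elsewhere in this diff; `storedSpec` is a stand-in:

```ts
import * as v from '../../../shared/src/valita.ts';
import {publishedTableSpec} from './specs.ts';

// Stand-in for a spec read back from the replicas table's initialSchema.
declare const storedSpec: unknown;

// An older payload without schemaOID still parses because the field is
// declared .optional(); a fresh DDL-derived spec simply includes it.
const spec = v.parse(storedSpec, publishedTableSpec);
console.log(spec.schemaOID); // number | undefined
```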
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"specs.js","sources":["../../../../../zero-cache/src/db/specs.ts"],"sourcesContent":["import type {DeepReadonly} from '../../../shared/src/json.ts';\nimport * as v from '../../../shared/src/valita.ts';\nimport type {PrimaryKey} from '../../../zero-protocol/src/primary-key.ts';\nimport type {SchemaValue} from '../../../zero-schema/src/table-schema.ts';\nimport * as PostgresReplicaIdentity from './postgres-replica-identity-enum.ts';\nimport * as PostgresTypeClass from './postgres-type-class-enum.ts';\n\nexport const pgTypeClassSchema = v.literalUnion(\n PostgresTypeClass.Base,\n PostgresTypeClass.Composite,\n PostgresTypeClass.Domain,\n PostgresTypeClass.Enum,\n PostgresTypeClass.Pseudo,\n PostgresTypeClass.Range,\n PostgresTypeClass.Multirange,\n);\n\
|
|
1
|
+
{"version":3,"file":"specs.js","sources":["../../../../../zero-cache/src/db/specs.ts"],"sourcesContent":["import type {DeepReadonly} from '../../../shared/src/json.ts';\nimport * as v from '../../../shared/src/valita.ts';\nimport type {PrimaryKey} from '../../../zero-protocol/src/primary-key.ts';\nimport type {SchemaValue} from '../../../zero-schema/src/table-schema.ts';\nimport * as PostgresReplicaIdentity from './postgres-replica-identity-enum.ts';\nimport * as PostgresTypeClass from './postgres-type-class-enum.ts';\n\nexport const pgTypeClassSchema = v.literalUnion(\n PostgresTypeClass.Base,\n PostgresTypeClass.Composite,\n PostgresTypeClass.Domain,\n PostgresTypeClass.Enum,\n PostgresTypeClass.Pseudo,\n PostgresTypeClass.Range,\n PostgresTypeClass.Multirange,\n);\n\nconst pgReplicaIdentitySchema = v.literalUnion(\n PostgresReplicaIdentity.Default,\n PostgresReplicaIdentity.Nothing,\n PostgresReplicaIdentity.Full,\n PostgresReplicaIdentity.Index,\n);\n\nexport const columnSpec = v.object({\n pos: v.number(),\n dataType: v.string(),\n pgTypeClass: pgTypeClassSchema.optional(),\n\n // If the column is an array, this will be the type of the\n // elements in the array. If the column is not an array,\n // this will be null.\n elemPgTypeClass: pgTypeClassSchema.nullable().optional(),\n\n characterMaximumLength: v.number().nullable().optional(),\n notNull: v.boolean().nullable().optional(),\n dflt: v.string().nullable().optional(),\n});\n\nexport type ColumnSpec = Readonly<v.Infer<typeof columnSpec>>;\n\nconst publishedColumnSpec = columnSpec.extend({\n typeOID: v.number(),\n});\n\nexport const liteTableSpec = v.object({\n name: v.string(),\n columns: v.record(columnSpec),\n primaryKey: v.array(v.string()).optional(),\n});\n\nexport const tableSpec = liteTableSpec.extend({\n schema: v.string(),\n});\n\nexport const publishedTableSpec = tableSpec.extend({\n oid: v.number(),\n // Always present for new instances (e.g. from DDL triggers), but\n // may from `initialSchema` object stored in the `replicas` table.\n schemaOID: v.number().optional(),\n columns: v.record(publishedColumnSpec),\n replicaIdentity: pgReplicaIdentitySchema.optional(),\n publications: v.record(v.object({rowFilter: v.string().nullable()})),\n});\n\nexport type MutableLiteTableSpec = v.Infer<typeof liteTableSpec>;\n\nexport type LiteTableSpec = Readonly<MutableLiteTableSpec>;\n\nexport type LiteTableSpecWithKeys = Omit<LiteTableSpec, 'primaryKey'> & {\n /**\n * All keys associated with a unique index. Includes indexes with\n * nullable columns.\n */\n uniqueKeys: PrimaryKey[];\n\n /**\n * The key selected to act as the \"primary key\". Primary keys\n * are not explicitly set on the replica, but an appropriate\n * unique index is required.\n */\n primaryKey: PrimaryKey; // note: required\n\n /**\n * All keys associated with a unique index over non-null\n * columns, i.e. 
suitable as a primary key.\n */\n allPotentialPrimaryKeys: PrimaryKey[];\n};\n\nexport type LiteAndZqlSpec = {\n tableSpec: LiteTableSpecWithKeys;\n zqlSpec: Record<string, SchemaValue>;\n};\n\nexport type TableSpec = Readonly<v.Infer<typeof tableSpec>>;\n\nexport type PublishedTableSpec = Readonly<v.Infer<typeof publishedTableSpec>>;\n\nexport const directionSchema = v.literalUnion('ASC', 'DESC');\n\nexport const liteIndexSpec = v.object({\n name: v.string(),\n tableName: v.string(),\n unique: v.boolean(),\n columns: v.record(directionSchema),\n});\n\nexport type MutableLiteIndexSpec = v.Infer<typeof liteIndexSpec>;\n\nexport type LiteIndexSpec = Readonly<MutableLiteIndexSpec>;\n\nexport const indexSpec = liteIndexSpec.extend({\n schema: v.string(),\n});\n\nexport type IndexSpec = DeepReadonly<v.Infer<typeof indexSpec>>;\n\nexport const publishedIndexSpec = indexSpec.extend({\n isReplicaIdentity: v.boolean().optional(),\n isPrimaryKey: v.boolean().optional(),\n isImmediate: v.boolean().optional(),\n});\n\nexport type PublishedIndexSpec = DeepReadonly<\n v.Infer<typeof publishedIndexSpec>\n>;\n"],"names":["v.literalUnion","PostgresTypeClass.Base","PostgresTypeClass.Composite","PostgresTypeClass.Domain","PostgresTypeClass.Enum","PostgresTypeClass.Pseudo","PostgresTypeClass.Range","PostgresTypeClass.Multirange","PostgresReplicaIdentity.Default","PostgresReplicaIdentity.Nothing","PostgresReplicaIdentity.Full","PostgresReplicaIdentity.Index","v.object","v.number","v.string","v.boolean","v.record","v.array"],"mappings":";;;;AAOO,MAAM,oBAAoBA;AAAAA,EAC/BC;AAAAA,EACAC;AAAAA,EACAC;AAAAA,EACAC;AAAAA,EACAC;AAAAA,EACAC;AAAAA,EACAC;AACF;AAEA,MAAM,0BAA0BP;AAAAA,EAC9BQ;AAAAA,EACAC;AAAAA,EACAC;AAAAA,EACAC;AACF;AAEO,MAAM,aAAaC,OAAS;AAAA,EACjC,KAAKC,OAAE;AAAA,EACP,UAAUC,OAAE;AAAA,EACZ,aAAa,kBAAkB,SAAA;AAAA;AAAA;AAAA;AAAA,EAK/B,iBAAiB,kBAAkB,SAAA,EAAW,SAAA;AAAA,EAE9C,wBAAwBD,OAAE,EAAS,SAAA,EAAW,SAAA;AAAA,EAC9C,SAASE,QAAE,EAAU,SAAA,EAAW,SAAA;AAAA,EAChC,MAAMD,OAAE,EAAS,SAAA,EAAW,SAAA;AAC9B,CAAC;AAID,MAAM,sBAAsB,WAAW,OAAO;AAAA,EAC5C,SAASD,OAAE;AACb,CAAC;AAEM,MAAM,gBAAgBD,OAAS;AAAA,EACpC,MAAME,OAAE;AAAA,EACR,SAASE,OAAS,UAAU;AAAA,EAC5B,YAAYC,MAAQH,OAAE,CAAQ,EAAE,SAAA;AAClC,CAAC;AAEM,MAAM,YAAY,cAAc,OAAO;AAAA,EAC5C,QAAQA,OAAE;AACZ,CAAC;AAEM,MAAM,qBAAqB,UAAU,OAAO;AAAA,EACjD,KAAKD,OAAE;AAAA;AAAA;AAAA,EAGP,WAAWA,OAAE,EAAS,SAAA;AAAA,EACtB,SAASG,OAAS,mBAAmB;AAAA,EACrC,iBAAiB,wBAAwB,SAAA;AAAA,EACzC,cAAcA,OAASJ,OAAS,EAAC,WAAWE,OAAE,EAAS,SAAA,GAAW,CAAC;AACrE,CAAC;AAoCM,MAAM,kBAAkBd,aAAe,OAAO,MAAM;AAEpD,MAAM,gBAAgBY,OAAS;AAAA,EACpC,MAAME,OAAE;AAAA,EACR,WAAWA,OAAE;AAAA,EACb,QAAQC,QAAE;AAAA,EACV,SAASC,OAAS,eAAe;AACnC,CAAC;AAMM,MAAM,YAAY,cAAc,OAAO;AAAA,EAC5C,QAAQF,OAAE;AACZ,CAAC;AAIM,MAAM,qBAAqB,UAAU,OAAO;AAAA,EACjD,mBAAmBC,QAAE,EAAU,SAAA;AAAA,EAC/B,cAAcA,QAAE,EAAU,SAAA;AAAA,EAC1B,aAAaA,QAAE,EAAU,SAAA;AAC3B,CAAC;"}
package/out/zero-cache/src/db/transaction-pool.d.ts.map
@@ -1 +1 @@
-
{"version":3,"file":"transaction-pool.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/db/transaction-pool.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,QAAQ,MAAM,UAAU,CAAC;AAIrC,OAAO,KAAK,EAAC,IAAI,EAAC,MAAM,6BAA6B,CAAC;AAGtD,OAAO,EAEL,KAAK,UAAU,EACf,KAAK,mBAAmB,EACzB,MAAM,gBAAgB,CAAC;AACxB,OAAO,KAAK,KAAK,IAAI,MAAM,gBAAgB,CAAC;AAE5C,KAAK,IAAI,GAAG,IAAI,CAAC,OAAO,IAAI,CAAC,CAAC;AAE9B,KAAK,YAAY,CAAC,CAAC,IAAI,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC;AAEtC,MAAM,MAAM,SAAS,GACjB,QAAQ,CAAC,YAAY,CAAC,CAAC,QAAQ,CAAC,GAAG,GAAG,QAAQ,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,GAChE,QAAQ,CAAC,YAAY,CAAC,QAAQ,CAAC,GAAG,EAAE,CAAC,CAAC;AAE1C;;;;;GAKG;AACH,MAAM,MAAM,IAAI,GAAG,CACjB,EAAE,EAAE,mBAAmB,EACvB,EAAE,EAAE,UAAU,KACX,YAAY,CAAC,SAAS,EAAE,CAAC,CAAC;AAE/B;;;;GAIG;AACH,MAAM,MAAM,QAAQ,CAAC,CAAC,IAAI,CACxB,EAAE,EAAE,mBAAmB,EACvB,EAAE,EAAE,UAAU,KACX,YAAY,CAAC,CAAC,CAAC,CAAC;AAErB;;;;;;;GAOG;AACH,qBAAa,eAAe;;IAkB1B;;;;;;;;;;;;;;;OAeG;gBAED,EAAE,EAAE,UAAU,EACd,IAAI,EAAE,IAAI,EACV,IAAI,CAAC,EAAE,IAAI,EACX,OAAO,CAAC,EAAE,IAAI,EACd,cAAc,SAAI,EAClB,UAAU,SAAiB,EAC3B,YAAY,eAAgB;
+
{"version":3,"file":"transaction-pool.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/db/transaction-pool.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,QAAQ,MAAM,UAAU,CAAC;AAIrC,OAAO,KAAK,EAAC,IAAI,EAAC,MAAM,6BAA6B,CAAC;AAGtD,OAAO,EAEL,KAAK,UAAU,EACf,KAAK,mBAAmB,EACzB,MAAM,gBAAgB,CAAC;AACxB,OAAO,KAAK,KAAK,IAAI,MAAM,gBAAgB,CAAC;AAE5C,KAAK,IAAI,GAAG,IAAI,CAAC,OAAO,IAAI,CAAC,CAAC;AAE9B,KAAK,YAAY,CAAC,CAAC,IAAI,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC;AAEtC,MAAM,MAAM,SAAS,GACjB,QAAQ,CAAC,YAAY,CAAC,CAAC,QAAQ,CAAC,GAAG,GAAG,QAAQ,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,GAChE,QAAQ,CAAC,YAAY,CAAC,QAAQ,CAAC,GAAG,EAAE,CAAC,CAAC;AAE1C;;;;;GAKG;AACH,MAAM,MAAM,IAAI,GAAG,CACjB,EAAE,EAAE,mBAAmB,EACvB,EAAE,EAAE,UAAU,KACX,YAAY,CAAC,SAAS,EAAE,CAAC,CAAC;AAE/B;;;;GAIG;AACH,MAAM,MAAM,QAAQ,CAAC,CAAC,IAAI,CACxB,EAAE,EAAE,mBAAmB,EACvB,EAAE,EAAE,UAAU,KACX,YAAY,CAAC,CAAC,CAAC,CAAC;AAErB;;;;;;;GAOG;AACH,qBAAa,eAAe;;IAkB1B;;;;;;;;;;;;;;;OAeG;gBAED,EAAE,EAAE,UAAU,EACd,IAAI,EAAE,IAAI,EACV,IAAI,CAAC,EAAE,IAAI,EACX,OAAO,CAAC,EAAE,IAAI,EACd,cAAc,SAAI,EAClB,UAAU,SAAiB,EAC3B,YAAY,eAAgB;IAkB9B;;;OAGG;IACH,GAAG,CAAC,EAAE,EAAE,UAAU,GAAG,IAAI;IASzB;;;;;;OAMG;IACH,iBAAiB,CAAC,GAAG,EAAE,MAAM,EAAE,KAAK,EAAE,MAAM;iCAIf,MAAM,SAAS,MAAM;;IAKlD;;;;;;;;;;;;;;;;;;;OAmBG;IACG,IAAI;IAqGV;;;;;;;;;;;OAWG;IACH,OAAO,CAAC,IAAI,EAAE,IAAI,GAAG,OAAO,CAAC,IAAI,CAAC;IAiElC;;;;;OAKG;IACH,eAAe,CAAC,CAAC,EAAE,QAAQ,EAAE,QAAQ,CAAC,CAAC,CAAC,GAAG,OAAO,CAAC,CAAC,CAAC;IAsDrD;;;OAGG;IACH,KAAK;IAIL;;;OAGG;IACH,OAAO;IASP;;;;;;;;;;;;;;;;;OAiBG;IAEH,GAAG,CAAC,KAAK,SAAI;IAQb;;OAEG;IACH,KAAK,CAAC,KAAK,SAAI;IAYf,SAAS,IAAI,OAAO;IAIpB;;OAEG;IACH,IAAI,CAAC,GAAG,EAAE,OAAO;CAiBlB;AAED,KAAK,wBAAwB,GAAG;IAC9B;;;;;OAKG;IACH,cAAc,EAAE,IAAI,CAAC;IAErB;;;;;OAKG;IACH,aAAa,EAAE,IAAI,CAAC;IAEpB;;;;OAIG;IACH,WAAW,EAAE,IAAI,CAAC;IAElB,qCAAqC;IACrC,UAAU,EAAE,OAAO,CAAC,MAAM,CAAC,CAAC;CAC7B,CAAC;AAEF;;;;GAIG;AACH,wBAAgB,qBAAqB,IAAI,wBAAwB,CAoDhE;AAED;;;;GAIG;AACH,wBAAgB,cAAc,IAAI;IAChC,IAAI,EAAE,IAAI,CAAC;IACX,OAAO,EAAE,IAAI,CAAC;IACd,UAAU,EAAE,OAAO,CAAC,MAAM,CAAC,CAAC;CAC7B,CAyCA;AAED;;GAEG;AACH,wBAAgB,cAAc,CAAC,UAAU,EAAE,MAAM,GAAG;IAClD,IAAI,EAAE,IAAI,CAAC;IACX,QAAQ,EAAE,OAAO,CAAC,IAAI,CAAC,CAAC;CACzB,CAYA;AAED;;;;;GAKG;AACH,qBAAa,gBAAiB,SAAQ,KAAK;gBAC7B,KAAK,CAAC,EAAE,OAAO;CAI5B;AAoED,KAAK,WAAW,GAAG;IACjB,SAAS,EAAE,MAAM,CAAC;IAClB,IAAI,EAAE,IAAI,GAAG,MAAM,CAAC;CACrB,CAAC;AAEF,KAAK,YAAY,GAAG;IAClB,iBAAiB,EAAE,WAAW,CAAC;IAC/B,eAAe,EAAE,WAAW,CAAC;CAC9B,CAAC;AAGF,eAAO,MAAM,aAAa,EAAE,YAS3B,CAAC"}
package/out/zero-cache/src/db/transaction-pool.js
@@ -39,8 +39,11 @@ class TransactionPool {
    * workers will be shut down after an idle timeout of 5 seconds.
    */
   constructor(lc, mode, init, cleanup, initialWorkers = 1, maxWorkers = initialWorkers, timeoutTasks = TIMEOUT_TASKS) {
-    assert(initialWorkers > 0);
-    assert(maxWorkers >= initialWorkers);
+    assert(initialWorkers > 0, "initialWorkers must be positive");
+    assert(
+      maxWorkers >= initialWorkers,
+      "maxWorkers must be >= initialWorkers"
+    );
     this.#lc = lc;
     this.#mode = mode;
     this.#init = init ? this.#stmtRunner(init) : void 0;
@@ -326,7 +329,10 @@ class TransactionPool {
    * Decrements the internal reference count, automatically invoking {@link setDone} when it reaches 0.
    */
   unref(count = 1) {
-    assert(count <= this.#refCount);
+    assert(
+      count <= this.#refCount,
+      () => `Cannot unref ${count} when refCount is ${this.#refCount}`
+    );
     this.#refCount -= count;
     if (this.#refCount === 0) {
       this.setDone();
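The two hunks above are the substantive change to transaction-pool.js in this canary: the bare assert(cond) calls gain failure messages, a plain string in the constructor and a lazy () => string thunk in unref(), so the interpolated message is only built when the check actually fails. A minimal sketch of an assert helper compatible with both call sites (a hypothetical stand-in for illustration; the real implementation lives in shared/src/asserts.ts, whose source is not part of this diff):

// Hypothetical helper mirroring the call sites in the hunks above; not the
// actual shared/src/asserts.ts implementation, which this diff does not show.
function assert(
  condition: unknown,
  message: string | (() => string) = 'Assertion failed',
): asserts condition {
  if (!condition) {
    // A thunk defers building the message until the assertion actually fails.
    throw new Error(typeof message === 'function' ? message() : message);
  }
}

// Usage mirroring the new checks:
const initialWorkers = 1;
const maxWorkers = initialWorkers;
assert(initialWorkers > 0, 'initialWorkers must be positive');
assert(maxWorkers >= initialWorkers, 'maxWorkers must be >= initialWorkers');

let refCount = 1;
const count = 1;
assert(count <= refCount, () => `Cannot unref ${count} when refCount is ${refCount}`);

The lazy form matters in unref(), which runs once per shared reference: the two-interpolation template string is never constructed on the success path.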
package/out/zero-cache/src/db/transaction-pool.js.map
@@ -1 +1 @@
-
{"version":3,"file":"transaction-pool.js","sources":["../../../../../zero-cache/src/db/transaction-pool.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {type Resolver, resolver} from '@rocicorp/resolver';\nimport type postgres from 'postgres';\nimport {AbortError} from '../../../shared/src/abort-error.ts';\nimport {assert} from '../../../shared/src/asserts.ts';\nimport {stringify} from '../../../shared/src/bigint-json.ts';\nimport type {Enum} from '../../../shared/src/enum.ts';\nimport {Queue} from '../../../shared/src/queue.ts';\nimport {promiseVoid} from '../../../shared/src/resolved-promises.ts';\nimport {\n disableStatementTimeout,\n type PostgresDB,\n type PostgresTransaction,\n} from '../types/pg.ts';\nimport type * as Mode from './mode-enum.ts';\n\ntype Mode = Enum<typeof Mode>;\n\ntype MaybePromise<T> = Promise<T> | T;\n\nexport type Statement =\n | postgres.PendingQuery<(postgres.Row & Iterable<postgres.Row>)[]>\n | postgres.PendingQuery<postgres.Row[]>;\n\n/**\n * A {@link Task} is logic run from within a transaction in a {@link TransactionPool}.\n * It returns a list of `Statements` that the transaction executes asynchronously and\n * awaits when it receives the 'done' signal.\n *\n */\nexport type Task = (\n tx: PostgresTransaction,\n lc: LogContext,\n) => MaybePromise<Statement[]>;\n\n/**\n * A {@link ReadTask} is run from within a transaction, but unlike a {@link Task},\n * the results of a ReadTask are opaque to the TransactionPool and returned to the\n * caller of {@link TransactionPool.processReadTask}.\n */\nexport type ReadTask<T> = (\n tx: PostgresTransaction,\n lc: LogContext,\n) => MaybePromise<T>;\n\n/**\n * A TransactionPool is a pool of one or more {@link postgres.TransactionSql}\n * objects that participate in processing a dynamic queue of tasks.\n *\n * This can be used for serializing a set of tasks that arrive asynchronously\n * to a single transaction (for writing) or performing parallel reads across\n * multiple connections at the same snapshot (e.g. read only snapshot transactions).\n */\nexport class TransactionPool {\n #lc: LogContext;\n readonly #mode: Mode;\n readonly #init: TaskRunner | undefined;\n readonly #cleanup: TaskRunner | undefined;\n readonly #tasks = new Queue<TaskRunner | Error | 'done'>();\n readonly #workers: Promise<unknown>[] = [];\n readonly #initialWorkers: number;\n readonly #maxWorkers: number;\n readonly #timeoutTask: TimeoutTasks;\n #numWorkers: number;\n #numWorking = 0;\n #db: PostgresDB | undefined; // set when running. stored to allow adaptive pool sizing.\n\n #refCount = 1;\n #done = false;\n #failure: Error | undefined;\n\n /**\n * @param init A {@link Task} that is run in each Transaction before it begins\n * processing general tasks. This can be used to to set the transaction\n * mode, export/set snapshots, etc. This will be run even if\n * {@link fail} has been called on the pool.\n * @param cleanup A {@link Task} that is run in each Transaction before it closes.\n * This will be run even if {@link fail} has been called, or if a\n * preceding Task threw an Error.\n * @param initialWorkers The initial number of transaction workers to process tasks.\n * This is the steady state number of workers that will be kept\n * alive if the TransactionPool is long lived.\n * This must be greater than 0. Defaults to 1.\n * @param maxWorkers When specified, allows the pool to grow to `maxWorkers`. This\n * must be greater than or equal to `initialWorkers`. 
On-demand\n * workers will be shut down after an idle timeout of 5 seconds.\n */\n constructor(\n lc: LogContext,\n mode: Mode,\n init?: Task,\n cleanup?: Task,\n initialWorkers = 1,\n maxWorkers = initialWorkers,\n timeoutTasks = TIMEOUT_TASKS, // Overridden for tests.\n ) {\n assert(initialWorkers > 0);\n assert(maxWorkers >= initialWorkers);\n\n this.#lc = lc;\n this.#mode = mode;\n this.#init = init ? this.#stmtRunner(init) : undefined;\n this.#cleanup = cleanup ? this.#stmtRunner(cleanup) : undefined;\n this.#initialWorkers = initialWorkers;\n this.#numWorkers = initialWorkers;\n this.#maxWorkers = maxWorkers;\n this.#timeoutTask = timeoutTasks;\n }\n\n /**\n * Starts the pool of workers to process Tasks with transactions opened from the\n * specified {@link db}.\n */\n run(db: PostgresDB): this {\n assert(!this.#db, 'already running');\n this.#db = db;\n for (let i = 0; i < this.#numWorkers; i++) {\n this.#addWorker(db);\n }\n return this;\n }\n\n /**\n * Adds context parameters to internal LogContext. This is useful for context values that\n * are not known when the TransactionPool is constructed (e.g. determined after a database\n * call when the pool is running).\n *\n * Returns an object that can be used to add more parameters.\n */\n addLoggingContext(key: string, value: string) {\n this.#lc = this.#lc.withContext(key, value);\n\n return {\n addLoggingContext: (key: string, value: string) =>\n this.addLoggingContext(key, value),\n };\n }\n\n /**\n * Returns a promise that:\n *\n * * resolves after {@link setDone} has been called (or the the pool as been {@link unref}ed\n * to a 0 ref count), once all added tasks have been processed and all transactions have been\n * committed or closed.\n *\n * * rejects if processing was aborted with {@link fail} or if processing any of\n * the tasks resulted in an error. All uncommitted transactions will have been\n * rolled back.\n *\n * Note that partial failures are possible if processing writes with multiple workers\n * (e.g. `setDone` is called, allowing some workers to commit, after which other\n * workers encounter errors). Using a TransactionPool in this manner does not make\n * sense in terms of transactional semantics, and is thus not recommended.\n *\n * For reads, however, multiple workers is useful for performing parallel reads\n * at the same snapshot. See {@link synchronizedSnapshots} for an example.\n * Resolves or rejects when all workers are done or failed.\n */\n async done() {\n const numWorkers = this.#workers.length;\n await Promise.all(this.#workers);\n\n if (numWorkers < this.#workers.length) {\n // If workers were added after the initial set, they must be awaited to ensure\n // that the results (i.e. rejections) of all workers are accounted for. This only\n // needs to be re-done once, because the fact that the first `await` completed\n // guarantees that the pool is in a terminal state and no new workers can be added.\n await Promise.all(this.#workers);\n }\n this.#lc.debug?.('transaction pool done');\n }\n\n #addWorker(db: PostgresDB) {\n const id = this.#workers.length + 1;\n const lc = this.#lc.withContext('tx', id);\n\n const tt: TimeoutTask =\n this.#workers.length < this.#initialWorkers\n ? this.#timeoutTask.forInitialWorkers\n : this.#timeoutTask.forExtraWorkers;\n const {timeoutMs} = tt;\n const timeoutTask = tt.task === 'done' ? 
'done' : this.#stmtRunner(tt.task);\n\n const worker = async (tx: PostgresTransaction) => {\n try {\n lc.debug?.('started transaction');\n disableStatementTimeout(tx);\n\n let last: Promise<void> = promiseVoid;\n\n const executeTask = async (runner: TaskRunner) => {\n runner !== this.#init && this.#numWorking++;\n const {pending} = await runner.run(tx, lc, () => {\n runner !== this.#init && this.#numWorking--;\n });\n last = pending ?? last;\n };\n\n let task: TaskRunner | Error | 'done' =\n this.#init ?? (await this.#tasks.dequeue(timeoutTask, timeoutMs));\n\n try {\n while (task !== 'done') {\n if (\n task instanceof Error ||\n (task !== this.#init && this.#failure)\n ) {\n throw this.#failure ?? task;\n }\n await executeTask(task);\n\n // await the next task.\n task = await this.#tasks.dequeue(timeoutTask, timeoutMs);\n }\n } finally {\n // Execute the cleanup task even on failure.\n if (this.#cleanup) {\n await executeTask(this.#cleanup);\n }\n }\n\n lc.debug?.('closing transaction');\n // Given the semantics of a Postgres transaction, the last statement\n // will only succeed if all of the preceding statements succeeded.\n return last;\n } catch (e) {\n if (e !== this.#failure) {\n this.fail(e); // A failure in any worker should fail the pool.\n }\n throw e;\n }\n };\n\n this.#workers.push(\n db\n .begin(this.#mode, worker)\n .catch(e => {\n if (e instanceof RollbackSignal) {\n // A RollbackSignal is used to gracefully rollback the postgres.js\n // transaction block. It should not be thrown up to the application.\n lc.debug?.('aborted transaction');\n } else {\n throw e;\n }\n })\n .finally(() => this.#numWorkers--),\n );\n\n // After adding the worker, enqueue a terminal signal if we are in either of the\n // terminal states (both of which prevent more tasks from being enqueued), to ensure\n // that the added worker eventually exits.\n if (this.#done) {\n this.#tasks.enqueue('done');\n }\n if (this.#failure) {\n this.#tasks.enqueue(this.#failure);\n }\n }\n\n /**\n * Processes the statements produced by the specified {@link Task},\n * returning a Promise that resolves when the statements are either processed\n * by the database or rejected.\n *\n * Note that statement failures will result in failing the entire\n * TransactionPool (per transaction semantics). However, the returned Promise\n * itself will resolve rather than reject. As such, it is fine to ignore\n * returned Promises in order to pipeline requests to the database. It is\n * recommended to occasionally await them (e.g. 
after some threshold) in\n * order to avoid memory blowup in the case of database slowness.\n */\n process(task: Task): Promise<void> {\n const r = resolver<void>();\n this.#process(this.#stmtRunner(task, r));\n return r.promise;\n }\n\n readonly #start = performance.now();\n #stmts = 0;\n\n /**\n * Implements the semantics specified in {@link process()}.\n *\n * Specifically:\n * * `freeWorker()` is called as soon as the statements are produced,\n * allowing them to be pipelined to the database.\n * * Statement errors result in failing the transaction pool.\n * * The client-supplied Resolver resolves on success or failure;\n * it is never rejected.\n */\n #stmtRunner(task: Task, r: {resolve: () => void} = resolver()): TaskRunner {\n return {\n run: async (tx, lc, freeWorker) => {\n let stmts: Statement[];\n try {\n stmts = await task(tx, lc);\n } catch (e) {\n r.resolve();\n throw e;\n } finally {\n freeWorker();\n }\n\n if (stmts.length === 0) {\n r.resolve();\n return {pending: null};\n }\n\n // Execute the statements (i.e. send to the db) immediately.\n // The last result is returned for the worker to await before\n // closing the transaction.\n const last = stmts.reduce(\n (_, stmt) =>\n stmt\n .execute()\n .then(() => {\n if (++this.#stmts % 1000 === 0) {\n const q = stmt as unknown as Query;\n lc.debug?.(\n `executed ${this.#stmts}th statement (${(performance.now() - this.#start).toFixed(3)} ms)`,\n {\n statement: q.string,\n params: stringify(q.parameters),\n },\n );\n }\n })\n .catch(e => this.fail(e)),\n promiseVoid,\n );\n return {pending: last.then(r.resolve)};\n },\n rejected: r.resolve,\n };\n }\n\n /**\n * Processes and returns the result of executing the {@link ReadTask} from\n * within the transaction. An error thrown by the task will result in\n * rejecting the returned Promise, but will not affect the transaction pool\n * itself.\n */\n processReadTask<T>(readTask: ReadTask<T>): Promise<T> {\n const r = resolver<T>();\n this.#process(this.#readRunner(readTask, r));\n return r.promise;\n }\n\n /**\n * Implements the semantics specified in {@link processReadTask()}.\n *\n * Specifically:\n * * `freeWorker()` is called as soon as the result is produced,\n * before resolving the client-supplied Resolver.\n * * Errors result in rejecting the client-supplied Resolver but\n * do not affect transaction pool.\n */\n #readRunner<T>(readTask: ReadTask<T>, r: Resolver<T>): TaskRunner {\n return {\n run: async (tx, lc, freeWorker) => {\n let result: T;\n try {\n result = await readTask(tx, lc);\n freeWorker();\n r.resolve(result);\n } catch (e) {\n freeWorker();\n r.reject(e);\n }\n return {pending: null};\n },\n rejected: r.reject,\n };\n }\n\n #process(runner: TaskRunner): void {\n assert(!this.#done, 'already set done');\n if (this.#failure) {\n runner.rejected(this.#failure);\n return;\n }\n\n this.#tasks.enqueue(runner);\n\n // Check if the pool size can and should be increased.\n if (this.#numWorkers < this.#maxWorkers) {\n const outstanding = this.#tasks.size();\n\n if (outstanding > this.#numWorkers - this.#numWorking) {\n this.#db && this.#addWorker(this.#db);\n this.#numWorkers++;\n this.#lc.debug?.(`Increased pool size to ${this.#numWorkers}`);\n }\n }\n }\n\n /**\n * Ends all workers with a ROLLBACK. Throws if the pool is already done\n * or aborted.\n */\n abort() {\n this.fail(new RollbackSignal());\n }\n\n /**\n * Signals to all workers to end their transaction once all pending tasks have\n * been completed. 
Throws if the pool is already done or aborted.\n */\n setDone() {\n assert(!this.#done, 'already set done');\n this.#done = true;\n\n for (let i = 0; i < this.#numWorkers; i++) {\n this.#tasks.enqueue('done');\n }\n }\n\n /**\n * An alternative to explicitly calling {@link setDone}, `ref()` increments an internal reference\n * count, and {@link unref} decrements it. When the reference count reaches 0, {@link setDone} is\n * automatically called. A TransactionPool is initialized with a reference count of 1.\n *\n * `ref()` should be called before sharing the pool with another component, and only after the\n * pool has been started with {@link run()}. It must not be called on a TransactionPool that is\n * already done (either via {@link unref()} or {@link setDone()}. (Doing so indicates a logical\n * error in the code.)\n *\n * It follows that:\n * * The creator of the TransactionPool is responsible for running it.\n * * The TransactionPool should be ref'ed before being sharing.\n * * The receiver of the TransactionPool is only responsible for unref'ing it.\n *\n * On the other hand, a transaction pool that fails with a runtime error can still be ref'ed;\n * attempts to use the pool will result in the runtime error as expected.\n */\n // TODO: Get rid of the ref-counting stuff. It's no longer needed.\n ref(count = 1) {\n assert(\n this.#db !== undefined && !this.#done,\n `Cannot ref() a TransactionPool that is not running`,\n );\n this.#refCount += count;\n }\n\n /**\n * Decrements the internal reference count, automatically invoking {@link setDone} when it reaches 0.\n */\n unref(count = 1) {\n assert(count <= this.#refCount);\n\n this.#refCount -= count;\n if (this.#refCount === 0) {\n this.setDone();\n }\n }\n\n isRunning(): boolean {\n return this.#db !== undefined && !this.#done && this.#failure === undefined;\n }\n\n /**\n * Signals all workers to fail their transactions with the given {@link err}.\n */\n fail(err: unknown) {\n if (!this.#failure) {\n this.#failure = ensureError(err); // Fail fast: this is checked in the worker loop.\n const level =\n this.#failure instanceof ControlFlowError\n ? 'debug'\n : this.#failure instanceof AbortError\n ? 'info'\n : 'error';\n this.#lc[level]?.(this.#failure);\n\n for (let i = 0; i < this.#numWorkers; i++) {\n // Enqueue the Error to terminate any workers waiting for tasks.\n this.#tasks.enqueue(this.#failure);\n }\n }\n }\n}\n\ntype SynchronizeSnapshotTasks = {\n /**\n * The `init` Task for the TransactionPool from which the snapshot originates.\n * The pool must have Mode.SERIALIZABLE, and will be set to READ ONLY by the\n * `exportSnapshot` init task. If the TransactionPool has multiple workers, the\n * first worker will export a snapshot that the others set.\n */\n exportSnapshot: Task;\n\n /**\n * The `cleanup` Task for the TransactionPool from which the snapshot\n * originates. This Task will wait for the follower pool to `setSnapshot`\n * to ensure that the snapshot is successfully shared before the originating\n * transaction is closed.\n */\n cleanupExport: Task;\n\n /**\n * The `init` Task for the TransactionPool in which workers will\n * consequently see the same snapshot as that of the first pool. The pool\n * must have Mode.SERIALIZABLE, and will have the ability to perform writes.\n */\n setSnapshot: Task;\n\n /** The ID of the shared snapshot. 
*/\n snapshotID: Promise<string>;\n};\n\n/**\n * Init Tasks for Postgres snapshot synchronization across transactions.\n *\n * https://www.postgresql.org/docs/9.3/functions-admin.html#:~:text=Snapshot%20Synchronization%20Functions,identical%20content%20in%20the%20database.\n */\nexport function synchronizedSnapshots(): SynchronizeSnapshotTasks {\n const {\n promise: snapshotExported,\n resolve: exportSnapshot,\n reject: failExport,\n } = resolver<string>();\n\n const {\n promise: snapshotCaptured,\n resolve: captureSnapshot,\n reject: failCapture,\n } = resolver<unknown>();\n\n // Set by the first worker to run its initTask, who becomes responsible for\n // exporting the snapshot. TODO: Plumb the workerNum and use that instead.\n let firstWorkerRun = false;\n\n // Note: Neither init task should `await`, as processing in each pool can proceed\n // as soon as the statements have been sent to the db. However, the `cleanupExport`\n // task must `await` the result of `setSnapshot` to ensure that exporting transaction\n // does not close before the snapshot has been captured.\n return {\n exportSnapshot: tx => {\n if (!firstWorkerRun) {\n firstWorkerRun = true;\n const stmt =\n tx`SELECT pg_export_snapshot() AS snapshot; SET TRANSACTION READ ONLY;`.simple();\n // Intercept the promise to propagate the information to `snapshotExported`.\n stmt.then(result => exportSnapshot(result[0].snapshot), failExport);\n return [stmt]; // Also return the stmt so that it gets awaited (and errors handled).\n }\n return snapshotExported.then(snapshotID => [\n tx.unsafe(`SET TRANSACTION SNAPSHOT '${snapshotID}'`),\n tx`SET TRANSACTION READ ONLY`.simple(),\n ]);\n },\n\n setSnapshot: tx =>\n snapshotExported.then(snapshotID => {\n const stmt = tx.unsafe(`SET TRANSACTION SNAPSHOT '${snapshotID}'`);\n // Intercept the promise to propagate the information to `cleanupExport`.\n stmt.then(captureSnapshot, failCapture);\n return [stmt];\n }),\n\n cleanupExport: async () => {\n await snapshotCaptured;\n return [];\n },\n\n snapshotID: snapshotExported,\n };\n}\n\n/**\n * Returns `init` and `cleanup` {@link Task}s for a TransactionPool that ensure its workers\n * share a single view of the database. 
This is used for View Notifier and View Syncer logic\n * that allows multiple entities to perform parallel reads on the same snapshot of the database.\n */\nexport function sharedSnapshot(): {\n init: Task;\n cleanup: Task;\n snapshotID: Promise<string>;\n} {\n const {\n promise: snapshotExported,\n resolve: exportSnapshot,\n reject: failExport,\n } = resolver<string>();\n\n // Set by the first worker to run its initTask, who becomes responsible for\n // exporting the snapshot.\n let firstWorkerRun = false;\n\n // Set when any worker is done, signalling that all non-sentinel Tasks have been\n // dequeued, and thus any subsequently spawned workers should skip their initTask\n // since the snapshot is no longer needed (and soon to become invalid).\n let firstWorkerDone = false;\n\n return {\n init: (tx, lc) => {\n if (!firstWorkerRun) {\n firstWorkerRun = true;\n const stmt = tx`SELECT pg_export_snapshot() AS snapshot;`.simple();\n // Intercept the promise to propagate the information to `snapshotExported`.\n stmt.then(result => exportSnapshot(result[0].snapshot), failExport);\n return [stmt]; // Also return the stmt so that it gets awaited (and errors handled).\n }\n if (!firstWorkerDone) {\n return snapshotExported.then(snapshotID => [\n tx.unsafe(`SET TRANSACTION SNAPSHOT '${snapshotID}'`),\n ]);\n }\n lc.debug?.('All work is done. No need to set snapshot');\n return [];\n },\n\n cleanup: () => {\n firstWorkerDone = true;\n return [];\n },\n\n snapshotID: snapshotExported,\n };\n}\n\n/**\n * @returns An `init` Task for importing a snapshot from another transaction.\n */\nexport function importSnapshot(snapshotID: string): {\n init: Task;\n imported: Promise<void>;\n} {\n const {promise: imported, resolve, reject} = resolver<void>();\n\n return {\n init: tx => {\n const stmt = tx.unsafe(`SET TRANSACTION SNAPSHOT '${snapshotID}'`);\n stmt.then(() => resolve(), reject);\n return [stmt];\n },\n\n imported,\n };\n}\n\n/**\n * A superclass of Errors used for control flow that is needed to handle\n * another Error but does not constitute an error condition itself (e.g.\n * aborting transactions after a previous one fails). Subclassing this Error\n * will result in lowering the log level from `error` to `debug`.\n */\nexport class ControlFlowError extends Error {\n constructor(cause?: unknown) {\n super();\n this.cause = cause;\n }\n}\n\n/**\n * Internal error used to rollback the worker transaction. This is used\n * instead of executing a `ROLLBACK` statement because the postgres.js\n * library will otherwise try to execute an extraneous `COMMIT`, which\n * results in outputting a \"no transaction in progress\" warning to the\n * database logs.\n *\n * Throwing an exception, on the other hand, executes the postgres.js\n * codepath that calls `ROLLBACK` instead.\n */\nclass RollbackSignal extends ControlFlowError {\n readonly name = 'RollbackSignal';\n readonly message = 'rolling back transaction';\n}\n\nfunction ensureError(err: unknown): Error {\n if (err instanceof Error) {\n return err;\n }\n const error = new Error();\n error.cause = err;\n return error;\n}\n\ninterface TaskRunner {\n /**\n * Manages the running of a Task or ReadTask in two phases:\n *\n * - If the task involves blocking, this is done in the worker. Once the\n * blocking is done, `freeWorker()` is invoked to signal that the worker\n * is available to run another task. 
Note that this should be invoked\n * *before* resolving the result to the calling thread so that a\n * subsequent task can reuse the same worker.\n *\n * - Task statements are executed on the database asynchronously. The final\n * result of this processing is encapsulated in the returned `pending`\n * Promise. The worker will await the last pending Promise before closing\n * the transaction.\n *\n * @param freeWorker should be called as soon as all blocking operations are\n * completed in order to return the transaction to the pool.\n * @returns A `pending` Promise indicating when the statements have been\n * processed by the database, allowing the transaction to be closed.\n * This should be `null` if there are no transaction-dependent\n * statements to await.\n */\n run(\n tx: PostgresTransaction,\n lc: LogContext,\n freeWorker: () => void,\n ): Promise<{pending: Promise<void> | null}>;\n\n /**\n * Invoked if the TransactionPool is already in a failed state when the task\n * is requested.\n */\n rejected(reason: unknown): void;\n}\n\n// TODO: Get rid of the timeout stuff. It's no longer needed.\nconst IDLE_TIMEOUT_MS = 5_000;\n\nconst KEEPALIVE_TIMEOUT_MS = 60_000;\n\nconst KEEPALIVE_TASK: Task = tx => [tx`SELECT 1`.simple()];\n\ntype TimeoutTask = {\n timeoutMs: number;\n task: Task | 'done';\n};\n\ntype TimeoutTasks = {\n forInitialWorkers: TimeoutTask;\n forExtraWorkers: TimeoutTask;\n};\n\n// Production timeout tasks. Overridden in tests.\nexport const TIMEOUT_TASKS: TimeoutTasks = {\n forInitialWorkers: {\n timeoutMs: KEEPALIVE_TIMEOUT_MS,\n task: KEEPALIVE_TASK,\n },\n forExtraWorkers: {\n timeoutMs: IDLE_TIMEOUT_MS,\n task: 'done',\n },\n};\n\n// The slice of information from the Query object in Postgres.js that gets logged for debugging.\n// https://github.com/porsager/postgres/blob/f58cd4f3affd3e8ce8f53e42799672d86cd2c70b/src/connection.js#L219\ntype Query = {string: string; parameters: 
object[]};\n"],"names":["key","value"],"mappings":";;;;;;;AAqDO,MAAM,gBAAgB;AAAA,EAC3B;AAAA,EACS;AAAA,EACA;AAAA,EACA;AAAA,EACA,SAAS,IAAI,MAAA;AAAA,EACb,WAA+B,CAAA;AAAA,EAC/B;AAAA,EACA;AAAA,EACA;AAAA,EACT;AAAA,EACA,cAAc;AAAA,EACd;AAAA;AAAA,EAEA,YAAY;AAAA,EACZ,QAAQ;AAAA,EACR;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAkBA,YACE,IACA,MACA,MACA,SACA,iBAAiB,GACjB,aAAa,gBACb,eAAe,eACf;AACA,WAAO,iBAAiB,CAAC;AACzB,WAAO,cAAc,cAAc;AAEnC,SAAK,MAAM;AACX,SAAK,QAAQ;AACb,SAAK,QAAQ,OAAO,KAAK,YAAY,IAAI,IAAI;AAC7C,SAAK,WAAW,UAAU,KAAK,YAAY,OAAO,IAAI;AACtD,SAAK,kBAAkB;AACvB,SAAK,cAAc;AACnB,SAAK,cAAc;AACnB,SAAK,eAAe;AAAA,EACtB;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,IAAI,IAAsB;AACxB,WAAO,CAAC,KAAK,KAAK,iBAAiB;AACnC,SAAK,MAAM;AACX,aAAS,IAAI,GAAG,IAAI,KAAK,aAAa,KAAK;AACzC,WAAK,WAAW,EAAE;AAAA,IACpB;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,kBAAkB,KAAa,OAAe;AAC5C,SAAK,MAAM,KAAK,IAAI,YAAY,KAAK,KAAK;AAE1C,WAAO;AAAA,MACL,mBAAmB,CAACA,MAAaC,WAC/B,KAAK,kBAAkBD,MAAKC,MAAK;AAAA,IAAA;AAAA,EAEvC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAsBA,MAAM,OAAO;AACX,UAAM,aAAa,KAAK,SAAS;AACjC,UAAM,QAAQ,IAAI,KAAK,QAAQ;AAE/B,QAAI,aAAa,KAAK,SAAS,QAAQ;AAKrC,YAAM,QAAQ,IAAI,KAAK,QAAQ;AAAA,IACjC;AACA,SAAK,IAAI,QAAQ,uBAAuB;AAAA,EAC1C;AAAA,EAEA,WAAW,IAAgB;AACzB,UAAM,KAAK,KAAK,SAAS,SAAS;AAClC,UAAM,KAAK,KAAK,IAAI,YAAY,MAAM,EAAE;AAExC,UAAM,KACJ,KAAK,SAAS,SAAS,KAAK,kBACxB,KAAK,aAAa,oBAClB,KAAK,aAAa;AACxB,UAAM,EAAC,cAAa;AACpB,UAAM,cAAc,GAAG,SAAS,SAAS,SAAS,KAAK,YAAY,GAAG,IAAI;AAE1E,UAAM,SAAS,OAAO,OAA4B;AAChD,UAAI;AACF,WAAG,QAAQ,qBAAqB;AAChC,gCAAwB,EAAE;AAE1B,YAAI,OAAsB;AAE1B,cAAM,cAAc,OAAO,WAAuB;AAChD,qBAAW,KAAK,SAAS,KAAK;AAC9B,gBAAM,EAAC,YAAW,MAAM,OAAO,IAAI,IAAI,IAAI,MAAM;AAC/C,uBAAW,KAAK,SAAS,KAAK;AAAA,UAChC,CAAC;AACD,iBAAO,WAAW;AAAA,QACpB;AAEA,YAAI,OACF,KAAK,SAAU,MAAM,KAAK,OAAO,QAAQ,aAAa,SAAS;AAEjE,YAAI;AACF,iBAAO,SAAS,QAAQ;AACtB,gBACE,gBAAgB,SACf,SAAS,KAAK,SAAS,KAAK,UAC7B;AACA,oBAAM,KAAK,YAAY;AAAA,YACzB;AACA,kBAAM,YAAY,IAAI;AAGtB,mBAAO,MAAM,KAAK,OAAO,QAAQ,aAAa,SAAS;AAAA,UACzD;AAAA,QACF,UAAA;AAEE,cAAI,KAAK,UAAU;AACjB,kBAAM,YAAY,KAAK,QAAQ;AAAA,UACjC;AAAA,QACF;AAEA,WAAG,QAAQ,qBAAqB;AAGhC,eAAO;AAAA,MACT,SAAS,GAAG;AACV,YAAI,MAAM,KAAK,UAAU;AACvB,eAAK,KAAK,CAAC;AAAA,QACb;AACA,cAAM;AAAA,MACR;AAAA,IACF;AAEA,SAAK,SAAS;AAAA,MACZ,GACG,MAAM,KAAK,OAAO,MAAM,EACxB,MAAM,CAAA,MAAK;AACV,YAAI,aAAa,gBAAgB;AAG/B,aAAG,QAAQ,qBAAqB;AAAA,QAClC,OAAO;AACL,gBAAM;AAAA,QACR;AAAA,MACF,CAAC,EACA,QAAQ,MAAM,KAAK,aAAa;AAAA,IAAA;AAMrC,QAAI,KAAK,OAAO;AACd,WAAK,OAAO,QAAQ,MAAM;AAAA,IAC5B;AACA,QAAI,KAAK,UAAU;AACjB,WAAK,OAAO,QAAQ,KAAK,QAAQ;AAAA,IACnC;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,QAAQ,MAA2B;AACjC,UAAM,IAAI,SAAA;AACV,SAAK,SAAS,KAAK,YAAY,MAAM,CAAC,CAAC;AACvC,WAAO,EAAE;AAAA,EACX;AAAA,EAES,SAAS,YAAY,IAAA;AAAA,EAC9B,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYT,YAAY,MAAY,IAA2B,YAAwB;AACzE,WAAO;AAAA,MACL,KAAK,OAAO,IAAI,IAAI,eAAe;AACjC,YAAI;AACJ,YAAI;AACF,kBAAQ,MAAM,KAAK,IAAI,EAAE;AAAA,QAC3B,SAAS,GAAG;AACV,YAAE,QAAA;AACF,gBAAM;AAAA,QACR,UAAA;AACE,qBAAA;AAAA,QACF;AAEA,YAAI,MAAM,WAAW,GAAG;AACtB,YAAE,QAAA;AACF,iBAAO,EAAC,SAAS,KAAA;AAAA,QACnB;AAKA,cAAM,OAAO,MAAM;AAAA,UACjB,CAAC,GAAG,SACF,KACG,QAAA,EACA,KAAK,MAAM;AACV,gBAAI,EAAE,KAAK,SAAS,QAAS,GAAG;AAC9B,oBAAM,IAAI;AACV,iBAAG;AAAA,gBACD,YAAY,KAAK,MAAM,kBAAkB,YAAY,IAAA,IAAQ,KAAK,QAAQ,QAAQ,CAAC,CAAC;AAAA,gBACpF;AAAA,kBACE,WAAW,EAAE;AAAA,kBACb,QAAQ,UAAU,EAAE,UAAU;AAAA,gBAAA;AAAA,cAChC;AAAA,YAEJ;AAAA,UACF,CAAC,EACA,MAAM,OAAK,KAAK,KAAK,CAAC,CAAC;AAAA,UAC5B;AAAA,QAAA;AA
EF,eAAO,EAAC,SAAS,KAAK,KAAK,EAAE,OAAO,EAAA;AAAA,MACtC;AAAA,MACA,UAAU,EAAE;AAAA,IAAA;AAAA,EAEhB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,gBAAmB,UAAmC;AACpD,UAAM,IAAI,SAAA;AACV,SAAK,SAAS,KAAK,YAAY,UAAU,CAAC,CAAC;AAC3C,WAAO,EAAE;AAAA,EACX;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,YAAe,UAAuB,GAA4B;AAChE,WAAO;AAAA,MACL,KAAK,OAAO,IAAI,IAAI,eAAe;AACjC,YAAI;AACJ,YAAI;AACF,mBAAS,MAAM,SAAS,IAAI,EAAE;AAC9B,qBAAA;AACA,YAAE,QAAQ,MAAM;AAAA,QAClB,SAAS,GAAG;AACV,qBAAA;AACA,YAAE,OAAO,CAAC;AAAA,QACZ;AACA,eAAO,EAAC,SAAS,KAAA;AAAA,MACnB;AAAA,MACA,UAAU,EAAE;AAAA,IAAA;AAAA,EAEhB;AAAA,EAEA,SAAS,QAA0B;AACjC,WAAO,CAAC,KAAK,OAAO,kBAAkB;AACtC,QAAI,KAAK,UAAU;AACjB,aAAO,SAAS,KAAK,QAAQ;AAC7B;AAAA,IACF;AAEA,SAAK,OAAO,QAAQ,MAAM;AAG1B,QAAI,KAAK,cAAc,KAAK,aAAa;AACvC,YAAM,cAAc,KAAK,OAAO,KAAA;AAEhC,UAAI,cAAc,KAAK,cAAc,KAAK,aAAa;AACrD,aAAK,OAAO,KAAK,WAAW,KAAK,GAAG;AACpC,aAAK;AACL,aAAK,IAAI,QAAQ,0BAA0B,KAAK,WAAW,EAAE;AAAA,MAC/D;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,QAAQ;AACN,SAAK,KAAK,IAAI,gBAAgB;AAAA,EAChC;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,UAAU;AACR,WAAO,CAAC,KAAK,OAAO,kBAAkB;AACtC,SAAK,QAAQ;AAEb,aAAS,IAAI,GAAG,IAAI,KAAK,aAAa,KAAK;AACzC,WAAK,OAAO,QAAQ,MAAM;AAAA,IAC5B;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAqBA,IAAI,QAAQ,GAAG;AACb;AAAA,MACE,KAAK,QAAQ,UAAa,CAAC,KAAK;AAAA,MAChC;AAAA,IAAA;AAEF,SAAK,aAAa;AAAA,EACpB;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,QAAQ,GAAG;AACf,WAAO,SAAS,KAAK,SAAS;AAE9B,SAAK,aAAa;AAClB,QAAI,KAAK,cAAc,GAAG;AACxB,WAAK,QAAA;AAAA,IACP;AAAA,EACF;AAAA,EAEA,YAAqB;AACnB,WAAO,KAAK,QAAQ,UAAa,CAAC,KAAK,SAAS,KAAK,aAAa;AAAA,EACpE;AAAA;AAAA;AAAA;AAAA,EAKA,KAAK,KAAc;AACjB,QAAI,CAAC,KAAK,UAAU;AAClB,WAAK,WAAW,YAAY,GAAG;AAC/B,YAAM,QACJ,KAAK,oBAAoB,mBACrB,UACA,KAAK,oBAAoB,aACvB,SACA;AACR,WAAK,IAAI,KAAK,IAAI,KAAK,QAAQ;AAE/B,eAAS,IAAI,GAAG,IAAI,KAAK,aAAa,KAAK;AAEzC,aAAK,OAAO,QAAQ,KAAK,QAAQ;AAAA,MACnC;AAAA,IACF;AAAA,EACF;AACF;AAgJO,SAAS,eAAe,YAG7B;AACA,QAAM,EAAC,SAAS,UAAU,SAAS,OAAA,IAAU,SAAA;AAE7C,SAAO;AAAA,IACL,MAAM,CAAA,OAAM;AACV,YAAM,OAAO,GAAG,OAAO,6BAA6B,UAAU,GAAG;AACjE,WAAK,KAAK,MAAM,QAAA,GAAW,MAAM;AACjC,aAAO,CAAC,IAAI;AAAA,IACd;AAAA,IAEA;AAAA,EAAA;AAEJ;AAQO,MAAM,yBAAyB,MAAM;AAAA,EAC1C,YAAY,OAAiB;AAC3B,UAAA;AACA,SAAK,QAAQ;AAAA,EACf;AACF;AAYA,MAAM,uBAAuB,iBAAiB;AAAA,EACnC,OAAO;AAAA,EACP,UAAU;AACrB;AAEA,SAAS,YAAY,KAAqB;AACxC,MAAI,eAAe,OAAO;AACxB,WAAO;AAAA,EACT;AACA,QAAM,QAAQ,IAAI,MAAA;AAClB,QAAM,QAAQ;AACd,SAAO;AACT;AAsCA,MAAM,kBAAkB;AAExB,MAAM,uBAAuB;AAE7B,MAAM,iBAAuB,CAAA,OAAM,CAAC,aAAa,QAAQ;AAalD,MAAM,gBAA8B;AAAA,EACzC,mBAAmB;AAAA,IACjB,WAAW;AAAA,IACX,MAAM;AAAA,EAAA;AAAA,EAER,iBAAiB;AAAA,IACf,WAAW;AAAA,IACX,MAAM;AAAA,EAAA;AAEV;"}
+
{"version":3,"file":"transaction-pool.js","sources":["../../../../../zero-cache/src/db/transaction-pool.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {type Resolver, resolver} from '@rocicorp/resolver';\nimport type postgres from 'postgres';\nimport {AbortError} from '../../../shared/src/abort-error.ts';\nimport {assert} from '../../../shared/src/asserts.ts';\nimport {stringify} from '../../../shared/src/bigint-json.ts';\nimport type {Enum} from '../../../shared/src/enum.ts';\nimport {Queue} from '../../../shared/src/queue.ts';\nimport {promiseVoid} from '../../../shared/src/resolved-promises.ts';\nimport {\n disableStatementTimeout,\n type PostgresDB,\n type PostgresTransaction,\n} from '../types/pg.ts';\nimport type * as Mode from './mode-enum.ts';\n\ntype Mode = Enum<typeof Mode>;\n\ntype MaybePromise<T> = Promise<T> | T;\n\nexport type Statement =\n | postgres.PendingQuery<(postgres.Row & Iterable<postgres.Row>)[]>\n | postgres.PendingQuery<postgres.Row[]>;\n\n/**\n * A {@link Task} is logic run from within a transaction in a {@link TransactionPool}.\n * It returns a list of `Statements` that the transaction executes asynchronously and\n * awaits when it receives the 'done' signal.\n *\n */\nexport type Task = (\n tx: PostgresTransaction,\n lc: LogContext,\n) => MaybePromise<Statement[]>;\n\n/**\n * A {@link ReadTask} is run from within a transaction, but unlike a {@link Task},\n * the results of a ReadTask are opaque to the TransactionPool and returned to the\n * caller of {@link TransactionPool.processReadTask}.\n */\nexport type ReadTask<T> = (\n tx: PostgresTransaction,\n lc: LogContext,\n) => MaybePromise<T>;\n\n/**\n * A TransactionPool is a pool of one or more {@link postgres.TransactionSql}\n * objects that participate in processing a dynamic queue of tasks.\n *\n * This can be used for serializing a set of tasks that arrive asynchronously\n * to a single transaction (for writing) or performing parallel reads across\n * multiple connections at the same snapshot (e.g. read only snapshot transactions).\n */\nexport class TransactionPool {\n #lc: LogContext;\n readonly #mode: Mode;\n readonly #init: TaskRunner | undefined;\n readonly #cleanup: TaskRunner | undefined;\n readonly #tasks = new Queue<TaskRunner | Error | 'done'>();\n readonly #workers: Promise<unknown>[] = [];\n readonly #initialWorkers: number;\n readonly #maxWorkers: number;\n readonly #timeoutTask: TimeoutTasks;\n #numWorkers: number;\n #numWorking = 0;\n #db: PostgresDB | undefined; // set when running. stored to allow adaptive pool sizing.\n\n #refCount = 1;\n #done = false;\n #failure: Error | undefined;\n\n /**\n * @param init A {@link Task} that is run in each Transaction before it begins\n * processing general tasks. This can be used to to set the transaction\n * mode, export/set snapshots, etc. This will be run even if\n * {@link fail} has been called on the pool.\n * @param cleanup A {@link Task} that is run in each Transaction before it closes.\n * This will be run even if {@link fail} has been called, or if a\n * preceding Task threw an Error.\n * @param initialWorkers The initial number of transaction workers to process tasks.\n * This is the steady state number of workers that will be kept\n * alive if the TransactionPool is long lived.\n * This must be greater than 0. Defaults to 1.\n * @param maxWorkers When specified, allows the pool to grow to `maxWorkers`. This\n * must be greater than or equal to `initialWorkers`. 
On-demand\n * workers will be shut down after an idle timeout of 5 seconds.\n */\n constructor(\n lc: LogContext,\n mode: Mode,\n init?: Task,\n cleanup?: Task,\n initialWorkers = 1,\n maxWorkers = initialWorkers,\n timeoutTasks = TIMEOUT_TASKS, // Overridden for tests.\n ) {\n assert(initialWorkers > 0, 'initialWorkers must be positive');\n assert(\n maxWorkers >= initialWorkers,\n 'maxWorkers must be >= initialWorkers',\n );\n\n this.#lc = lc;\n this.#mode = mode;\n this.#init = init ? this.#stmtRunner(init) : undefined;\n this.#cleanup = cleanup ? this.#stmtRunner(cleanup) : undefined;\n this.#initialWorkers = initialWorkers;\n this.#numWorkers = initialWorkers;\n this.#maxWorkers = maxWorkers;\n this.#timeoutTask = timeoutTasks;\n }\n\n /**\n * Starts the pool of workers to process Tasks with transactions opened from the\n * specified {@link db}.\n */\n run(db: PostgresDB): this {\n assert(!this.#db, 'already running');\n this.#db = db;\n for (let i = 0; i < this.#numWorkers; i++) {\n this.#addWorker(db);\n }\n return this;\n }\n\n /**\n * Adds context parameters to internal LogContext. This is useful for context values that\n * are not known when the TransactionPool is constructed (e.g. determined after a database\n * call when the pool is running).\n *\n * Returns an object that can be used to add more parameters.\n */\n addLoggingContext(key: string, value: string) {\n this.#lc = this.#lc.withContext(key, value);\n\n return {\n addLoggingContext: (key: string, value: string) =>\n this.addLoggingContext(key, value),\n };\n }\n\n /**\n * Returns a promise that:\n *\n * * resolves after {@link setDone} has been called (or the the pool as been {@link unref}ed\n * to a 0 ref count), once all added tasks have been processed and all transactions have been\n * committed or closed.\n *\n * * rejects if processing was aborted with {@link fail} or if processing any of\n * the tasks resulted in an error. All uncommitted transactions will have been\n * rolled back.\n *\n * Note that partial failures are possible if processing writes with multiple workers\n * (e.g. `setDone` is called, allowing some workers to commit, after which other\n * workers encounter errors). Using a TransactionPool in this manner does not make\n * sense in terms of transactional semantics, and is thus not recommended.\n *\n * For reads, however, multiple workers is useful for performing parallel reads\n * at the same snapshot. See {@link synchronizedSnapshots} for an example.\n * Resolves or rejects when all workers are done or failed.\n */\n async done() {\n const numWorkers = this.#workers.length;\n await Promise.all(this.#workers);\n\n if (numWorkers < this.#workers.length) {\n // If workers were added after the initial set, they must be awaited to ensure\n // that the results (i.e. rejections) of all workers are accounted for. This only\n // needs to be re-done once, because the fact that the first `await` completed\n // guarantees that the pool is in a terminal state and no new workers can be added.\n await Promise.all(this.#workers);\n }\n this.#lc.debug?.('transaction pool done');\n }\n\n #addWorker(db: PostgresDB) {\n const id = this.#workers.length + 1;\n const lc = this.#lc.withContext('tx', id);\n\n const tt: TimeoutTask =\n this.#workers.length < this.#initialWorkers\n ? this.#timeoutTask.forInitialWorkers\n : this.#timeoutTask.forExtraWorkers;\n const {timeoutMs} = tt;\n const timeoutTask = tt.task === 'done' ? 
'done' : this.#stmtRunner(tt.task);\n\n const worker = async (tx: PostgresTransaction) => {\n try {\n lc.debug?.('started transaction');\n disableStatementTimeout(tx);\n\n let last: Promise<void> = promiseVoid;\n\n const executeTask = async (runner: TaskRunner) => {\n runner !== this.#init && this.#numWorking++;\n const {pending} = await runner.run(tx, lc, () => {\n runner !== this.#init && this.#numWorking--;\n });\n last = pending ?? last;\n };\n\n let task: TaskRunner | Error | 'done' =\n this.#init ?? (await this.#tasks.dequeue(timeoutTask, timeoutMs));\n\n try {\n while (task !== 'done') {\n if (\n task instanceof Error ||\n (task !== this.#init && this.#failure)\n ) {\n throw this.#failure ?? task;\n }\n await executeTask(task);\n\n // await the next task.\n task = await this.#tasks.dequeue(timeoutTask, timeoutMs);\n }\n } finally {\n // Execute the cleanup task even on failure.\n if (this.#cleanup) {\n await executeTask(this.#cleanup);\n }\n }\n\n lc.debug?.('closing transaction');\n // Given the semantics of a Postgres transaction, the last statement\n // will only succeed if all of the preceding statements succeeded.\n return last;\n } catch (e) {\n if (e !== this.#failure) {\n this.fail(e); // A failure in any worker should fail the pool.\n }\n throw e;\n }\n };\n\n this.#workers.push(\n db\n .begin(this.#mode, worker)\n .catch(e => {\n if (e instanceof RollbackSignal) {\n // A RollbackSignal is used to gracefully rollback the postgres.js\n // transaction block. It should not be thrown up to the application.\n lc.debug?.('aborted transaction');\n } else {\n throw e;\n }\n })\n .finally(() => this.#numWorkers--),\n );\n\n // After adding the worker, enqueue a terminal signal if we are in either of the\n // terminal states (both of which prevent more tasks from being enqueued), to ensure\n // that the added worker eventually exits.\n if (this.#done) {\n this.#tasks.enqueue('done');\n }\n if (this.#failure) {\n this.#tasks.enqueue(this.#failure);\n }\n }\n\n /**\n * Processes the statements produced by the specified {@link Task},\n * returning a Promise that resolves when the statements are either processed\n * by the database or rejected.\n *\n * Note that statement failures will result in failing the entire\n * TransactionPool (per transaction semantics). However, the returned Promise\n * itself will resolve rather than reject. As such, it is fine to ignore\n * returned Promises in order to pipeline requests to the database. It is\n * recommended to occasionally await them (e.g. 
after some threshold) in\n * order to avoid memory blowup in the case of database slowness.\n */\n process(task: Task): Promise<void> {\n const r = resolver<void>();\n this.#process(this.#stmtRunner(task, r));\n return r.promise;\n }\n\n readonly #start = performance.now();\n #stmts = 0;\n\n /**\n * Implements the semantics specified in {@link process()}.\n *\n * Specifically:\n * * `freeWorker()` is called as soon as the statements are produced,\n * allowing them to be pipelined to the database.\n * * Statement errors result in failing the transaction pool.\n * * The client-supplied Resolver resolves on success or failure;\n * it is never rejected.\n */\n #stmtRunner(task: Task, r: {resolve: () => void} = resolver()): TaskRunner {\n return {\n run: async (tx, lc, freeWorker) => {\n let stmts: Statement[];\n try {\n stmts = await task(tx, lc);\n } catch (e) {\n r.resolve();\n throw e;\n } finally {\n freeWorker();\n }\n\n if (stmts.length === 0) {\n r.resolve();\n return {pending: null};\n }\n\n // Execute the statements (i.e. send to the db) immediately.\n // The last result is returned for the worker to await before\n // closing the transaction.\n const last = stmts.reduce(\n (_, stmt) =>\n stmt\n .execute()\n .then(() => {\n if (++this.#stmts % 1000 === 0) {\n const q = stmt as unknown as Query;\n lc.debug?.(\n `executed ${this.#stmts}th statement (${(performance.now() - this.#start).toFixed(3)} ms)`,\n {\n statement: q.string,\n params: stringify(q.parameters),\n },\n );\n }\n })\n .catch(e => this.fail(e)),\n promiseVoid,\n );\n return {pending: last.then(r.resolve)};\n },\n rejected: r.resolve,\n };\n }\n\n /**\n * Processes and returns the result of executing the {@link ReadTask} from\n * within the transaction. An error thrown by the task will result in\n * rejecting the returned Promise, but will not affect the transaction pool\n * itself.\n */\n processReadTask<T>(readTask: ReadTask<T>): Promise<T> {\n const r = resolver<T>();\n this.#process(this.#readRunner(readTask, r));\n return r.promise;\n }\n\n /**\n * Implements the semantics specified in {@link processReadTask()}.\n *\n * Specifically:\n * * `freeWorker()` is called as soon as the result is produced,\n * before resolving the client-supplied Resolver.\n * * Errors result in rejecting the client-supplied Resolver but\n * do not affect transaction pool.\n */\n #readRunner<T>(readTask: ReadTask<T>, r: Resolver<T>): TaskRunner {\n return {\n run: async (tx, lc, freeWorker) => {\n let result: T;\n try {\n result = await readTask(tx, lc);\n freeWorker();\n r.resolve(result);\n } catch (e) {\n freeWorker();\n r.reject(e);\n }\n return {pending: null};\n },\n rejected: r.reject,\n };\n }\n\n #process(runner: TaskRunner): void {\n assert(!this.#done, 'already set done');\n if (this.#failure) {\n runner.rejected(this.#failure);\n return;\n }\n\n this.#tasks.enqueue(runner);\n\n // Check if the pool size can and should be increased.\n if (this.#numWorkers < this.#maxWorkers) {\n const outstanding = this.#tasks.size();\n\n if (outstanding > this.#numWorkers - this.#numWorking) {\n this.#db && this.#addWorker(this.#db);\n this.#numWorkers++;\n this.#lc.debug?.(`Increased pool size to ${this.#numWorkers}`);\n }\n }\n }\n\n /**\n * Ends all workers with a ROLLBACK. Throws if the pool is already done\n * or aborted.\n */\n abort() {\n this.fail(new RollbackSignal());\n }\n\n /**\n * Signals to all workers to end their transaction once all pending tasks have\n * been completed. 
Throws if the pool is already done or aborted.\n */\n setDone() {\n assert(!this.#done, 'already set done');\n this.#done = true;\n\n for (let i = 0; i < this.#numWorkers; i++) {\n this.#tasks.enqueue('done');\n }\n }\n\n /**\n * An alternative to explicitly calling {@link setDone}, `ref()` increments an internal reference\n * count, and {@link unref} decrements it. When the reference count reaches 0, {@link setDone} is\n * automatically called. A TransactionPool is initialized with a reference count of 1.\n *\n * `ref()` should be called before sharing the pool with another component, and only after the\n * pool has been started with {@link run()}. It must not be called on a TransactionPool that is\n * already done (either via {@link unref()} or {@link setDone()}. (Doing so indicates a logical\n * error in the code.)\n *\n * It follows that:\n * * The creator of the TransactionPool is responsible for running it.\n * * The TransactionPool should be ref'ed before being sharing.\n * * The receiver of the TransactionPool is only responsible for unref'ing it.\n *\n * On the other hand, a transaction pool that fails with a runtime error can still be ref'ed;\n * attempts to use the pool will result in the runtime error as expected.\n */\n // TODO: Get rid of the ref-counting stuff. It's no longer needed.\n ref(count = 1) {\n assert(\n this.#db !== undefined && !this.#done,\n `Cannot ref() a TransactionPool that is not running`,\n );\n this.#refCount += count;\n }\n\n /**\n * Decrements the internal reference count, automatically invoking {@link setDone} when it reaches 0.\n */\n unref(count = 1) {\n assert(\n count <= this.#refCount,\n () => `Cannot unref ${count} when refCount is ${this.#refCount}`,\n );\n\n this.#refCount -= count;\n if (this.#refCount === 0) {\n this.setDone();\n }\n }\n\n isRunning(): boolean {\n return this.#db !== undefined && !this.#done && this.#failure === undefined;\n }\n\n /**\n * Signals all workers to fail their transactions with the given {@link err}.\n */\n fail(err: unknown) {\n if (!this.#failure) {\n this.#failure = ensureError(err); // Fail fast: this is checked in the worker loop.\n const level =\n this.#failure instanceof ControlFlowError\n ? 'debug'\n : this.#failure instanceof AbortError\n ? 'info'\n : 'error';\n this.#lc[level]?.(this.#failure);\n\n for (let i = 0; i < this.#numWorkers; i++) {\n // Enqueue the Error to terminate any workers waiting for tasks.\n this.#tasks.enqueue(this.#failure);\n }\n }\n }\n}\n\ntype SynchronizeSnapshotTasks = {\n /**\n * The `init` Task for the TransactionPool from which the snapshot originates.\n * The pool must have Mode.SERIALIZABLE, and will be set to READ ONLY by the\n * `exportSnapshot` init task. If the TransactionPool has multiple workers, the\n * first worker will export a snapshot that the others set.\n */\n exportSnapshot: Task;\n\n /**\n * The `cleanup` Task for the TransactionPool from which the snapshot\n * originates. This Task will wait for the follower pool to `setSnapshot`\n * to ensure that the snapshot is successfully shared before the originating\n * transaction is closed.\n */\n cleanupExport: Task;\n\n /**\n * The `init` Task for the TransactionPool in which workers will\n * consequently see the same snapshot as that of the first pool. The pool\n * must have Mode.SERIALIZABLE, and will have the ability to perform writes.\n */\n setSnapshot: Task;\n\n /** The ID of the shared snapshot. 
*/\n snapshotID: Promise<string>;\n};\n\n/**\n * Init Tasks for Postgres snapshot synchronization across transactions.\n *\n * https://www.postgresql.org/docs/9.3/functions-admin.html#:~:text=Snapshot%20Synchronization%20Functions,identical%20content%20in%20the%20database.\n */\nexport function synchronizedSnapshots(): SynchronizeSnapshotTasks {\n const {\n promise: snapshotExported,\n resolve: exportSnapshot,\n reject: failExport,\n } = resolver<string>();\n\n const {\n promise: snapshotCaptured,\n resolve: captureSnapshot,\n reject: failCapture,\n } = resolver<unknown>();\n\n // Set by the first worker to run its initTask, who becomes responsible for\n // exporting the snapshot. TODO: Plumb the workerNum and use that instead.\n let firstWorkerRun = false;\n\n // Note: Neither init task should `await`, as processing in each pool can proceed\n // as soon as the statements have been sent to the db. However, the `cleanupExport`\n // task must `await` the result of `setSnapshot` to ensure that exporting transaction\n // does not close before the snapshot has been captured.\n return {\n exportSnapshot: tx => {\n if (!firstWorkerRun) {\n firstWorkerRun = true;\n const stmt =\n tx`SELECT pg_export_snapshot() AS snapshot; SET TRANSACTION READ ONLY;`.simple();\n // Intercept the promise to propagate the information to `snapshotExported`.\n stmt.then(result => exportSnapshot(result[0].snapshot), failExport);\n return [stmt]; // Also return the stmt so that it gets awaited (and errors handled).\n }\n return snapshotExported.then(snapshotID => [\n tx.unsafe(`SET TRANSACTION SNAPSHOT '${snapshotID}'`),\n tx`SET TRANSACTION READ ONLY`.simple(),\n ]);\n },\n\n setSnapshot: tx =>\n snapshotExported.then(snapshotID => {\n const stmt = tx.unsafe(`SET TRANSACTION SNAPSHOT '${snapshotID}'`);\n // Intercept the promise to propagate the information to `cleanupExport`.\n stmt.then(captureSnapshot, failCapture);\n return [stmt];\n }),\n\n cleanupExport: async () => {\n await snapshotCaptured;\n return [];\n },\n\n snapshotID: snapshotExported,\n };\n}\n\n/**\n * Returns `init` and `cleanup` {@link Task}s for a TransactionPool that ensure its workers\n * share a single view of the database. 
This is used for View Notifier and View Syncer logic\n * that allows multiple entities to perform parallel reads on the same snapshot of the database.\n */\nexport function sharedSnapshot(): {\n init: Task;\n cleanup: Task;\n snapshotID: Promise<string>;\n} {\n const {\n promise: snapshotExported,\n resolve: exportSnapshot,\n reject: failExport,\n } = resolver<string>();\n\n // Set by the first worker to run its initTask, who becomes responsible for\n // exporting the snapshot.\n let firstWorkerRun = false;\n\n // Set when any worker is done, signalling that all non-sentinel Tasks have been\n // dequeued, and thus any subsequently spawned workers should skip their initTask\n // since the snapshot is no longer needed (and soon to become invalid).\n let firstWorkerDone = false;\n\n return {\n init: (tx, lc) => {\n if (!firstWorkerRun) {\n firstWorkerRun = true;\n const stmt = tx`SELECT pg_export_snapshot() AS snapshot;`.simple();\n // Intercept the promise to propagate the information to `snapshotExported`.\n stmt.then(result => exportSnapshot(result[0].snapshot), failExport);\n return [stmt]; // Also return the stmt so that it gets awaited (and errors handled).\n }\n if (!firstWorkerDone) {\n return snapshotExported.then(snapshotID => [\n tx.unsafe(`SET TRANSACTION SNAPSHOT '${snapshotID}'`),\n ]);\n }\n lc.debug?.('All work is done. No need to set snapshot');\n return [];\n },\n\n cleanup: () => {\n firstWorkerDone = true;\n return [];\n },\n\n snapshotID: snapshotExported,\n };\n}\n\n/**\n * @returns An `init` Task for importing a snapshot from another transaction.\n */\nexport function importSnapshot(snapshotID: string): {\n init: Task;\n imported: Promise<void>;\n} {\n const {promise: imported, resolve, reject} = resolver<void>();\n\n return {\n init: tx => {\n const stmt = tx.unsafe(`SET TRANSACTION SNAPSHOT '${snapshotID}'`);\n stmt.then(() => resolve(), reject);\n return [stmt];\n },\n\n imported,\n };\n}\n\n/**\n * A superclass of Errors used for control flow that is needed to handle\n * another Error but does not constitute an error condition itself (e.g.\n * aborting transactions after a previous one fails). Subclassing this Error\n * will result in lowering the log level from `error` to `debug`.\n */\nexport class ControlFlowError extends Error {\n constructor(cause?: unknown) {\n super();\n this.cause = cause;\n }\n}\n\n/**\n * Internal error used to rollback the worker transaction. This is used\n * instead of executing a `ROLLBACK` statement because the postgres.js\n * library will otherwise try to execute an extraneous `COMMIT`, which\n * results in outputting a \"no transaction in progress\" warning to the\n * database logs.\n *\n * Throwing an exception, on the other hand, executes the postgres.js\n * codepath that calls `ROLLBACK` instead.\n */\nclass RollbackSignal extends ControlFlowError {\n readonly name = 'RollbackSignal';\n readonly message = 'rolling back transaction';\n}\n\nfunction ensureError(err: unknown): Error {\n if (err instanceof Error) {\n return err;\n }\n const error = new Error();\n error.cause = err;\n return error;\n}\n\ninterface TaskRunner {\n /**\n * Manages the running of a Task or ReadTask in two phases:\n *\n * - If the task involves blocking, this is done in the worker. Once the\n * blocking is done, `freeWorker()` is invoked to signal that the worker\n * is available to run another task. 
Note that this should be invoked\n * *before* resolving the result to the calling thread so that a\n * subsequent task can reuse the same worker.\n *\n * - Task statements are executed on the database asynchronously. The final\n * result of this processing is encapsulated in the returned `pending`\n * Promise. The worker will await the last pending Promise before closing\n * the transaction.\n *\n * @param freeWorker should be called as soon as all blocking operations are\n * completed in order to return the transaction to the pool.\n * @returns A `pending` Promise indicating when the statements have been\n * processed by the database, allowing the transaction to be closed.\n * This should be `null` if there are no transaction-dependent\n * statements to await.\n */\n run(\n tx: PostgresTransaction,\n lc: LogContext,\n freeWorker: () => void,\n ): Promise<{pending: Promise<void> | null}>;\n\n /**\n * Invoked if the TransactionPool is already in a failed state when the task\n * is requested.\n */\n rejected(reason: unknown): void;\n}\n\n// TODO: Get rid of the timeout stuff. It's no longer needed.\nconst IDLE_TIMEOUT_MS = 5_000;\n\nconst KEEPALIVE_TIMEOUT_MS = 60_000;\n\nconst KEEPALIVE_TASK: Task = tx => [tx`SELECT 1`.simple()];\n\ntype TimeoutTask = {\n timeoutMs: number;\n task: Task | 'done';\n};\n\ntype TimeoutTasks = {\n forInitialWorkers: TimeoutTask;\n forExtraWorkers: TimeoutTask;\n};\n\n// Production timeout tasks. Overridden in tests.\nexport const TIMEOUT_TASKS: TimeoutTasks = {\n forInitialWorkers: {\n timeoutMs: KEEPALIVE_TIMEOUT_MS,\n task: KEEPALIVE_TASK,\n },\n forExtraWorkers: {\n timeoutMs: IDLE_TIMEOUT_MS,\n task: 'done',\n },\n};\n\n// The slice of information from the Query object in Postgres.js that gets logged for debugging.\n// https://github.com/porsager/postgres/blob/f58cd4f3affd3e8ce8f53e42799672d86cd2c70b/src/connection.js#L219\ntype Query = {string: string; parameters: 
object[]};\n"],"names":["key","value"],"mappings":";;;;;;;AAqDO,MAAM,gBAAgB;AAAA,EAC3B;AAAA,EACS;AAAA,EACA;AAAA,EACA;AAAA,EACA,SAAS,IAAI,MAAA;AAAA,EACb,WAA+B,CAAA;AAAA,EAC/B;AAAA,EACA;AAAA,EACA;AAAA,EACT;AAAA,EACA,cAAc;AAAA,EACd;AAAA;AAAA,EAEA,YAAY;AAAA,EACZ,QAAQ;AAAA,EACR;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAkBA,YACE,IACA,MACA,MACA,SACA,iBAAiB,GACjB,aAAa,gBACb,eAAe,eACf;AACA,WAAO,iBAAiB,GAAG,iCAAiC;AAC5D;AAAA,MACE,cAAc;AAAA,MACd;AAAA,IAAA;AAGF,SAAK,MAAM;AACX,SAAK,QAAQ;AACb,SAAK,QAAQ,OAAO,KAAK,YAAY,IAAI,IAAI;AAC7C,SAAK,WAAW,UAAU,KAAK,YAAY,OAAO,IAAI;AACtD,SAAK,kBAAkB;AACvB,SAAK,cAAc;AACnB,SAAK,cAAc;AACnB,SAAK,eAAe;AAAA,EACtB;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,IAAI,IAAsB;AACxB,WAAO,CAAC,KAAK,KAAK,iBAAiB;AACnC,SAAK,MAAM;AACX,aAAS,IAAI,GAAG,IAAI,KAAK,aAAa,KAAK;AACzC,WAAK,WAAW,EAAE;AAAA,IACpB;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,kBAAkB,KAAa,OAAe;AAC5C,SAAK,MAAM,KAAK,IAAI,YAAY,KAAK,KAAK;AAE1C,WAAO;AAAA,MACL,mBAAmB,CAACA,MAAaC,WAC/B,KAAK,kBAAkBD,MAAKC,MAAK;AAAA,IAAA;AAAA,EAEvC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAsBA,MAAM,OAAO;AACX,UAAM,aAAa,KAAK,SAAS;AACjC,UAAM,QAAQ,IAAI,KAAK,QAAQ;AAE/B,QAAI,aAAa,KAAK,SAAS,QAAQ;AAKrC,YAAM,QAAQ,IAAI,KAAK,QAAQ;AAAA,IACjC;AACA,SAAK,IAAI,QAAQ,uBAAuB;AAAA,EAC1C;AAAA,EAEA,WAAW,IAAgB;AACzB,UAAM,KAAK,KAAK,SAAS,SAAS;AAClC,UAAM,KAAK,KAAK,IAAI,YAAY,MAAM,EAAE;AAExC,UAAM,KACJ,KAAK,SAAS,SAAS,KAAK,kBACxB,KAAK,aAAa,oBAClB,KAAK,aAAa;AACxB,UAAM,EAAC,cAAa;AACpB,UAAM,cAAc,GAAG,SAAS,SAAS,SAAS,KAAK,YAAY,GAAG,IAAI;AAE1E,UAAM,SAAS,OAAO,OAA4B;AAChD,UAAI;AACF,WAAG,QAAQ,qBAAqB;AAChC,gCAAwB,EAAE;AAE1B,YAAI,OAAsB;AAE1B,cAAM,cAAc,OAAO,WAAuB;AAChD,qBAAW,KAAK,SAAS,KAAK;AAC9B,gBAAM,EAAC,YAAW,MAAM,OAAO,IAAI,IAAI,IAAI,MAAM;AAC/C,uBAAW,KAAK,SAAS,KAAK;AAAA,UAChC,CAAC;AACD,iBAAO,WAAW;AAAA,QACpB;AAEA,YAAI,OACF,KAAK,SAAU,MAAM,KAAK,OAAO,QAAQ,aAAa,SAAS;AAEjE,YAAI;AACF,iBAAO,SAAS,QAAQ;AACtB,gBACE,gBAAgB,SACf,SAAS,KAAK,SAAS,KAAK,UAC7B;AACA,oBAAM,KAAK,YAAY;AAAA,YACzB;AACA,kBAAM,YAAY,IAAI;AAGtB,mBAAO,MAAM,KAAK,OAAO,QAAQ,aAAa,SAAS;AAAA,UACzD;AAAA,QACF,UAAA;AAEE,cAAI,KAAK,UAAU;AACjB,kBAAM,YAAY,KAAK,QAAQ;AAAA,UACjC;AAAA,QACF;AAEA,WAAG,QAAQ,qBAAqB;AAGhC,eAAO;AAAA,MACT,SAAS,GAAG;AACV,YAAI,MAAM,KAAK,UAAU;AACvB,eAAK,KAAK,CAAC;AAAA,QACb;AACA,cAAM;AAAA,MACR;AAAA,IACF;AAEA,SAAK,SAAS;AAAA,MACZ,GACG,MAAM,KAAK,OAAO,MAAM,EACxB,MAAM,CAAA,MAAK;AACV,YAAI,aAAa,gBAAgB;AAG/B,aAAG,QAAQ,qBAAqB;AAAA,QAClC,OAAO;AACL,gBAAM;AAAA,QACR;AAAA,MACF,CAAC,EACA,QAAQ,MAAM,KAAK,aAAa;AAAA,IAAA;AAMrC,QAAI,KAAK,OAAO;AACd,WAAK,OAAO,QAAQ,MAAM;AAAA,IAC5B;AACA,QAAI,KAAK,UAAU;AACjB,WAAK,OAAO,QAAQ,KAAK,QAAQ;AAAA,IACnC;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,QAAQ,MAA2B;AACjC,UAAM,IAAI,SAAA;AACV,SAAK,SAAS,KAAK,YAAY,MAAM,CAAC,CAAC;AACvC,WAAO,EAAE;AAAA,EACX;AAAA,EAES,SAAS,YAAY,IAAA;AAAA,EAC9B,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYT,YAAY,MAAY,IAA2B,YAAwB;AACzE,WAAO;AAAA,MACL,KAAK,OAAO,IAAI,IAAI,eAAe;AACjC,YAAI;AACJ,YAAI;AACF,kBAAQ,MAAM,KAAK,IAAI,EAAE;AAAA,QAC3B,SAAS,GAAG;AACV,YAAE,QAAA;AACF,gBAAM;AAAA,QACR,UAAA;AACE,qBAAA;AAAA,QACF;AAEA,YAAI,MAAM,WAAW,GAAG;AACtB,YAAE,QAAA;AACF,iBAAO,EAAC,SAAS,KAAA;AAAA,QACnB;AAKA,cAAM,OAAO,MAAM;AAAA,UACjB,CAAC,GAAG,SACF,KACG,QAAA,EACA,KAAK,MAAM;AACV,gBAAI,EAAE,KAAK,SAAS,QAAS,GAAG;AAC9B,oBAAM,IAAI;AACV,iBAAG;AAAA,gBACD,YAAY,KAAK,MAAM,kBAAkB,YAAY,IAAA,IAAQ,KAAK,QAAQ,QAAQ,CAAC,CAAC;AAAA,gBACpF;AAAA,kBACE,WAAW,EAAE;AAAA,kBACb,QAAQ,UAAU,EAAE,UAAU;AAAA,gBAAA;AAAA,cAChC;AAAA,YAEJ;AAAA,UACF,CAAC,EACA,MAAM,OAAK,KAAK,KAAK,CAAC,CA
AC;AAAA,UAC5B;AAAA,QAAA;AAEF,eAAO,EAAC,SAAS,KAAK,KAAK,EAAE,OAAO,EAAA;AAAA,MACtC;AAAA,MACA,UAAU,EAAE;AAAA,IAAA;AAAA,EAEhB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,gBAAmB,UAAmC;AACpD,UAAM,IAAI,SAAA;AACV,SAAK,SAAS,KAAK,YAAY,UAAU,CAAC,CAAC;AAC3C,WAAO,EAAE;AAAA,EACX;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,YAAe,UAAuB,GAA4B;AAChE,WAAO;AAAA,MACL,KAAK,OAAO,IAAI,IAAI,eAAe;AACjC,YAAI;AACJ,YAAI;AACF,mBAAS,MAAM,SAAS,IAAI,EAAE;AAC9B,qBAAA;AACA,YAAE,QAAQ,MAAM;AAAA,QAClB,SAAS,GAAG;AACV,qBAAA;AACA,YAAE,OAAO,CAAC;AAAA,QACZ;AACA,eAAO,EAAC,SAAS,KAAA;AAAA,MACnB;AAAA,MACA,UAAU,EAAE;AAAA,IAAA;AAAA,EAEhB;AAAA,EAEA,SAAS,QAA0B;AACjC,WAAO,CAAC,KAAK,OAAO,kBAAkB;AACtC,QAAI,KAAK,UAAU;AACjB,aAAO,SAAS,KAAK,QAAQ;AAC7B;AAAA,IACF;AAEA,SAAK,OAAO,QAAQ,MAAM;AAG1B,QAAI,KAAK,cAAc,KAAK,aAAa;AACvC,YAAM,cAAc,KAAK,OAAO,KAAA;AAEhC,UAAI,cAAc,KAAK,cAAc,KAAK,aAAa;AACrD,aAAK,OAAO,KAAK,WAAW,KAAK,GAAG;AACpC,aAAK;AACL,aAAK,IAAI,QAAQ,0BAA0B,KAAK,WAAW,EAAE;AAAA,MAC/D;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,QAAQ;AACN,SAAK,KAAK,IAAI,gBAAgB;AAAA,EAChC;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,UAAU;AACR,WAAO,CAAC,KAAK,OAAO,kBAAkB;AACtC,SAAK,QAAQ;AAEb,aAAS,IAAI,GAAG,IAAI,KAAK,aAAa,KAAK;AACzC,WAAK,OAAO,QAAQ,MAAM;AAAA,IAC5B;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAqBA,IAAI,QAAQ,GAAG;AACb;AAAA,MACE,KAAK,QAAQ,UAAa,CAAC,KAAK;AAAA,MAChC;AAAA,IAAA;AAEF,SAAK,aAAa;AAAA,EACpB;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,QAAQ,GAAG;AACf;AAAA,MACE,SAAS,KAAK;AAAA,MACd,MAAM,gBAAgB,KAAK,qBAAqB,KAAK,SAAS;AAAA,IAAA;AAGhE,SAAK,aAAa;AAClB,QAAI,KAAK,cAAc,GAAG;AACxB,WAAK,QAAA;AAAA,IACP;AAAA,EACF;AAAA,EAEA,YAAqB;AACnB,WAAO,KAAK,QAAQ,UAAa,CAAC,KAAK,SAAS,KAAK,aAAa;AAAA,EACpE;AAAA;AAAA;AAAA;AAAA,EAKA,KAAK,KAAc;AACjB,QAAI,CAAC,KAAK,UAAU;AAClB,WAAK,WAAW,YAAY,GAAG;AAC/B,YAAM,QACJ,KAAK,oBAAoB,mBACrB,UACA,KAAK,oBAAoB,aACvB,SACA;AACR,WAAK,IAAI,KAAK,IAAI,KAAK,QAAQ;AAE/B,eAAS,IAAI,GAAG,IAAI,KAAK,aAAa,KAAK;AAEzC,aAAK,OAAO,QAAQ,KAAK,QAAQ;AAAA,MACnC;AAAA,IACF;AAAA,EACF;AACF;AAgJO,SAAS,eAAe,YAG7B;AACA,QAAM,EAAC,SAAS,UAAU,SAAS,OAAA,IAAU,SAAA;AAE7C,SAAO;AAAA,IACL,MAAM,CAAA,OAAM;AACV,YAAM,OAAO,GAAG,OAAO,6BAA6B,UAAU,GAAG;AACjE,WAAK,KAAK,MAAM,QAAA,GAAW,MAAM;AACjC,aAAO,CAAC,IAAI;AAAA,IACd;AAAA,IAEA;AAAA,EAAA;AAEJ;AAQO,MAAM,yBAAyB,MAAM;AAAA,EAC1C,YAAY,OAAiB;AAC3B,UAAA;AACA,SAAK,QAAQ;AAAA,EACf;AACF;AAYA,MAAM,uBAAuB,iBAAiB;AAAA,EACnC,OAAO;AAAA,EACP,UAAU;AACrB;AAEA,SAAS,YAAY,KAAqB;AACxC,MAAI,eAAe,OAAO;AACxB,WAAO;AAAA,EACT;AACA,QAAM,QAAQ,IAAI,MAAA;AAClB,QAAM,QAAQ;AACd,SAAO;AACT;AAsCA,MAAM,kBAAkB;AAExB,MAAM,uBAAuB;AAE7B,MAAM,iBAAuB,CAAA,OAAM,CAAC,aAAa,QAAQ;AAalD,MAAM,gBAA8B;AAAA,EACzC,mBAAmB;AAAA,IACjB,WAAW;AAAA,IACX,MAAM;AAAA,EAAA;AAAA,EAER,iBAAiB;AAAA,IACf,WAAW;AAAA,IACX,MAAM;AAAA,EAAA;AAEV;"}
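The `sharedSnapshot`/`importSnapshot` tasks in the `transaction-pool` source above build on Postgres snapshot sharing: the first transaction exports a snapshot ID with `pg_export_snapshot()`, and concurrent transactions attach to it with `SET TRANSACTION SNAPSHOT`, so every worker reads the same database state. A minimal standalone sketch of that mechanism with postgres.js, using a placeholder connection string:

```ts
import postgres from 'postgres';

// Placeholder DSN; snapshot import requires the exporting transaction to
// still be open, so two concurrent connections are needed.
const sql = postgres('postgres://user:pass@localhost:5432/db');

async function parallelReadsOnOneSnapshot(): Promise<void> {
  await sql.begin('isolation level repeatable read', async leader => {
    // The leader exports its snapshot ID while its transaction stays open.
    const [{snapshot}] = await leader`SELECT pg_export_snapshot() AS snapshot`;

    // A follower transaction imports the snapshot and now observes exactly
    // the same database state as the leader.
    await sql.begin('isolation level repeatable read', async follower => {
      // Must be the first statement in the follower's transaction.
      await follower.unsafe(`SET TRANSACTION SNAPSHOT '${snapshot}'`);
      // ...parallel consistent reads go here...
    });
  });
  await sql.end();
}
```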
package/out/zero-cache/src/server/inspector-delegate.d.ts
@@ -25,7 +25,7 @@ export declare class InspectorDelegate implements MetricsDelegate {
     };
     getASTForQuery(queryID: string): AST | undefined;
     removeQuery(queryID: string): void;
-    addQuery(
+    addQuery(queryID: string, ast: AST): void;
     /**
      * Check if the client is authenticated. We only require authentication once
      * per "worker".
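The declaration change above collapses the old hash-indirected lookup into a single `addQuery(queryID, ast)` entry point. A self-contained sketch of the resulting bookkeeping, with `AST` reduced to `unknown` as a stand-in for the zero-protocol type:

```ts
type AST = unknown; // stand-in for the zero-protocol AST type

class QueryRegistry {
  // One Map from queryID straight to its AST, instead of
  // queryID -> transformationHash -> AST.
  readonly #queryIDToAST = new Map<string, AST>();

  addQuery(queryID: string, ast: AST): void {
    this.#queryIDToAST.set(queryID, ast);
  }

  getASTForQuery(queryID: string): AST | undefined {
    return this.#queryIDToAST.get(queryID);
  }

  removeQuery(queryID: string): void {
    this.#queryIDToAST.delete(queryID);
  }
}
```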
package/out/zero-cache/src/server/inspector-delegate.d.ts.map
@@ -1 +1 @@
-
{"version":3,"file":"inspector-delegate.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/server/inspector-delegate.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,6BAA6B,CAAC;AAEnE,OAAO,EAAC,OAAO,EAAC,MAAM,gCAAgC,CAAC;AACvD,OAAO,KAAK,EAAC,GAAG,EAAC,MAAM,mCAAmC,CAAC;AAE3D,OAAO,KAAK,EAAC,aAAa,IAAI,iBAAiB,EAAC,MAAM,4CAA4C,CAAC;AAEnG,OAAO,EAEL,KAAK,SAAS,EACd,KAAK,eAAe,EACrB,MAAM,4CAA4C,CAAC;AAEpD,OAAO,KAAK,EAAC,sBAAsB,EAAC,MAAM,sCAAsC,CAAC;AACjF,OAAO,KAAK,EAAC,aAAa,EAAC,MAAM,oBAAoB,CAAC;AAGtD;;;GAGG;AACH,MAAM,MAAM,aAAa,GAAG;IAC1B,8BAA8B,EAAE,OAAO,CAAC;IACxC,qBAAqB,EAAE,OAAO,CAAC;CAChC,CAAC;AAEF,KAAK,aAAa,GAAG,MAAM,CAAC;AAQ5B,qBAAa,iBAAkB,YAAW,eAAe;;
+
{"version":3,"file":"inspector-delegate.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/server/inspector-delegate.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,6BAA6B,CAAC;AAEnE,OAAO,EAAC,OAAO,EAAC,MAAM,gCAAgC,CAAC;AACvD,OAAO,KAAK,EAAC,GAAG,EAAC,MAAM,mCAAmC,CAAC;AAE3D,OAAO,KAAK,EAAC,aAAa,IAAI,iBAAiB,EAAC,MAAM,4CAA4C,CAAC;AAEnG,OAAO,EAEL,KAAK,SAAS,EACd,KAAK,eAAe,EACrB,MAAM,4CAA4C,CAAC;AAEpD,OAAO,KAAK,EAAC,sBAAsB,EAAC,MAAM,sCAAsC,CAAC;AACjF,OAAO,KAAK,EAAC,aAAa,EAAC,MAAM,oBAAoB,CAAC;AAGtD;;;GAGG;AACH,MAAM,MAAM,aAAa,GAAG;IAC1B,8BAA8B,EAAE,OAAO,CAAC;IACxC,qBAAqB,EAAE,OAAO,CAAC;CAChC,CAAC;AAEF,KAAK,aAAa,GAAG,MAAM,CAAC;AAQ5B,qBAAa,iBAAkB,YAAW,eAAe;;gBAM3C,sBAAsB,EAAE,sBAAsB,GAAG,SAAS;IAItE,SAAS,CAAC,CAAC,SAAS,MAAM,SAAS,EACjC,MAAM,EAAE,CAAC,EACT,KAAK,EAAE,MAAM,EACb,GAAG,IAAI,EAAE,SAAS,CAAC,CAAC,CAAC,GACpB,IAAI;IAYP,sBAAsB,CAAC,OAAO,EAAE,MAAM,GAAG,iBAAiB,GAAG,IAAI;IAKjE,cAAc;;;;IAId,cAAc,CAAC,OAAO,EAAE,MAAM,GAAG,GAAG,GAAG,SAAS;IAIhD,WAAW,CAAC,OAAO,EAAE,MAAM,GAAG,IAAI;IAKlC,QAAQ,CAAC,OAAO,EAAE,MAAM,EAAE,GAAG,EAAE,GAAG,GAAG,IAAI;IAIzC;;;OAGG;IACH,eAAe,CAAC,aAAa,EAAE,aAAa,GAAG,OAAO;IAMtD,gBAAgB,CAAC,aAAa,EAAE,aAAa,GAAG,IAAI;IAIpD,kBAAkB,CAAC,aAAa,EAAE,aAAa;IAI/C;;;;OAIG;IACG,oBAAoB,CACxB,IAAI,EAAE,MAAM,EACZ,IAAI,EAAE,SAAS,iBAAiB,EAAE,EAClC,aAAa,EAAE,aAAa,EAC5B,YAAY,EAAE,MAAM,GAAG,SAAS,GAC/B,OAAO,CAAC,GAAG,CAAC;CA2ChB"}
package/out/zero-cache/src/server/inspector-delegate.js
@@ -9,24 +9,20 @@ const authenticatedClientGroupIDs = /* @__PURE__ */ new Set();
 class InspectorDelegate {
   #globalMetrics = newMetrics();
   #perQueryServerMetrics = /* @__PURE__ */ new Map();
-  #
-  #queryIDToTransformationHash = /* @__PURE__ */ new Map();
-  #transformationASTs = /* @__PURE__ */ new Map();
+  #queryIDToAST = /* @__PURE__ */ new Map();
   #customQueryTransformer;
   constructor(customQueryTransformer) {
     this.#customQueryTransformer = customQueryTransformer;
   }
   addMetric(metric, value, ...args) {
     assert(isServerMetric(metric), `Invalid server metric: ${metric}`);
-    const
-
-
-
-
-      this.#perQueryServerMetrics.set(queryID, serverMetrics);
-    }
-    serverMetrics[metric].add(value);
+    const queryID = args[0];
+    let serverMetrics = this.#perQueryServerMetrics.get(queryID);
+    if (!serverMetrics) {
+      serverMetrics = newMetrics();
+      this.#perQueryServerMetrics.set(queryID, serverMetrics);
     }
+    serverMetrics[metric].add(value);
     this.#globalMetrics[metric].add(value);
   }
   getMetricsJSONForQuery(queryID) {
@@ -37,29 +33,14 @@ class InspectorDelegate {
     return mapValues(this.#globalMetrics, (v) => v.toJSON());
   }
   getASTForQuery(queryID) {
-
-    return transformationHash ? this.#transformationASTs.get(transformationHash) : void 0;
+    return this.#queryIDToAST.get(queryID);
   }
   removeQuery(queryID) {
     this.#perQueryServerMetrics.delete(queryID);
-    this.#
-    for (const [transformationHash, idSet] of this.#hashToIDs.entries()) {
-      idSet.delete(queryID);
-      if (idSet.size === 0) {
-        this.#hashToIDs.delete(transformationHash);
-        this.#transformationASTs.delete(transformationHash);
-      }
-    }
+    this.#queryIDToAST.delete(queryID);
   }
-  addQuery(
-
-    if (existing === void 0) {
-      this.#hashToIDs.set(transformationHash, /* @__PURE__ */ new Set([queryID]));
-    } else {
-      existing.add(queryID);
-    }
-    this.#queryIDToTransformationHash.set(queryID, transformationHash);
-    this.#transformationASTs.set(transformationHash, ast);
+  addQuery(queryID, ast) {
+    this.#queryIDToAST.set(queryID, ast);
   }
   /**
    * Check if the client is authenticated. We only require authentication once
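The rewritten `addMetric` uses a get-or-create pattern: per-query metrics materialize lazily on the first reported sample, and every sample also feeds the global aggregate. A runnable distillation, with an illustrative `TDigestStub` in place of the package's `TDigest` (the two metric names match the embedded source):

```ts
// Illustrative stand-in for the package's TDigest; only `add` matters here.
class TDigestStub {
  readonly values: number[] = [];
  add(value: number): void {
    this.values.push(value);
  }
}

type MetricName = 'query-materialization-server' | 'query-update-server';
type ServerMetrics = Record<MetricName, TDigestStub>;

const newMetrics = (): ServerMetrics => ({
  'query-materialization-server': new TDigestStub(),
  'query-update-server': new TDigestStub(),
});

const globalMetrics = newMetrics();
const perQueryServerMetrics = new Map<string, ServerMetrics>();

function addMetric(metric: MetricName, value: number, queryID: string): void {
  // Get-or-create: per-query metrics are created on the first sample.
  let serverMetrics = perQueryServerMetrics.get(queryID);
  if (!serverMetrics) {
    serverMetrics = newMetrics();
    perQueryServerMetrics.set(queryID, serverMetrics);
  }
  // Each sample feeds both the per-query and the global aggregate.
  serverMetrics[metric].add(value);
  globalMetrics[metric].add(value);
}
```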
package/out/zero-cache/src/server/inspector-delegate.js.map
@@ -1 +1 @@
-
{"version":3,"file":"inspector-delegate.js","sources":["../../../../../zero-cache/src/server/inspector-delegate.ts"],"sourcesContent":["import {assert} from '../../../shared/src/asserts.ts';\nimport type {ReadonlyJSONValue} from '../../../shared/src/json.ts';\nimport {mapValues} from '../../../shared/src/objects.ts';\nimport {TDigest} from '../../../shared/src/tdigest.ts';\nimport type {AST} from '../../../zero-protocol/src/ast.ts';\nimport {ProtocolError} from '../../../zero-protocol/src/error.ts';\nimport type {ServerMetrics as ServerMetricsJSON} from '../../../zero-protocol/src/inspect-down.ts';\nimport {hashOfNameAndArgs} from '../../../zero-protocol/src/query-hash.ts';\nimport {\n isServerMetric,\n type MetricMap,\n type MetricsDelegate,\n} from '../../../zql/src/query/metrics-delegate.ts';\nimport {isDevelopmentMode} from '../config/normalize.ts';\nimport type {CustomQueryTransformer} from '../custom-queries/transform-query.ts';\nimport type {HeaderOptions} from '../custom/fetch.ts';\nimport type {CustomQueryRecord} from '../services/view-syncer/schema/types.ts';\n\n/**\n * Server-side metrics collected for queries during materialization and update.\n * These metrics are reported via the inspector and complement client-side metrics.\n */\nexport type ServerMetrics = {\n 'query-materialization-server': TDigest;\n 'query-update-server': TDigest;\n};\n\ntype ClientGroupID = string;\n\n/**\n * Set of authenticated client group IDs. We keep this outside of the class to\n * share this state across all instances of the InspectorDelegate.\n */\nconst authenticatedClientGroupIDs = new Set<ClientGroupID>();\n\nexport class InspectorDelegate implements MetricsDelegate {\n readonly #globalMetrics: ServerMetrics = newMetrics();\n readonly #perQueryServerMetrics = new Map<string, ServerMetrics>();\n readonly #
+
{"version":3,"file":"inspector-delegate.js","sources":["../../../../../zero-cache/src/server/inspector-delegate.ts"],"sourcesContent":["import {assert} from '../../../shared/src/asserts.ts';\nimport type {ReadonlyJSONValue} from '../../../shared/src/json.ts';\nimport {mapValues} from '../../../shared/src/objects.ts';\nimport {TDigest} from '../../../shared/src/tdigest.ts';\nimport type {AST} from '../../../zero-protocol/src/ast.ts';\nimport {ProtocolError} from '../../../zero-protocol/src/error.ts';\nimport type {ServerMetrics as ServerMetricsJSON} from '../../../zero-protocol/src/inspect-down.ts';\nimport {hashOfNameAndArgs} from '../../../zero-protocol/src/query-hash.ts';\nimport {\n isServerMetric,\n type MetricMap,\n type MetricsDelegate,\n} from '../../../zql/src/query/metrics-delegate.ts';\nimport {isDevelopmentMode} from '../config/normalize.ts';\nimport type {CustomQueryTransformer} from '../custom-queries/transform-query.ts';\nimport type {HeaderOptions} from '../custom/fetch.ts';\nimport type {CustomQueryRecord} from '../services/view-syncer/schema/types.ts';\n\n/**\n * Server-side metrics collected for queries during materialization and update.\n * These metrics are reported via the inspector and complement client-side metrics.\n */\nexport type ServerMetrics = {\n 'query-materialization-server': TDigest;\n 'query-update-server': TDigest;\n};\n\ntype ClientGroupID = string;\n\n/**\n * Set of authenticated client group IDs. We keep this outside of the class to\n * share this state across all instances of the InspectorDelegate.\n */\nconst authenticatedClientGroupIDs = new Set<ClientGroupID>();\n\nexport class InspectorDelegate implements MetricsDelegate {\n readonly #globalMetrics: ServerMetrics = newMetrics();\n readonly #perQueryServerMetrics = new Map<string, ServerMetrics>();\n readonly #queryIDToAST: Map<string, AST> = new Map();\n readonly #customQueryTransformer: CustomQueryTransformer | undefined;\n\n constructor(customQueryTransformer: CustomQueryTransformer | undefined) {\n this.#customQueryTransformer = customQueryTransformer;\n }\n\n addMetric<K extends keyof MetricMap>(\n metric: K,\n value: number,\n ...args: MetricMap[K]\n ): void {\n assert(isServerMetric(metric), `Invalid server metric: ${metric}`);\n const queryID = args[0];\n let serverMetrics = this.#perQueryServerMetrics.get(queryID);\n if (!serverMetrics) {\n serverMetrics = newMetrics();\n this.#perQueryServerMetrics.set(queryID, serverMetrics);\n }\n serverMetrics[metric].add(value);\n this.#globalMetrics[metric].add(value);\n }\n\n getMetricsJSONForQuery(queryID: string): ServerMetricsJSON | null {\n const serverMetrics = this.#perQueryServerMetrics.get(queryID);\n return serverMetrics ? mapValues(serverMetrics, v => v.toJSON()) : null;\n }\n\n getMetricsJSON() {\n return mapValues(this.#globalMetrics, v => v.toJSON());\n }\n\n getASTForQuery(queryID: string): AST | undefined {\n return this.#queryIDToAST.get(queryID);\n }\n\n removeQuery(queryID: string): void {\n this.#perQueryServerMetrics.delete(queryID);\n this.#queryIDToAST.delete(queryID);\n }\n\n addQuery(queryID: string, ast: AST): void {\n this.#queryIDToAST.set(queryID, ast);\n }\n\n /**\n * Check if the client is authenticated. 
We only require authentication once\n * per \"worker\".\n */\n isAuthenticated(clientGroupID: ClientGroupID): boolean {\n return (\n isDevelopmentMode() || authenticatedClientGroupIDs.has(clientGroupID)\n );\n }\n\n setAuthenticated(clientGroupID: ClientGroupID): void {\n authenticatedClientGroupIDs.add(clientGroupID);\n }\n\n clearAuthenticated(clientGroupID: ClientGroupID) {\n authenticatedClientGroupIDs.delete(clientGroupID);\n }\n\n /**\n * Transforms a single custom query by name and args using the configured\n * CustomQueryTransformer. This is primarily used by the inspector to transform\n * queries for analysis.\n */\n async transformCustomQuery(\n name: string,\n args: readonly ReadonlyJSONValue[],\n headerOptions: HeaderOptions,\n userQueryURL: string | undefined,\n ): Promise<AST> {\n assert(\n this.#customQueryTransformer,\n 'Custom query transformation requested but no CustomQueryTransformer is configured',\n );\n\n // Create a fake CustomQueryRecord for the single query\n const queryID = hashOfNameAndArgs(name, args);\n const queries: CustomQueryRecord[] = [\n {\n id: queryID,\n type: 'custom',\n name,\n args,\n clientState: {},\n },\n ];\n\n const results = await this.#customQueryTransformer.transform(\n headerOptions,\n queries,\n userQueryURL,\n );\n\n if ('kind' in results) {\n throw new ProtocolError(results);\n }\n\n const result = results[0];\n if (!result) {\n throw new Error('No transformation result returned');\n }\n\n if ('error' in result) {\n const message =\n result.message ?? 'Unknown application error from custom query';\n throw new Error(\n `Error transforming custom query ${name} (${result.error}): ${message} ${JSON.stringify(result.details)}`,\n );\n }\n\n return result.transformedAst;\n }\n}\n\nfunction newMetrics(): ServerMetrics {\n return {\n 'query-materialization-server': new TDigest(),\n 'query-update-server': new TDigest(),\n 
};\n}\n"],"names":[],"mappings":";;;;;;;AAiCA,MAAM,kDAAkC,IAAA;AAEjC,MAAM,kBAA6C;AAAA,EAC/C,iBAAgC,WAAA;AAAA,EAChC,6CAA6B,IAAA;AAAA,EAC7B,oCAAsC,IAAA;AAAA,EACtC;AAAA,EAET,YAAY,wBAA4D;AACtE,SAAK,0BAA0B;AAAA,EACjC;AAAA,EAEA,UACE,QACA,UACG,MACG;AACN,WAAO,eAAe,MAAM,GAAG,0BAA0B,MAAM,EAAE;AACjE,UAAM,UAAU,KAAK,CAAC;AACtB,QAAI,gBAAgB,KAAK,uBAAuB,IAAI,OAAO;AAC3D,QAAI,CAAC,eAAe;AAClB,sBAAgB,WAAA;AAChB,WAAK,uBAAuB,IAAI,SAAS,aAAa;AAAA,IACxD;AACA,kBAAc,MAAM,EAAE,IAAI,KAAK;AAC/B,SAAK,eAAe,MAAM,EAAE,IAAI,KAAK;AAAA,EACvC;AAAA,EAEA,uBAAuB,SAA2C;AAChE,UAAM,gBAAgB,KAAK,uBAAuB,IAAI,OAAO;AAC7D,WAAO,gBAAgB,UAAU,eAAe,OAAK,EAAE,OAAA,CAAQ,IAAI;AAAA,EACrE;AAAA,EAEA,iBAAiB;AACf,WAAO,UAAU,KAAK,gBAAgB,CAAA,MAAK,EAAE,QAAQ;AAAA,EACvD;AAAA,EAEA,eAAe,SAAkC;AAC/C,WAAO,KAAK,cAAc,IAAI,OAAO;AAAA,EACvC;AAAA,EAEA,YAAY,SAAuB;AACjC,SAAK,uBAAuB,OAAO,OAAO;AAC1C,SAAK,cAAc,OAAO,OAAO;AAAA,EACnC;AAAA,EAEA,SAAS,SAAiB,KAAgB;AACxC,SAAK,cAAc,IAAI,SAAS,GAAG;AAAA,EACrC;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,gBAAgB,eAAuC;AACrD,WACE,kBAAA,KAAuB,4BAA4B,IAAI,aAAa;AAAA,EAExE;AAAA,EAEA,iBAAiB,eAAoC;AACnD,gCAA4B,IAAI,aAAa;AAAA,EAC/C;AAAA,EAEA,mBAAmB,eAA8B;AAC/C,gCAA4B,OAAO,aAAa;AAAA,EAClD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,qBACJ,MACA,MACA,eACA,cACc;AACd;AAAA,MACE,KAAK;AAAA,MACL;AAAA,IAAA;AAIF,UAAM,UAAU,kBAAkB,MAAM,IAAI;AAC5C,UAAM,UAA+B;AAAA,MACnC;AAAA,QACE,IAAI;AAAA,QACJ,MAAM;AAAA,QACN;AAAA,QACA;AAAA,QACA,aAAa,CAAA;AAAA,MAAC;AAAA,IAChB;AAGF,UAAM,UAAU,MAAM,KAAK,wBAAwB;AAAA,MACjD;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAGF,QAAI,UAAU,SAAS;AACrB,YAAM,IAAI,cAAc,OAAO;AAAA,IACjC;AAEA,UAAM,SAAS,QAAQ,CAAC;AACxB,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI,MAAM,mCAAmC;AAAA,IACrD;AAEA,QAAI,WAAW,QAAQ;AACrB,YAAM,UACJ,OAAO,WAAW;AACpB,YAAM,IAAI;AAAA,QACR,mCAAmC,IAAI,KAAK,OAAO,KAAK,MAAM,OAAO,IAAI,KAAK,UAAU,OAAO,OAAO,CAAC;AAAA,MAAA;AAAA,IAE3G;AAEA,WAAO,OAAO;AAAA,EAChB;AACF;AAEA,SAAS,aAA4B;AACnC,SAAO;AAAA,IACL,gCAAgC,IAAI,QAAA;AAAA,IACpC,uBAAuB,IAAI,QAAA;AAAA,EAAQ;AAEvC;"}
package/out/zero-cache/src/server/main.js
@@ -52,7 +52,7 @@ async function runWorker(parent, env) {
       restoreStart = await restoreReplica(lc, config);
     } catch (e) {
       if (runChangeStreamer) {
-        lc.error?.("error restoring backup. resyncing the replica.");
+        lc.error?.("error restoring backup. resyncing the replica.", e);
       } else {
         throw e;
       }
package/out/zero-cache/src/server/main.js.map
@@ -1 +1 @@
-
{"version":3,"file":"main.js","sources":["../../../../../zero-cache/src/server/main.ts"],"sourcesContent":["import {resolver} from '@rocicorp/resolver';\nimport path from 'node:path';\nimport {must} from '../../../shared/src/must.ts';\nimport {getNormalizedZeroConfig} from '../config/zero-config.ts';\nimport {initEventSink} from '../observability/events.ts';\nimport {\n exitAfter,\n ProcessManager,\n runUntilKilled,\n type WorkerType,\n} from '../services/life-cycle.ts';\nimport {\n restoreReplica,\n startReplicaBackupProcess,\n} from '../services/litestream/commands.ts';\nimport {\n childWorker,\n parentWorker,\n singleProcessMode,\n type Worker,\n} from '../types/processes.ts';\nimport {\n createNotifierFrom,\n handleSubscriptionsFrom,\n type ReplicaFileMode,\n subscribeTo,\n} from '../workers/replicator.ts';\nimport {createLogContext} from './logging.ts';\nimport {startOtelAuto} from './otel-start.ts';\nimport {WorkerDispatcher} from './worker-dispatcher.ts';\nimport {\n CHANGE_STREAMER_URL,\n MUTATOR_URL,\n REAPER_URL,\n REPLICATOR_URL,\n SYNCER_URL,\n} from './worker-urls.ts';\n\nconst clientConnectionBifurcated = false;\n\nexport default async function runWorker(\n parent: Worker,\n env: NodeJS.ProcessEnv,\n): Promise<void> {\n const startMs = Date.now();\n const config = getNormalizedZeroConfig({env});\n\n startOtelAuto(createLogContext(config, {worker: 'dispatcher'}, false));\n const lc = createLogContext(config, {worker: 'dispatcher'}, true);\n initEventSink(lc, config);\n\n const processes = new ProcessManager(lc, parent);\n\n const {numSyncWorkers: numSyncers} = config;\n if (config.upstream.maxConns < numSyncers) {\n throw new Error(\n `Insufficient upstream connections (${config.upstream.maxConns}) for ${numSyncers} syncers.` +\n `Increase ZERO_UPSTREAM_MAX_CONNS or decrease ZERO_NUM_SYNC_WORKERS (which defaults to available cores).`,\n );\n }\n if (config.cvr.maxConns < numSyncers) {\n throw new Error(\n `Insufficient cvr connections (${config.cvr.maxConns}) for ${numSyncers} syncers.` +\n `Increase ZERO_CVR_MAX_CONNS or decrease ZERO_NUM_SYNC_WORKERS (which defaults to available cores).`,\n );\n }\n\n const internalFlags: string[] =\n numSyncers === 0\n ? []\n : [\n '--upstream-max-conns-per-worker',\n String(Math.floor(config.upstream.maxConns / numSyncers)),\n '--cvr-max-conns-per-worker',\n String(Math.floor(config.cvr.maxConns / numSyncers)),\n ];\n\n function loadWorker(\n moduleUrl: URL,\n type: WorkerType,\n id?: string | number,\n ...args: string[]\n ): Worker {\n const worker = childWorker(moduleUrl, env, ...args, ...internalFlags);\n const name = path.basename(moduleUrl.pathname) + (id ? ` (${id})` : '');\n return processes.addWorker(worker, type, name);\n }\n\n const {\n taskID,\n changeStreamer: {mode: changeStreamerMode, uri: changeStreamerURI},\n litestream,\n } = config;\n const runChangeStreamer =\n changeStreamerMode === 'dedicated' && changeStreamerURI === undefined;\n\n let restoreStart = new Date();\n if (litestream.backupURL || (litestream.executable && !runChangeStreamer)) {\n try {\n restoreStart = await restoreReplica(lc, config);\n } catch (e) {\n if (runChangeStreamer) {\n // If the restore failed, e.g. due to a corrupt backup, the\n // replication-manager recovers by re-syncing.\n lc.error?.('error restoring backup. resyncing the replica.');\n } else {\n // View-syncers, on the other hand, have no option other than to retry\n // until a valid backup has been published. 
This is achieved by\n // shutting down and letting the container runner retry with its\n // configured policy.\n throw e;\n }\n }\n }\n\n const {promise: changeStreamerReady, resolve: changeStreamerStarted} =\n resolver();\n const changeStreamer = runChangeStreamer\n ? loadWorker(\n CHANGE_STREAMER_URL,\n 'supporting',\n undefined,\n String(restoreStart.getTime()),\n ).once('message', changeStreamerStarted)\n : (changeStreamerStarted() ?? undefined);\n\n const {promise: reaperReady, resolve: reaperStarted} = resolver();\n if (numSyncers > 0) {\n loadWorker(REAPER_URL, 'supporting').once('message', reaperStarted);\n } else {\n reaperStarted();\n }\n\n // Wait for the change-streamer to be ready to guarantee that a replica\n // file is present.\n await changeStreamerReady;\n\n if (runChangeStreamer && litestream.backupURL) {\n // Start a backup replicator and corresponding litestream backup process.\n const {promise: backupReady, resolve} = resolver();\n const mode: ReplicaFileMode = 'backup';\n loadWorker(REPLICATOR_URL, 'supporting', mode, mode).once(\n // Wait for the Replicator's first message (i.e. \"ready\") before starting\n // litestream backup in order to avoid contending on the lock when the\n // replicator first prepares the db file.\n 'message',\n () => {\n processes.addSubprocess(\n startReplicaBackupProcess(config),\n 'supporting',\n 'litestream',\n );\n resolve();\n },\n );\n await backupReady;\n }\n\n // Before starting the view-syncers, ensure that the reaper has started\n // up, indicating that any CVR db migrations have been performed.\n await reaperReady;\n\n const syncers: Worker[] = [];\n if (numSyncers) {\n const mode: ReplicaFileMode =\n runChangeStreamer && litestream.backupURL ? 'serving-copy' : 'serving';\n const {promise: replicaReady, resolve} = resolver();\n const replicator = loadWorker(\n REPLICATOR_URL,\n 'supporting',\n mode,\n mode,\n ).once('message', () => {\n subscribeTo(lc, replicator);\n resolve();\n });\n await replicaReady;\n\n const notifier = createNotifierFrom(lc, replicator);\n for (let i = 0; i < numSyncers; i++) {\n syncers.push(loadWorker(SYNCER_URL, 'user-facing', i + 1, mode));\n }\n syncers.forEach(syncer => handleSubscriptionsFrom(lc, syncer, notifier));\n }\n let mutator: Worker | undefined;\n if (clientConnectionBifurcated) {\n mutator = loadWorker(MUTATOR_URL, 'supporting', 'mutator');\n }\n\n lc.info?.('waiting for workers to be ready ...');\n const logWaiting = setInterval(\n () => lc.info?.(`still waiting for ${processes.initializing().join(', ')}`),\n 10_000,\n );\n await processes.allWorkersReady();\n clearInterval(logWaiting);\n lc.info?.(`all workers ready (${Date.now() - startMs} ms)`);\n\n parent.send(['ready', {ready: true}]);\n\n try {\n await runUntilKilled(\n lc,\n parent,\n new WorkerDispatcher(\n lc,\n taskID,\n parent,\n syncers,\n mutator,\n changeStreamer,\n ),\n );\n } catch (err) {\n processes.logErrorAndExit(err, 'dispatcher');\n }\n\n await processes.done();\n}\n\nif (!singleProcessMode()) {\n void exitAfter(() => runWorker(must(parentWorker), 
process.env));\n}\n"],"names":[],"mappings":";;;;;;;;;;;;;AAwCA,eAA8B,UAC5B,QACA,KACe;AACf,QAAM,UAAU,KAAK,IAAA;AACrB,QAAM,SAAS,wBAAwB,EAAC,KAAI;AAE5C,gBAAc,iBAAiB,QAAQ,EAAC,QAAQ,aAAA,GAAe,KAAK,CAAC;AACrE,QAAM,KAAK,iBAAiB,QAAQ,EAAC,QAAQ,aAAA,GAAe,IAAI;AAChE,gBAAc,IAAI,MAAM;AAExB,QAAM,YAAY,IAAI,eAAe,IAAI,MAAM;AAE/C,QAAM,EAAC,gBAAgB,WAAA,IAAc;AACrC,MAAI,OAAO,SAAS,WAAW,YAAY;AACzC,UAAM,IAAI;AAAA,MACR,sCAAsC,OAAO,SAAS,QAAQ,SAAS,UAAU;AAAA,IAAA;AAAA,EAGrF;AACA,MAAI,OAAO,IAAI,WAAW,YAAY;AACpC,UAAM,IAAI;AAAA,MACR,iCAAiC,OAAO,IAAI,QAAQ,SAAS,UAAU;AAAA,IAAA;AAAA,EAG3E;AAEA,QAAM,gBACJ,eAAe,IACX,KACA;AAAA,IACE;AAAA,IACA,OAAO,KAAK,MAAM,OAAO,SAAS,WAAW,UAAU,CAAC;AAAA,IACxD;AAAA,IACA,OAAO,KAAK,MAAM,OAAO,IAAI,WAAW,UAAU,CAAC;AAAA,EAAA;AAG3D,WAAS,WACP,WACA,MACA,OACG,MACK;AACR,UAAM,SAAS,YAAY,WAAW,KAAK,GAAG,MAAM,GAAG,aAAa;AACpE,UAAM,OAAO,KAAK,SAAS,UAAU,QAAQ,KAAK,KAAK,KAAK,EAAE,MAAM;AACpE,WAAO,UAAU,UAAU,QAAQ,MAAM,IAAI;AAAA,EAC/C;AAEA,QAAM;AAAA,IACJ;AAAA,IACA,gBAAgB,EAAC,MAAM,oBAAoB,KAAK,kBAAA;AAAA,IAChD;AAAA,EAAA,IACE;AACJ,QAAM,oBACJ,uBAAuB,eAAe,sBAAsB;AAE9D,MAAI,mCAAmB,KAAA;AACvB,MAAI,WAAW,aAAc,WAAW,cAAc,CAAC,mBAAoB;AACzE,QAAI;AACF,qBAAe,MAAM,eAAe,IAAI,MAAM;AAAA,IAChD,SAAS,GAAG;AACV,UAAI,mBAAmB;AAGrB,WAAG,QAAQ,gDAAgD;AAAA,MAC7D,OAAO;AAKL,cAAM;AAAA,MACR;AAAA,IACF;AAAA,EACF;AAEA,QAAM,EAAC,SAAS,qBAAqB,SAAS,sBAAA,IAC5C,SAAA;AACF,QAAM,iBAAiB,oBACnB;AAAA,IACE;AAAA,IACA;AAAA,IACA;AAAA,IACA,OAAO,aAAa,QAAA,CAAS;AAAA,EAAA,EAC7B,KAAK,WAAW,qBAAqB,IACtC,2BAA2B;AAEhC,QAAM,EAAC,SAAS,aAAa,SAAS,cAAA,IAAiB,SAAA;AACvD,MAAI,aAAa,GAAG;AAClB,eAAW,YAAY,YAAY,EAAE,KAAK,WAAW,aAAa;AAAA,EACpE,OAAO;AACL,kBAAA;AAAA,EACF;AAIA,QAAM;AAEN,MAAI,qBAAqB,WAAW,WAAW;AAE7C,UAAM,EAAC,SAAS,aAAa,QAAA,IAAW,SAAA;AACxC,UAAM,OAAwB;AAC9B,eAAW,gBAAgB,cAAc,MAAM,IAAI,EAAE;AAAA;AAAA;AAAA;AAAA,MAInD;AAAA,MACA,MAAM;AACJ,kBAAU;AAAA,UACR,0BAA0B,MAAM;AAAA,UAChC;AAAA,UACA;AAAA,QAAA;AAEF,gBAAA;AAAA,MACF;AAAA,IAAA;AAEF,UAAM;AAAA,EACR;AAIA,QAAM;AAEN,QAAM,UAAoB,CAAA;AAC1B,MAAI,YAAY;AACd,UAAM,OACJ,qBAAqB,WAAW,YAAY,iBAAiB;AAC/D,UAAM,EAAC,SAAS,cAAc,QAAA,IAAW,SAAA;AACzC,UAAM,aAAa;AAAA,MACjB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA,EACA,KAAK,WAAW,MAAM;AACtB,kBAAY,IAAI,UAAU;AAC1B,cAAA;AAAA,IACF,CAAC;AACD,UAAM;AAEN,UAAM,WAAW,mBAAmB,IAAI,UAAU;AAClD,aAAS,IAAI,GAAG,IAAI,YAAY,KAAK;AACnC,cAAQ,KAAK,WAAW,YAAY,eAAe,IAAI,GAAG,IAAI,CAAC;AAAA,IACjE;AACA,YAAQ,QAAQ,CAAA,WAAU,wBAAwB,IAAI,QAAQ,QAAQ,CAAC;AAAA,EACzE;AACA,MAAI;AAKJ,KAAG,OAAO,qCAAqC;AAC/C,QAAM,aAAa;AAAA,IACjB,MAAM,GAAG,OAAO,qBAAqB,UAAU,eAAe,KAAK,IAAI,CAAC,EAAE;AAAA,IAC1E;AAAA,EAAA;AAEF,QAAM,UAAU,gBAAA;AAChB,gBAAc,UAAU;AACxB,KAAG,OAAO,sBAAsB,KAAK,QAAQ,OAAO,MAAM;AAE1D,SAAO,KAAK,CAAC,SAAS,EAAC,OAAO,KAAA,CAAK,CAAC;AAEpC,MAAI;AACF,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA,IAAI;AAAA,QACF;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,EAEJ,SAAS,KAAK;AACZ,cAAU,gBAAgB,KAAK,YAAY;AAAA,EAC7C;AAEA,QAAM,UAAU,KAAA;AAClB;AAEA,IAAI,CAAC,qBAAqB;AACxB,OAAK,UAAU,MAAM,UAAU,KAAK,YAAY,GAAG,QAAQ,GAAG,CAAC;AACjE;"}
+
{"version":3,"file":"main.js","sources":["../../../../../zero-cache/src/server/main.ts"],"sourcesContent":["import {resolver} from '@rocicorp/resolver';\nimport path from 'node:path';\nimport {must} from '../../../shared/src/must.ts';\nimport {getNormalizedZeroConfig} from '../config/zero-config.ts';\nimport {initEventSink} from '../observability/events.ts';\nimport {\n exitAfter,\n ProcessManager,\n runUntilKilled,\n type WorkerType,\n} from '../services/life-cycle.ts';\nimport {\n restoreReplica,\n startReplicaBackupProcess,\n} from '../services/litestream/commands.ts';\nimport {\n childWorker,\n parentWorker,\n singleProcessMode,\n type Worker,\n} from '../types/processes.ts';\nimport {\n createNotifierFrom,\n handleSubscriptionsFrom,\n type ReplicaFileMode,\n subscribeTo,\n} from '../workers/replicator.ts';\nimport {createLogContext} from './logging.ts';\nimport {startOtelAuto} from './otel-start.ts';\nimport {WorkerDispatcher} from './worker-dispatcher.ts';\nimport {\n CHANGE_STREAMER_URL,\n MUTATOR_URL,\n REAPER_URL,\n REPLICATOR_URL,\n SYNCER_URL,\n} from './worker-urls.ts';\n\nconst clientConnectionBifurcated = false;\n\nexport default async function runWorker(\n parent: Worker,\n env: NodeJS.ProcessEnv,\n): Promise<void> {\n const startMs = Date.now();\n const config = getNormalizedZeroConfig({env});\n\n startOtelAuto(createLogContext(config, {worker: 'dispatcher'}, false));\n const lc = createLogContext(config, {worker: 'dispatcher'}, true);\n initEventSink(lc, config);\n\n const processes = new ProcessManager(lc, parent);\n\n const {numSyncWorkers: numSyncers} = config;\n if (config.upstream.maxConns < numSyncers) {\n throw new Error(\n `Insufficient upstream connections (${config.upstream.maxConns}) for ${numSyncers} syncers.` +\n `Increase ZERO_UPSTREAM_MAX_CONNS or decrease ZERO_NUM_SYNC_WORKERS (which defaults to available cores).`,\n );\n }\n if (config.cvr.maxConns < numSyncers) {\n throw new Error(\n `Insufficient cvr connections (${config.cvr.maxConns}) for ${numSyncers} syncers.` +\n `Increase ZERO_CVR_MAX_CONNS or decrease ZERO_NUM_SYNC_WORKERS (which defaults to available cores).`,\n );\n }\n\n const internalFlags: string[] =\n numSyncers === 0\n ? []\n : [\n '--upstream-max-conns-per-worker',\n String(Math.floor(config.upstream.maxConns / numSyncers)),\n '--cvr-max-conns-per-worker',\n String(Math.floor(config.cvr.maxConns / numSyncers)),\n ];\n\n function loadWorker(\n moduleUrl: URL,\n type: WorkerType,\n id?: string | number,\n ...args: string[]\n ): Worker {\n const worker = childWorker(moduleUrl, env, ...args, ...internalFlags);\n const name = path.basename(moduleUrl.pathname) + (id ? ` (${id})` : '');\n return processes.addWorker(worker, type, name);\n }\n\n const {\n taskID,\n changeStreamer: {mode: changeStreamerMode, uri: changeStreamerURI},\n litestream,\n } = config;\n const runChangeStreamer =\n changeStreamerMode === 'dedicated' && changeStreamerURI === undefined;\n\n let restoreStart = new Date();\n if (litestream.backupURL || (litestream.executable && !runChangeStreamer)) {\n try {\n restoreStart = await restoreReplica(lc, config);\n } catch (e) {\n if (runChangeStreamer) {\n // If the restore failed, e.g. due to a corrupt backup, the\n // replication-manager recovers by re-syncing.\n lc.error?.('error restoring backup. resyncing the replica.', e);\n } else {\n // View-syncers, on the other hand, have no option other than to retry\n // until a valid backup has been published. 
This is achieved by\n // shutting down and letting the container runner retry with its\n // configured policy.\n throw e;\n }\n }\n }\n\n const {promise: changeStreamerReady, resolve: changeStreamerStarted} =\n resolver();\n const changeStreamer = runChangeStreamer\n ? loadWorker(\n CHANGE_STREAMER_URL,\n 'supporting',\n undefined,\n String(restoreStart.getTime()),\n ).once('message', changeStreamerStarted)\n : (changeStreamerStarted() ?? undefined);\n\n const {promise: reaperReady, resolve: reaperStarted} = resolver();\n if (numSyncers > 0) {\n loadWorker(REAPER_URL, 'supporting').once('message', reaperStarted);\n } else {\n reaperStarted();\n }\n\n // Wait for the change-streamer to be ready to guarantee that a replica\n // file is present.\n await changeStreamerReady;\n\n if (runChangeStreamer && litestream.backupURL) {\n // Start a backup replicator and corresponding litestream backup process.\n const {promise: backupReady, resolve} = resolver();\n const mode: ReplicaFileMode = 'backup';\n loadWorker(REPLICATOR_URL, 'supporting', mode, mode).once(\n // Wait for the Replicator's first message (i.e. \"ready\") before starting\n // litestream backup in order to avoid contending on the lock when the\n // replicator first prepares the db file.\n 'message',\n () => {\n processes.addSubprocess(\n startReplicaBackupProcess(config),\n 'supporting',\n 'litestream',\n );\n resolve();\n },\n );\n await backupReady;\n }\n\n // Before starting the view-syncers, ensure that the reaper has started\n // up, indicating that any CVR db migrations have been performed.\n await reaperReady;\n\n const syncers: Worker[] = [];\n if (numSyncers) {\n const mode: ReplicaFileMode =\n runChangeStreamer && litestream.backupURL ? 'serving-copy' : 'serving';\n const {promise: replicaReady, resolve} = resolver();\n const replicator = loadWorker(\n REPLICATOR_URL,\n 'supporting',\n mode,\n mode,\n ).once('message', () => {\n subscribeTo(lc, replicator);\n resolve();\n });\n await replicaReady;\n\n const notifier = createNotifierFrom(lc, replicator);\n for (let i = 0; i < numSyncers; i++) {\n syncers.push(loadWorker(SYNCER_URL, 'user-facing', i + 1, mode));\n }\n syncers.forEach(syncer => handleSubscriptionsFrom(lc, syncer, notifier));\n }\n let mutator: Worker | undefined;\n if (clientConnectionBifurcated) {\n mutator = loadWorker(MUTATOR_URL, 'supporting', 'mutator');\n }\n\n lc.info?.('waiting for workers to be ready ...');\n const logWaiting = setInterval(\n () => lc.info?.(`still waiting for ${processes.initializing().join(', ')}`),\n 10_000,\n );\n await processes.allWorkersReady();\n clearInterval(logWaiting);\n lc.info?.(`all workers ready (${Date.now() - startMs} ms)`);\n\n parent.send(['ready', {ready: true}]);\n\n try {\n await runUntilKilled(\n lc,\n parent,\n new WorkerDispatcher(\n lc,\n taskID,\n parent,\n syncers,\n mutator,\n changeStreamer,\n ),\n );\n } catch (err) {\n processes.logErrorAndExit(err, 'dispatcher');\n }\n\n await processes.done();\n}\n\nif (!singleProcessMode()) {\n void exitAfter(() => runWorker(must(parentWorker), 
process.env));\n}\n"],"names":[],"mappings":";;;;;;;;;;;;;AAwCA,eAA8B,UAC5B,QACA,KACe;AACf,QAAM,UAAU,KAAK,IAAA;AACrB,QAAM,SAAS,wBAAwB,EAAC,KAAI;AAE5C,gBAAc,iBAAiB,QAAQ,EAAC,QAAQ,aAAA,GAAe,KAAK,CAAC;AACrE,QAAM,KAAK,iBAAiB,QAAQ,EAAC,QAAQ,aAAA,GAAe,IAAI;AAChE,gBAAc,IAAI,MAAM;AAExB,QAAM,YAAY,IAAI,eAAe,IAAI,MAAM;AAE/C,QAAM,EAAC,gBAAgB,WAAA,IAAc;AACrC,MAAI,OAAO,SAAS,WAAW,YAAY;AACzC,UAAM,IAAI;AAAA,MACR,sCAAsC,OAAO,SAAS,QAAQ,SAAS,UAAU;AAAA,IAAA;AAAA,EAGrF;AACA,MAAI,OAAO,IAAI,WAAW,YAAY;AACpC,UAAM,IAAI;AAAA,MACR,iCAAiC,OAAO,IAAI,QAAQ,SAAS,UAAU;AAAA,IAAA;AAAA,EAG3E;AAEA,QAAM,gBACJ,eAAe,IACX,KACA;AAAA,IACE;AAAA,IACA,OAAO,KAAK,MAAM,OAAO,SAAS,WAAW,UAAU,CAAC;AAAA,IACxD;AAAA,IACA,OAAO,KAAK,MAAM,OAAO,IAAI,WAAW,UAAU,CAAC;AAAA,EAAA;AAG3D,WAAS,WACP,WACA,MACA,OACG,MACK;AACR,UAAM,SAAS,YAAY,WAAW,KAAK,GAAG,MAAM,GAAG,aAAa;AACpE,UAAM,OAAO,KAAK,SAAS,UAAU,QAAQ,KAAK,KAAK,KAAK,EAAE,MAAM;AACpE,WAAO,UAAU,UAAU,QAAQ,MAAM,IAAI;AAAA,EAC/C;AAEA,QAAM;AAAA,IACJ;AAAA,IACA,gBAAgB,EAAC,MAAM,oBAAoB,KAAK,kBAAA;AAAA,IAChD;AAAA,EAAA,IACE;AACJ,QAAM,oBACJ,uBAAuB,eAAe,sBAAsB;AAE9D,MAAI,mCAAmB,KAAA;AACvB,MAAI,WAAW,aAAc,WAAW,cAAc,CAAC,mBAAoB;AACzE,QAAI;AACF,qBAAe,MAAM,eAAe,IAAI,MAAM;AAAA,IAChD,SAAS,GAAG;AACV,UAAI,mBAAmB;AAGrB,WAAG,QAAQ,kDAAkD,CAAC;AAAA,MAChE,OAAO;AAKL,cAAM;AAAA,MACR;AAAA,IACF;AAAA,EACF;AAEA,QAAM,EAAC,SAAS,qBAAqB,SAAS,sBAAA,IAC5C,SAAA;AACF,QAAM,iBAAiB,oBACnB;AAAA,IACE;AAAA,IACA;AAAA,IACA;AAAA,IACA,OAAO,aAAa,QAAA,CAAS;AAAA,EAAA,EAC7B,KAAK,WAAW,qBAAqB,IACtC,2BAA2B;AAEhC,QAAM,EAAC,SAAS,aAAa,SAAS,cAAA,IAAiB,SAAA;AACvD,MAAI,aAAa,GAAG;AAClB,eAAW,YAAY,YAAY,EAAE,KAAK,WAAW,aAAa;AAAA,EACpE,OAAO;AACL,kBAAA;AAAA,EACF;AAIA,QAAM;AAEN,MAAI,qBAAqB,WAAW,WAAW;AAE7C,UAAM,EAAC,SAAS,aAAa,QAAA,IAAW,SAAA;AACxC,UAAM,OAAwB;AAC9B,eAAW,gBAAgB,cAAc,MAAM,IAAI,EAAE;AAAA;AAAA;AAAA;AAAA,MAInD;AAAA,MACA,MAAM;AACJ,kBAAU;AAAA,UACR,0BAA0B,MAAM;AAAA,UAChC;AAAA,UACA;AAAA,QAAA;AAEF,gBAAA;AAAA,MACF;AAAA,IAAA;AAEF,UAAM;AAAA,EACR;AAIA,QAAM;AAEN,QAAM,UAAoB,CAAA;AAC1B,MAAI,YAAY;AACd,UAAM,OACJ,qBAAqB,WAAW,YAAY,iBAAiB;AAC/D,UAAM,EAAC,SAAS,cAAc,QAAA,IAAW,SAAA;AACzC,UAAM,aAAa;AAAA,MACjB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA,EACA,KAAK,WAAW,MAAM;AACtB,kBAAY,IAAI,UAAU;AAC1B,cAAA;AAAA,IACF,CAAC;AACD,UAAM;AAEN,UAAM,WAAW,mBAAmB,IAAI,UAAU;AAClD,aAAS,IAAI,GAAG,IAAI,YAAY,KAAK;AACnC,cAAQ,KAAK,WAAW,YAAY,eAAe,IAAI,GAAG,IAAI,CAAC;AAAA,IACjE;AACA,YAAQ,QAAQ,CAAA,WAAU,wBAAwB,IAAI,QAAQ,QAAQ,CAAC;AAAA,EACzE;AACA,MAAI;AAKJ,KAAG,OAAO,qCAAqC;AAC/C,QAAM,aAAa;AAAA,IACjB,MAAM,GAAG,OAAO,qBAAqB,UAAU,eAAe,KAAK,IAAI,CAAC,EAAE;AAAA,IAC1E;AAAA,EAAA;AAEF,QAAM,UAAU,gBAAA;AAChB,gBAAc,UAAU;AACxB,KAAG,OAAO,sBAAsB,KAAK,QAAQ,OAAO,MAAM;AAE1D,SAAO,KAAK,CAAC,SAAS,EAAC,OAAO,KAAA,CAAK,CAAC;AAEpC,MAAI;AACF,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA,IAAI;AAAA,QACF;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MAAA;AAAA,IACF;AAAA,EAEJ,SAAS,KAAK;AACZ,cAAU,gBAAgB,KAAK,YAAY;AAAA,EAC7C;AAEA,QAAM,UAAU,KAAA;AAClB;AAEA,IAAI,CAAC,qBAAqB;AACxB,OAAK,UAAU,MAAM,UAAU,KAAK,YAAY,GAAG,QAAQ,GAAG,CAAC;AACjE;"}
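The `main.ts` source embedded above repeatedly gates startup with `resolver()` promises that a child worker's first `'message'` event resolves (change-streamer, reaper, replicator). A reduced sketch of that idiom, with Node's `EventEmitter` standing in for the package's `Worker` type:

```ts
import {resolver} from '@rocicorp/resolver';
import {EventEmitter} from 'node:events';

// Await a worker's first 'message' before proceeding, mirroring how
// runWorker waits for each supporting worker to signal readiness.
async function awaitReady(worker: EventEmitter): Promise<void> {
  const {promise: ready, resolve: started} = resolver<void>();
  worker.once('message', () => started());
  await ready;
}
```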
package/out/zero-cache/src/server/priority-op.d.ts
@@ -0,0 +1,8 @@
+import type { LogContext } from '@rocicorp/logger';
+/**
+ * Run an operation with priority, indicating that IVM should use smaller time
+ * slices to allow this operation to proceed more quickly
+ */
+export declare function runPriorityOp<T>(lc: LogContext, description: string, op: () => Promise<T>): Promise<T>;
+export declare function isPriorityOpRunning(): boolean;
+//# sourceMappingURL=priority-op.d.ts.map
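These declarations pin down only the API surface; the implementation ships in `priority-op.js`, which this diff does not show. One plausible shape consistent with the signatures, assumed for illustration rather than taken from the package, is a module-level counter that `isPriorityOpRunning()` reads:

```ts
import type {LogContext} from '@rocicorp/logger';

// Assumed bookkeeping: a counter of in-flight priority operations.
let priorityOps = 0;

export async function runPriorityOp<T>(
  lc: LogContext,
  description: string,
  op: () => Promise<T>,
): Promise<T> {
  lc.debug?.(`starting priority op: ${description}`);
  priorityOps++;
  try {
    return await op();
  } finally {
    priorityOps--;
    lc.debug?.(`finished priority op: ${description}`);
  }
}

export function isPriorityOpRunning(): boolean {
  return priorityOps > 0;
}
```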
package/out/zero-cache/src/server/priority-op.d.ts.map
@@ -0,0 +1 @@
+
{"version":3,"file":"priority-op.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/server/priority-op.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAKjD;;;GAGG;AACH,wBAAsB,aAAa,CAAC,CAAC,EACnC,EAAE,EAAE,UAAU,EACd,WAAW,EAAE,MAAM,EACnB,EAAE,EAAE,MAAM,OAAO,CAAC,CAAC,CAAC,cAmBrB;AAED,wBAAgB,mBAAmB,YAElC"}