@rocicorp/zero 0.26.0-canary.2 → 0.26.0-canary.3
This diff compares the published contents of two package versions as they appear in their respective public registries. It is provided for informational purposes only.
- package/README.md +1 -1
- package/out/replicache/src/persist/collect-idb-databases.d.ts +4 -4
- package/out/replicache/src/persist/collect-idb-databases.d.ts.map +1 -1
- package/out/replicache/src/persist/collect-idb-databases.js +22 -19
- package/out/replicache/src/persist/collect-idb-databases.js.map +1 -1
- package/out/replicache/src/persist/refresh.d.ts.map +1 -1
- package/out/replicache/src/persist/refresh.js +0 -8
- package/out/replicache/src/persist/refresh.js.map +1 -1
- package/out/replicache/src/process-scheduler.d.ts +23 -0
- package/out/replicache/src/process-scheduler.d.ts.map +1 -1
- package/out/replicache/src/process-scheduler.js +50 -1
- package/out/replicache/src/process-scheduler.js.map +1 -1
- package/out/replicache/src/replicache-impl.d.ts +8 -0
- package/out/replicache/src/replicache-impl.d.ts.map +1 -1
- package/out/replicache/src/replicache-impl.js +11 -2
- package/out/replicache/src/replicache-impl.js.map +1 -1
- package/out/shared/src/falsy.d.ts +3 -0
- package/out/shared/src/falsy.d.ts.map +1 -0
- package/out/zero/package.json.js +1 -1
- package/out/zero/src/adapters/drizzle.js +1 -2
- package/out/zero/src/adapters/prisma.d.ts +2 -0
- package/out/zero/src/adapters/prisma.d.ts.map +1 -0
- package/out/zero/src/adapters/prisma.js +6 -0
- package/out/zero/src/adapters/prisma.js.map +1 -0
- package/out/zero/src/pg.js +4 -7
- package/out/zero/src/react.js +3 -1
- package/out/zero/src/react.js.map +1 -1
- package/out/zero/src/server.js +5 -8
- package/out/zero-cache/src/auth/load-permissions.d.ts +3 -2
- package/out/zero-cache/src/auth/load-permissions.d.ts.map +1 -1
- package/out/zero-cache/src/auth/load-permissions.js +14 -8
- package/out/zero-cache/src/auth/load-permissions.js.map +1 -1
- package/out/zero-cache/src/auth/write-authorizer.d.ts +6 -0
- package/out/zero-cache/src/auth/write-authorizer.d.ts.map +1 -1
- package/out/zero-cache/src/auth/write-authorizer.js +16 -3
- package/out/zero-cache/src/auth/write-authorizer.js.map +1 -1
- package/out/zero-cache/src/config/zero-config.d.ts +44 -8
- package/out/zero-cache/src/config/zero-config.d.ts.map +1 -1
- package/out/zero-cache/src/config/zero-config.js +53 -13
- package/out/zero-cache/src/config/zero-config.js.map +1 -1
- package/out/zero-cache/src/custom/fetch.d.ts +3 -0
- package/out/zero-cache/src/custom/fetch.d.ts.map +1 -1
- package/out/zero-cache/src/custom/fetch.js +26 -0
- package/out/zero-cache/src/custom/fetch.js.map +1 -1
- package/out/zero-cache/src/db/lite-tables.js +1 -1
- package/out/zero-cache/src/db/lite-tables.js.map +1 -1
- package/out/zero-cache/src/db/migration-lite.d.ts.map +1 -1
- package/out/zero-cache/src/db/migration-lite.js +9 -3
- package/out/zero-cache/src/db/migration-lite.js.map +1 -1
- package/out/zero-cache/src/db/migration.d.ts.map +1 -1
- package/out/zero-cache/src/db/migration.js +9 -3
- package/out/zero-cache/src/db/migration.js.map +1 -1
- package/out/zero-cache/src/db/specs.d.ts +4 -3
- package/out/zero-cache/src/db/specs.d.ts.map +1 -1
- package/out/zero-cache/src/db/specs.js +4 -1
- package/out/zero-cache/src/db/specs.js.map +1 -1
- package/out/zero-cache/src/db/transaction-pool.d.ts.map +1 -1
- package/out/zero-cache/src/db/transaction-pool.js +9 -3
- package/out/zero-cache/src/db/transaction-pool.js.map +1 -1
- package/out/zero-cache/src/server/inspector-delegate.d.ts +1 -1
- package/out/zero-cache/src/server/inspector-delegate.d.ts.map +1 -1
- package/out/zero-cache/src/server/inspector-delegate.js +11 -30
- package/out/zero-cache/src/server/inspector-delegate.js.map +1 -1
- package/out/zero-cache/src/server/main.js +1 -1
- package/out/zero-cache/src/server/main.js.map +1 -1
- package/out/zero-cache/src/server/priority-op.d.ts +8 -0
- package/out/zero-cache/src/server/priority-op.d.ts.map +1 -0
- package/out/zero-cache/src/server/priority-op.js +29 -0
- package/out/zero-cache/src/server/priority-op.js.map +1 -0
- package/out/zero-cache/src/server/syncer.d.ts +0 -1
- package/out/zero-cache/src/server/syncer.d.ts.map +1 -1
- package/out/zero-cache/src/server/syncer.js +3 -21
- package/out/zero-cache/src/server/syncer.js.map +1 -1
- package/out/zero-cache/src/services/analyze.js +1 -1
- package/out/zero-cache/src/services/analyze.js.map +1 -1
- package/out/zero-cache/src/services/change-source/custom/change-source.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/custom/change-source.js +4 -3
- package/out/zero-cache/src/services/change-source/custom/change-source.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/change-source.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/change-source.js +68 -13
- package/out/zero-cache/src/services/change-source/pg/change-source.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/initial-sync.js +7 -2
- package/out/zero-cache/src/services/change-source/pg/initial-sync.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/logical-replication/stream.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/logical-replication/stream.js +7 -4
- package/out/zero-cache/src/services/change-source/pg/logical-replication/stream.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts +125 -180
- package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/init.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/init.js +18 -10
- package/out/zero-cache/src/services/change-source/pg/schema/init.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/published.d.ts +36 -90
- package/out/zero-cache/src/services/change-source/pg/schema/published.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/published.js +51 -14
- package/out/zero-cache/src/services/change-source/pg/schema/published.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/shard.d.ts +31 -36
- package/out/zero-cache/src/services/change-source/pg/schema/shard.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/shard.js +24 -3
- package/out/zero-cache/src/services/change-source/pg/schema/shard.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/validation.d.ts +2 -2
- package/out/zero-cache/src/services/change-source/pg/schema/validation.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/validation.js +2 -4
- package/out/zero-cache/src/services/change-source/pg/schema/validation.js.map +1 -1
- package/out/zero-cache/src/services/change-source/protocol/current/data.d.ts +158 -53
- package/out/zero-cache/src/services/change-source/protocol/current/data.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/protocol/current/data.js +55 -10
- package/out/zero-cache/src/services/change-source/protocol/current/data.js.map +1 -1
- package/out/zero-cache/src/services/change-source/protocol/current/downstream.d.ts +210 -72
- package/out/zero-cache/src/services/change-source/protocol/current/downstream.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/protocol/current.js +4 -2
- package/out/zero-cache/src/services/change-source/replica-schema.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/replica-schema.js +19 -10
- package/out/zero-cache/src/services/change-source/replica-schema.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.js +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer.d.ts +71 -25
- package/out/zero-cache/src/services/change-streamer/change-streamer.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer.js +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/schema/tables.d.ts +1 -0
- package/out/zero-cache/src/services/change-streamer/schema/tables.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/schema/tables.js +6 -5
- package/out/zero-cache/src/services/change-streamer/schema/tables.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/storer.js +1 -1
- package/out/zero-cache/src/services/change-streamer/storer.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/subscriber.d.ts +2 -0
- package/out/zero-cache/src/services/change-streamer/subscriber.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/subscriber.js +14 -1
- package/out/zero-cache/src/services/change-streamer/subscriber.js.map +1 -1
- package/out/zero-cache/src/services/heapz.d.ts.map +1 -1
- package/out/zero-cache/src/services/heapz.js +1 -0
- package/out/zero-cache/src/services/heapz.js.map +1 -1
- package/out/zero-cache/src/services/mutagen/error.d.ts.map +1 -1
- package/out/zero-cache/src/services/mutagen/error.js +4 -1
- package/out/zero-cache/src/services/mutagen/error.js.map +1 -1
- package/out/zero-cache/src/services/mutagen/mutagen.d.ts.map +1 -1
- package/out/zero-cache/src/services/mutagen/mutagen.js +1 -0
- package/out/zero-cache/src/services/mutagen/mutagen.js.map +1 -1
- package/out/zero-cache/src/services/mutagen/pusher.d.ts +7 -4
- package/out/zero-cache/src/services/mutagen/pusher.d.ts.map +1 -1
- package/out/zero-cache/src/services/mutagen/pusher.js +80 -8
- package/out/zero-cache/src/services/mutagen/pusher.js.map +1 -1
- package/out/zero-cache/src/services/replicator/change-processor.d.ts.map +1 -1
- package/out/zero-cache/src/services/replicator/change-processor.js +21 -29
- package/out/zero-cache/src/services/replicator/change-processor.js.map +1 -1
- package/out/zero-cache/src/services/replicator/schema/change-log.d.ts +1 -2
- package/out/zero-cache/src/services/replicator/schema/change-log.d.ts.map +1 -1
- package/out/zero-cache/src/services/replicator/schema/change-log.js +2 -5
- package/out/zero-cache/src/services/replicator/schema/change-log.js.map +1 -1
- package/out/zero-cache/src/services/{change-source → replicator/schema}/column-metadata.d.ts +3 -3
- package/out/zero-cache/src/services/replicator/schema/column-metadata.d.ts.map +1 -0
- package/out/zero-cache/src/services/{change-source → replicator/schema}/column-metadata.js +3 -3
- package/out/zero-cache/src/services/replicator/schema/column-metadata.js.map +1 -0
- package/out/zero-cache/src/services/replicator/schema/replication-state.d.ts.map +1 -1
- package/out/zero-cache/src/services/replicator/schema/replication-state.js +3 -1
- package/out/zero-cache/src/services/replicator/schema/replication-state.js.map +1 -1
- package/out/zero-cache/src/services/run-ast.js +1 -1
- package/out/zero-cache/src/services/run-ast.js.map +1 -1
- package/out/zero-cache/src/services/statz.d.ts.map +1 -1
- package/out/zero-cache/src/services/statz.js +1 -0
- package/out/zero-cache/src/services/statz.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr-store.d.ts +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr-store.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr-store.js +59 -40
- package/out/zero-cache/src/services/view-syncer/cvr-store.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr.d.ts +0 -1
- package/out/zero-cache/src/services/view-syncer/cvr.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr.js +23 -6
- package/out/zero-cache/src/services/view-syncer/cvr.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts +13 -14
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.js +44 -56
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/row-record-cache.d.ts +1 -1
- package/out/zero-cache/src/services/view-syncer/row-record-cache.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/row-record-cache.js +22 -11
- package/out/zero-cache/src/services/view-syncer/row-record-cache.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/snapshotter.js +1 -1
- package/out/zero-cache/src/services/view-syncer/snapshotter.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts +6 -3
- package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/view-syncer.js +192 -217
- package/out/zero-cache/src/services/view-syncer/view-syncer.js.map +1 -1
- package/out/zero-cache/src/types/lexi-version.d.ts.map +1 -1
- package/out/zero-cache/src/types/lexi-version.js +4 -1
- package/out/zero-cache/src/types/lexi-version.js.map +1 -1
- package/out/zero-cache/src/types/lite.d.ts.map +1 -1
- package/out/zero-cache/src/types/lite.js +8 -2
- package/out/zero-cache/src/types/lite.js.map +1 -1
- package/out/zero-cache/src/types/shards.js +1 -1
- package/out/zero-cache/src/types/shards.js.map +1 -1
- package/out/zero-cache/src/types/sql.d.ts +5 -0
- package/out/zero-cache/src/types/sql.d.ts.map +1 -1
- package/out/zero-cache/src/types/sql.js +5 -1
- package/out/zero-cache/src/types/sql.js.map +1 -1
- package/out/zero-cache/src/types/subscription.js +1 -1
- package/out/zero-cache/src/types/subscription.js.map +1 -1
- package/out/zero-cache/src/workers/connect-params.d.ts +1 -0
- package/out/zero-cache/src/workers/connect-params.d.ts.map +1 -1
- package/out/zero-cache/src/workers/connect-params.js +2 -1
- package/out/zero-cache/src/workers/connect-params.js.map +1 -1
- package/out/zero-cache/src/workers/syncer-ws-message-handler.d.ts.map +1 -1
- package/out/zero-cache/src/workers/syncer-ws-message-handler.js +14 -6
- package/out/zero-cache/src/workers/syncer-ws-message-handler.js.map +1 -1
- package/out/zero-cache/src/workers/syncer.d.ts.map +1 -1
- package/out/zero-cache/src/workers/syncer.js +17 -10
- package/out/zero-cache/src/workers/syncer.js.map +1 -1
- package/out/zero-client/src/client/connection-manager.d.ts +8 -0
- package/out/zero-client/src/client/connection-manager.d.ts.map +1 -1
- package/out/zero-client/src/client/connection-manager.js +33 -0
- package/out/zero-client/src/client/connection-manager.js.map +1 -1
- package/out/zero-client/src/client/connection.d.ts.map +1 -1
- package/out/zero-client/src/client/connection.js +6 -3
- package/out/zero-client/src/client/connection.js.map +1 -1
- package/out/zero-client/src/client/error.js +1 -1
- package/out/zero-client/src/client/error.js.map +1 -1
- package/out/zero-client/src/client/mutator-proxy.d.ts.map +1 -1
- package/out/zero-client/src/client/mutator-proxy.js +15 -1
- package/out/zero-client/src/client/mutator-proxy.js.map +1 -1
- package/out/zero-client/src/client/options.d.ts +10 -0
- package/out/zero-client/src/client/options.d.ts.map +1 -1
- package/out/zero-client/src/client/options.js.map +1 -1
- package/out/zero-client/src/client/query-manager.d.ts +4 -0
- package/out/zero-client/src/client/query-manager.d.ts.map +1 -1
- package/out/zero-client/src/client/query-manager.js +7 -0
- package/out/zero-client/src/client/query-manager.js.map +1 -1
- package/out/zero-client/src/client/version.js +1 -1
- package/out/zero-client/src/client/zero.d.ts +3 -1
- package/out/zero-client/src/client/zero.d.ts.map +1 -1
- package/out/zero-client/src/client/zero.js +52 -7
- package/out/zero-client/src/client/zero.js.map +1 -1
- package/out/zero-client/src/mod.d.ts +1 -0
- package/out/zero-client/src/mod.d.ts.map +1 -1
- package/out/zero-protocol/src/connect.d.ts +4 -0
- package/out/zero-protocol/src/connect.d.ts.map +1 -1
- package/out/zero-protocol/src/connect.js +3 -1
- package/out/zero-protocol/src/connect.js.map +1 -1
- package/out/zero-protocol/src/protocol-version.d.ts +1 -1
- package/out/zero-protocol/src/protocol-version.d.ts.map +1 -1
- package/out/zero-protocol/src/protocol-version.js +1 -1
- package/out/zero-protocol/src/protocol-version.js.map +1 -1
- package/out/zero-protocol/src/push.d.ts +11 -2
- package/out/zero-protocol/src/push.d.ts.map +1 -1
- package/out/zero-protocol/src/push.js +22 -6
- package/out/zero-protocol/src/push.js.map +1 -1
- package/out/zero-protocol/src/up.d.ts +2 -0
- package/out/zero-protocol/src/up.d.ts.map +1 -1
- package/out/zero-react/src/mod.d.ts +3 -1
- package/out/zero-react/src/mod.d.ts.map +1 -1
- package/out/zero-react/src/paging-reducer.d.ts +61 -0
- package/out/zero-react/src/paging-reducer.d.ts.map +1 -0
- package/out/zero-react/src/paging-reducer.js +77 -0
- package/out/zero-react/src/paging-reducer.js.map +1 -0
- package/out/zero-react/src/use-query.d.ts +11 -1
- package/out/zero-react/src/use-query.d.ts.map +1 -1
- package/out/zero-react/src/use-query.js +13 -11
- package/out/zero-react/src/use-query.js.map +1 -1
- package/out/zero-react/src/use-rows.d.ts +39 -0
- package/out/zero-react/src/use-rows.d.ts.map +1 -0
- package/out/zero-react/src/use-rows.js +130 -0
- package/out/zero-react/src/use-rows.js.map +1 -0
- package/out/zero-react/src/use-zero-virtualizer.d.ts +122 -0
- package/out/zero-react/src/use-zero-virtualizer.d.ts.map +1 -0
- package/out/zero-react/src/use-zero-virtualizer.js +342 -0
- package/out/zero-react/src/use-zero-virtualizer.js.map +1 -0
- package/out/zero-react/src/zero-provider.js +1 -1
- package/out/zero-react/src/zero-provider.js.map +1 -1
- package/out/zero-server/src/adapters/drizzle.d.ts +18 -18
- package/out/zero-server/src/adapters/drizzle.d.ts.map +1 -1
- package/out/zero-server/src/adapters/drizzle.js +8 -22
- package/out/zero-server/src/adapters/drizzle.js.map +1 -1
- package/out/zero-server/src/adapters/pg.d.ts +19 -13
- package/out/zero-server/src/adapters/pg.d.ts.map +1 -1
- package/out/zero-server/src/adapters/pg.js.map +1 -1
- package/out/zero-server/src/adapters/postgresjs.d.ts +19 -13
- package/out/zero-server/src/adapters/postgresjs.d.ts.map +1 -1
- package/out/zero-server/src/adapters/postgresjs.js.map +1 -1
- package/out/zero-server/src/adapters/prisma.d.ts +66 -0
- package/out/zero-server/src/adapters/prisma.d.ts.map +1 -0
- package/out/zero-server/src/adapters/prisma.js +63 -0
- package/out/zero-server/src/adapters/prisma.js.map +1 -0
- package/out/zero-server/src/custom.js +1 -15
- package/out/zero-server/src/custom.js.map +1 -1
- package/out/zero-server/src/mod.d.ts +9 -8
- package/out/zero-server/src/mod.d.ts.map +1 -1
- package/out/zero-server/src/process-mutations.d.ts +2 -2
- package/out/zero-server/src/process-mutations.d.ts.map +1 -1
- package/out/zero-server/src/process-mutations.js +4 -8
- package/out/zero-server/src/process-mutations.js.map +1 -1
- package/out/zero-server/src/push-processor.js +1 -1
- package/out/zero-server/src/push-processor.js.map +1 -1
- package/out/zero-server/src/schema.d.ts.map +1 -1
- package/out/zero-server/src/schema.js +4 -1
- package/out/zero-server/src/schema.js.map +1 -1
- package/out/zero-server/src/zql-database.d.ts.map +1 -1
- package/out/zero-server/src/zql-database.js +17 -8
- package/out/zero-server/src/zql-database.js.map +1 -1
- package/out/zero-solid/src/mod.d.ts +1 -1
- package/out/zero-solid/src/mod.d.ts.map +1 -1
- package/out/zero-solid/src/use-query.d.ts +10 -1
- package/out/zero-solid/src/use-query.d.ts.map +1 -1
- package/out/zero-solid/src/use-query.js +21 -5
- package/out/zero-solid/src/use-query.js.map +1 -1
- package/out/zero-solid/src/use-zero.js +1 -1
- package/out/zero-solid/src/use-zero.js.map +1 -1
- package/out/zql/src/ivm/constraint.d.ts.map +1 -1
- package/out/zql/src/ivm/constraint.js +4 -1
- package/out/zql/src/ivm/constraint.js.map +1 -1
- package/out/zql/src/ivm/exists.d.ts.map +1 -1
- package/out/zql/src/ivm/exists.js +4 -1
- package/out/zql/src/ivm/exists.js.map +1 -1
- package/out/zql/src/ivm/join-utils.d.ts.map +1 -1
- package/out/zql/src/ivm/join-utils.js +8 -2
- package/out/zql/src/ivm/join-utils.js.map +1 -1
- package/out/zql/src/ivm/memory-source.d.ts.map +1 -1
- package/out/zql/src/ivm/memory-source.js +12 -3
- package/out/zql/src/ivm/memory-source.js.map +1 -1
- package/out/zql/src/ivm/push-accumulated.d.ts.map +1 -1
- package/out/zql/src/ivm/push-accumulated.js +25 -2
- package/out/zql/src/ivm/push-accumulated.js.map +1 -1
- package/out/zql/src/ivm/take.d.ts.map +1 -1
- package/out/zql/src/ivm/take.js +24 -6
- package/out/zql/src/ivm/take.js.map +1 -1
- package/out/zql/src/ivm/union-fan-in.d.ts.map +1 -1
- package/out/zql/src/ivm/union-fan-in.js +12 -3
- package/out/zql/src/ivm/union-fan-in.js.map +1 -1
- package/out/zqlite/src/table-source.d.ts.map +1 -1
- package/out/zqlite/src/table-source.js +1 -2
- package/out/zqlite/src/table-source.js.map +1 -1
- package/package.json +6 -2
- package/out/zero-cache/src/services/change-source/column-metadata.d.ts.map +0 -1
- package/out/zero-cache/src/services/change-source/column-metadata.js.map +0 -1
package/out/zero-cache/src/services/replicator/schema/column-metadata.js.map
@@ -0,0 +1 @@
+ [new source map for column-metadata.js, generated from the relocated zero-cache/src/services/replicator/schema/column-metadata.ts; its embedded source defines the "_zero.column_metadata" table, which stores upstream PostgreSQL type metadata that was previously packed into pipe-delimited SQLite type strings (e.g. "int8|NOT_NULL|TEXT_ENUM"), and the ColumnMetadataStore class that reads and writes it]
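The sourcesContent embedded in that map is the fullest description of the new scheme anywhere in this diff: the old pipe-delimited notation caused SQLite type-affinity problems, so the metadata now lives in its own table while SQLite columns keep plain type names. A declaration-level TypeScript sketch of the API, condensed from that embedded source (zqlite's Database and ColumnSpec types are collapsed to unknown stubs here):

// Condensed from the module source embedded in the map above.
interface ColumnMetadata {
  upstreamType: string; // PostgreSQL type name, e.g. 'int8', 'text[]', 'user_role'
  isNotNull: boolean;
  isEnum: boolean;
  isArray: boolean;
  characterMaxLength?: number | null; // max length for varchar/char types
}

declare class ColumnMetadataStore {
  // One instance per Database; returns undefined until the
  // "_zero.column_metadata" table exists.
  static getInstance(db: unknown): ColumnMetadataStore | undefined;
  insert(tableName: string, columnName: string, spec: unknown): void;
  update(
    tableName: string,
    oldColumnName: string,
    newColumnName: string,
    spec: unknown,
  ): void;
  deleteColumn(tableName: string, columnName: string): void;
  deleteTable(tableName: string): void;
  renameTable(oldTableName: string, newTableName: string): void;
  getColumn(tableName: string, columnName: string): ColumnMetadata | undefined;
  getTable(tableName: string): Map<string, ColumnMetadata>;
  // Backfills from tables still using pipe notation (used by migration v8).
  populateFromExistingTables(tables: unknown[]): void;
}

Callers are expected to handle undefined from getInstance, since the table may not exist yet on replicas that predate the backfill migration.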
package/out/zero-cache/src/services/replicator/schema/replication-state.d.ts.map
@@ -1 +1 @@
- [old source map; mappings stopped after the type-import header]
+ [regenerated source map; mappings now cover the module's full set of exported declarations]
package/out/zero-cache/src/services/replicator/schema/replication-state.js
@@ -1,4 +1,6 @@
 import { parse } from "../../../../../shared/src/valita.js";
+import { CREATE_CHANGELOG_SCHEMA } from "./change-log.js";
+import { CREATE_COLUMN_METADATA_TABLE } from "./column-metadata.js";
 import { array, string, object } from "@badrap/valita";
 const CREATE_RUNTIME_EVENTS_TABLE = `
   CREATE TABLE "_zero.runtimeEvents" (
@@ -24,7 +26,7 @@ const CREATE_REPLICATION_STATE_SCHEMA = (
     stateVersion TEXT NOT NULL,
     lock INTEGER PRIMARY KEY DEFAULT 1 CHECK (lock=1)
   );
-` + CREATE_RUNTIME_EVENTS_TABLE
+` + CREATE_CHANGELOG_SCHEMA + CREATE_RUNTIME_EVENTS_TABLE + CREATE_COLUMN_METADATA_TABLE
 );
 const stringArray = array(string());
 const subscriptionStateSchema = object({
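With the new concatenation, the single db.exec in initReplicationState provisions the changelog and column-metadata tables together with the replication-state tables. A minimal runnable sketch of that path, following the replication-state.ts source embedded in this package's source maps (the Database type is reduced to the two methods used, and the appended schema constants are noted in a comment):

type Database = {
  exec(sql: string): void;
  prepare(sql: string): {run(...args: unknown[]): void};
};

// DDL as in the embedded source; in the real module the string now ends with
// ` + CREATE_CHANGELOG_SCHEMA + CREATE_RUNTIME_EVENTS_TABLE + CREATE_COLUMN_METADATA_TABLE`.
const CREATE_REPLICATION_STATE_SCHEMA = `
  CREATE TABLE "_zero.replicationConfig" (
    replicaVersion TEXT NOT NULL,
    publications TEXT NOT NULL,
    lock INTEGER PRIMARY KEY DEFAULT 1 CHECK (lock=1)
  );
  CREATE TABLE "_zero.replicationState" (
    stateVersion TEXT NOT NULL,
    lock INTEGER PRIMARY KEY DEFAULT 1 CHECK (lock=1)
  );
`;

function initReplicationState(
  db: Database,
  publications: string[],
  watermark: string,
): void {
  // One exec creates every table in the combined schema string.
  db.exec(CREATE_REPLICATION_STATE_SCHEMA);
  db.prepare(
    `INSERT INTO "_zero.replicationConfig" (replicaVersion, publications) VALUES (?, ?)`,
  ).run(watermark, JSON.stringify(publications.sort()));
  db.prepare(
    `INSERT INTO "_zero.replicationState" (stateVersion) VALUES (?)`,
  ).run(watermark);
  // (recordEvent(db, 'sync') elided)
}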
package/out/zero-cache/src/services/replicator/schema/replication-state.js.map
@@ -1 +1 @@
- [old source map]
+ [regenerated source map; the embedded replication-state.ts source now imports CREATE_CHANGELOG_SCHEMA from './change-log.ts' and CREATE_COLUMN_METADATA_TABLE from './column-metadata.ts' and appends both to CREATE_REPLICATION_STATE_SCHEMA]
package/out/zero-cache/src/services/run-ast.js
@@ -56,7 +56,7 @@ async function runAst(lc, clientSchema, ast, isTransformed, options, yieldProcess
       await yieldProcess();
       continue;
     }
-    assert(rowChange.type === "add");
+    assert(rowChange.type === "add", "Hydration only handles add row changes");
     if (syncedRowCount % 10 === 0) {
       await Promise.resolve();
     }
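That assert sits in runAst's hydration loop, which interleaves explicit yield points with periodic event-loop breaks so large hydrations don't starve other work. A self-contained condensation of the loop, following the run-ast.ts source embedded in this package's source maps (types are simplified stand-ins):

type RowChange = 'yield' | {type: 'add' | 'remove'; table: string; row: unknown};

const sleep = (ms: number) => new Promise<void>(r => setTimeout(r, ms));

function assert(cond: boolean, msg?: string): asserts cond {
  if (!cond) throw new Error(msg ?? 'assertion failed');
}

async function drainHydration(
  changes: Iterable<RowChange>,
  yieldProcess: () => Promise<void>,
): Promise<number> {
  let syncedRowCount = 0;
  for (const rowChange of changes) {
    if (rowChange === 'yield') {
      await yieldProcess(); // explicit yield point requested by the pipeline
      continue;
    }
    // Hydration only ever emits additions; the new message says so explicitly.
    assert(rowChange.type === 'add', 'Hydration only handles add row changes');
    // Periodically yield so a long hydration doesn't block the event loop.
    if (syncedRowCount % 10 === 0) await Promise.resolve(); // microtask break
    if (syncedRowCount % 100 === 0) await sleep(1); // timer-based break
    syncedRowCount++; // (duplicate-row filtering elided)
  }
  return syncedRowCount;
}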
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"run-ast.js","sources":["../../../../../zero-cache/src/services/run-ast.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\n// @circular-dep-ignore\nimport {astToZQL} from '../../../ast-to-zql/src/ast-to-zql.ts';\n// @circular-dep-ignore\nimport {formatOutput} from '../../../ast-to-zql/src/format.ts';\nimport {assert} from '../../../shared/src/asserts.ts';\nimport {must} from '../../../shared/src/must.ts';\nimport {sleep} from '../../../shared/src/sleep.ts';\nimport type {AnalyzeQueryResult} from '../../../zero-protocol/src/analyze-query-result.ts';\nimport type {AST} from '../../../zero-protocol/src/ast.ts';\nimport {mapAST} from '../../../zero-protocol/src/ast.ts';\nimport type {Row} from '../../../zero-protocol/src/data.ts';\nimport {hashOfAST} from '../../../zero-protocol/src/query-hash.ts';\nimport type {PermissionsConfig} from '../../../zero-schema/src/compiled-permissions.ts';\nimport type {NameMapper} from '../../../zero-schema/src/name-mapper.ts';\nimport {\n buildPipeline,\n type BuilderDelegate,\n} from '../../../zql/src/builder/builder.ts';\nimport type {PlanDebugger} from '../../../zql/src/planner/planner-debug.ts';\nimport type {ConnectionCostModel} from '../../../zql/src/planner/planner-connection.ts';\nimport type {Database} from '../../../zqlite/src/db.ts';\nimport {transformAndHashQuery} from '../auth/read-authorizer.ts';\nimport type {LiteAndZqlSpec} from '../db/specs.ts';\nimport {hydrate} from './view-syncer/pipeline-driver.ts';\nimport type {TokenData} from './view-syncer/view-syncer.ts';\nimport type {ClientSchema} from '../../../zero-protocol/src/client-schema.ts';\n\nexport type RunAstOptions = {\n applyPermissions?: boolean | undefined;\n authData?: TokenData | undefined;\n clientToServerMapper?: NameMapper | undefined;\n costModel?: ConnectionCostModel | undefined;\n db: Database;\n host: BuilderDelegate;\n permissions?: PermissionsConfig | undefined;\n planDebugger?: PlanDebugger | undefined;\n syncedRows?: boolean | undefined;\n tableSpecs: Map<string, LiteAndZqlSpec>;\n vendedRows?: boolean | undefined;\n};\n\nexport async function runAst(\n lc: LogContext,\n clientSchema: ClientSchema,\n ast: AST,\n isTransformed: boolean,\n options: RunAstOptions,\n yieldProcess: () => Promise<void>,\n): Promise<AnalyzeQueryResult> {\n const {clientToServerMapper, permissions, host} = options;\n const result: AnalyzeQueryResult = {\n warnings: [],\n syncedRows: undefined,\n syncedRowCount: 0,\n start: 0,\n end: 0,\n elapsed: 0,\n afterPermissions: undefined,\n readRows: undefined,\n readRowCountsByQuery: {},\n readRowCount: undefined,\n };\n\n if (!isTransformed) {\n // map the AST to server names if not already transformed\n ast = mapAST(ast, must(clientToServerMapper));\n }\n if (options.applyPermissions) {\n const authData = options.authData?.decoded;\n if (!authData) {\n result.warnings.push(\n 'No auth data provided. 
Permission rules will compare to `NULL` wherever an auth data field is referenced.',\n );\n }\n ast = transformAndHashQuery(\n lc,\n 'clientGroupIDForAnalyze',\n ast,\n must(permissions),\n authData,\n false,\n ).transformedAst;\n result.afterPermissions = await formatOutput(ast.table + astToZQL(ast));\n }\n\n const pipeline = buildPipeline(\n ast,\n host,\n 'query-id',\n options.costModel,\n lc,\n options.planDebugger,\n );\n\n const start = performance.now();\n\n let syncedRowCount = 0;\n const rowsByTable: Record<string, Row[]> = {};\n const seenByTable: Set<string> = new Set();\n for (const rowChange of hydrate(pipeline, hashOfAST(ast), clientSchema)) {\n if (rowChange === 'yield') {\n await yieldProcess();\n continue;\n }\n assert(rowChange.type === 'add');\n\n // yield to other tasks to avoid blocking for too long\n if (syncedRowCount % 10 === 0) {\n await Promise.resolve();\n }\n if (syncedRowCount % 100 === 0) {\n await sleep(1);\n }\n\n let rows: Row[] = rowsByTable[rowChange.table];\n const s = rowChange.table + '.' + JSON.stringify(rowChange.row);\n if (seenByTable.has(s)) {\n continue; // skip duplicates\n }\n syncedRowCount++;\n seenByTable.add(s);\n if (options.syncedRows) {\n if (!rows) {\n rows = [];\n rowsByTable[rowChange.table] = rows;\n }\n rows.push(rowChange.row);\n }\n }\n\n const end = performance.now();\n if (options.syncedRows) {\n result.syncedRows = rowsByTable;\n }\n result.start = start;\n result.end = end;\n result.elapsed = end - start;\n\n // Always include the count of synced and vended rows.\n result.syncedRowCount = syncedRowCount;\n result.readRowCountsByQuery = host.debug?.getVendedRowCounts() ?? {};\n let readRowCount = 0;\n for (const c of Object.values(result.readRowCountsByQuery)) {\n for (const v of Object.values(c)) {\n readRowCount += v;\n }\n }\n result.readRowCount = readRowCount;\n result.dbScansByQuery = host.debug?.getNVisitCounts() ?? {};\n\n if (options.vendedRows) {\n result.readRows = host.debug?.getVendedRows();\n }\n return result;\n}\n"],"names":[],"mappings":";;;;;;;;;;AA0CA,eAAsB,OACpB,IACA,cACA,KACA,eACA,SACA,cAC6B;AAC7B,QAAM,EAAuB,aAAa,KAAA,IAAQ;AAClD,QAAM,SAA6B;AAAA,IACjC,UAAU,CAAA;AAAA,IACV,YAAY;AAAA,IACZ,gBAAgB;AAAA,IAChB,OAAO;AAAA,IACP,KAAK;AAAA,IACL,SAAS;AAAA,IACT,kBAAkB;AAAA,IAClB,UAAU;AAAA,IACV,sBAAsB,CAAA;AAAA,IACtB,cAAc;AAAA,EAAA;AAOhB,MAAI,QAAQ,kBAAkB;AAC5B,UAAM,WAAW,QAAQ,UAAU;AACnC,QAAI,CAAC,UAAU;AACb,aAAO,SAAS;AAAA,QACd;AAAA,MAAA;AAAA,IAEJ;AACA,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA,KAAK,WAAW;AAAA,MAChB;AAAA,MACA;AAAA,IAAA,EACA;AACF,WAAO,mBAAmB,MAAM,aAAa,IAAI,QAAQ,SAAS,GAAG,CAAC;AAAA,EACxE;AAEA,QAAM,WAAW;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,IACA,QAAQ;AAAA,IACR;AAAA,IACA,QAAQ;AAAA,EAAA;AAGV,QAAM,QAAQ,YAAY,IAAA;AAE1B,MAAI,iBAAiB;AACrB,QAAM,cAAqC,CAAA;AAC3C,QAAM,kCAA+B,IAAA;AACrC,aAAW,aAAa,QAAQ,UAAU,UAAU,GAAG,GAAG,YAAY,GAAG;AACvE,QAAI,cAAc,SAAS;AACzB,YAAM,aAAA;AACN;AAAA,IACF;AACA,WAAO,UAAU,SAAS,
|
|
1
|
+
{"version":3,"file":"run-ast.js","sources":["../../../../../zero-cache/src/services/run-ast.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\n// @circular-dep-ignore\nimport {astToZQL} from '../../../ast-to-zql/src/ast-to-zql.ts';\n// @circular-dep-ignore\nimport {formatOutput} from '../../../ast-to-zql/src/format.ts';\nimport {assert} from '../../../shared/src/asserts.ts';\nimport {must} from '../../../shared/src/must.ts';\nimport {sleep} from '../../../shared/src/sleep.ts';\nimport type {AnalyzeQueryResult} from '../../../zero-protocol/src/analyze-query-result.ts';\nimport type {AST} from '../../../zero-protocol/src/ast.ts';\nimport {mapAST} from '../../../zero-protocol/src/ast.ts';\nimport type {Row} from '../../../zero-protocol/src/data.ts';\nimport {hashOfAST} from '../../../zero-protocol/src/query-hash.ts';\nimport type {PermissionsConfig} from '../../../zero-schema/src/compiled-permissions.ts';\nimport type {NameMapper} from '../../../zero-schema/src/name-mapper.ts';\nimport {\n buildPipeline,\n type BuilderDelegate,\n} from '../../../zql/src/builder/builder.ts';\nimport type {PlanDebugger} from '../../../zql/src/planner/planner-debug.ts';\nimport type {ConnectionCostModel} from '../../../zql/src/planner/planner-connection.ts';\nimport type {Database} from '../../../zqlite/src/db.ts';\nimport {transformAndHashQuery} from '../auth/read-authorizer.ts';\nimport type {LiteAndZqlSpec} from '../db/specs.ts';\nimport {hydrate} from './view-syncer/pipeline-driver.ts';\nimport type {TokenData} from './view-syncer/view-syncer.ts';\nimport type {ClientSchema} from '../../../zero-protocol/src/client-schema.ts';\n\nexport type RunAstOptions = {\n applyPermissions?: boolean | undefined;\n authData?: TokenData | undefined;\n clientToServerMapper?: NameMapper | undefined;\n costModel?: ConnectionCostModel | undefined;\n db: Database;\n host: BuilderDelegate;\n permissions?: PermissionsConfig | undefined;\n planDebugger?: PlanDebugger | undefined;\n syncedRows?: boolean | undefined;\n tableSpecs: Map<string, LiteAndZqlSpec>;\n vendedRows?: boolean | undefined;\n};\n\nexport async function runAst(\n lc: LogContext,\n clientSchema: ClientSchema,\n ast: AST,\n isTransformed: boolean,\n options: RunAstOptions,\n yieldProcess: () => Promise<void>,\n): Promise<AnalyzeQueryResult> {\n const {clientToServerMapper, permissions, host} = options;\n const result: AnalyzeQueryResult = {\n warnings: [],\n syncedRows: undefined,\n syncedRowCount: 0,\n start: 0,\n end: 0,\n elapsed: 0,\n afterPermissions: undefined,\n readRows: undefined,\n readRowCountsByQuery: {},\n readRowCount: undefined,\n };\n\n if (!isTransformed) {\n // map the AST to server names if not already transformed\n ast = mapAST(ast, must(clientToServerMapper));\n }\n if (options.applyPermissions) {\n const authData = options.authData?.decoded;\n if (!authData) {\n result.warnings.push(\n 'No auth data provided. 
Permission rules will compare to `NULL` wherever an auth data field is referenced.',\n );\n }\n ast = transformAndHashQuery(\n lc,\n 'clientGroupIDForAnalyze',\n ast,\n must(permissions),\n authData,\n false,\n ).transformedAst;\n result.afterPermissions = await formatOutput(ast.table + astToZQL(ast));\n }\n\n const pipeline = buildPipeline(\n ast,\n host,\n 'query-id',\n options.costModel,\n lc,\n options.planDebugger,\n );\n\n const start = performance.now();\n\n let syncedRowCount = 0;\n const rowsByTable: Record<string, Row[]> = {};\n const seenByTable: Set<string> = new Set();\n for (const rowChange of hydrate(pipeline, hashOfAST(ast), clientSchema)) {\n if (rowChange === 'yield') {\n await yieldProcess();\n continue;\n }\n assert(rowChange.type === 'add', 'Hydration only handles add row changes');\n\n // yield to other tasks to avoid blocking for too long\n if (syncedRowCount % 10 === 0) {\n await Promise.resolve();\n }\n if (syncedRowCount % 100 === 0) {\n await sleep(1);\n }\n\n let rows: Row[] = rowsByTable[rowChange.table];\n const s = rowChange.table + '.' + JSON.stringify(rowChange.row);\n if (seenByTable.has(s)) {\n continue; // skip duplicates\n }\n syncedRowCount++;\n seenByTable.add(s);\n if (options.syncedRows) {\n if (!rows) {\n rows = [];\n rowsByTable[rowChange.table] = rows;\n }\n rows.push(rowChange.row);\n }\n }\n\n const end = performance.now();\n if (options.syncedRows) {\n result.syncedRows = rowsByTable;\n }\n result.start = start;\n result.end = end;\n result.elapsed = end - start;\n\n // Always include the count of synced and vended rows.\n result.syncedRowCount = syncedRowCount;\n result.readRowCountsByQuery = host.debug?.getVendedRowCounts() ?? {};\n let readRowCount = 0;\n for (const c of Object.values(result.readRowCountsByQuery)) {\n for (const v of Object.values(c)) {\n readRowCount += v;\n }\n }\n result.readRowCount = readRowCount;\n result.dbScansByQuery = host.debug?.getNVisitCounts() ?? 
{};\n\n if (options.vendedRows) {\n result.readRows = host.debug?.getVendedRows();\n }\n return result;\n}\n"],"names":[],"mappings":";;;;;;;;;;AA0CA,eAAsB,OACpB,IACA,cACA,KACA,eACA,SACA,cAC6B;AAC7B,QAAM,EAAuB,aAAa,KAAA,IAAQ;AAClD,QAAM,SAA6B;AAAA,IACjC,UAAU,CAAA;AAAA,IACV,YAAY;AAAA,IACZ,gBAAgB;AAAA,IAChB,OAAO;AAAA,IACP,KAAK;AAAA,IACL,SAAS;AAAA,IACT,kBAAkB;AAAA,IAClB,UAAU;AAAA,IACV,sBAAsB,CAAA;AAAA,IACtB,cAAc;AAAA,EAAA;AAOhB,MAAI,QAAQ,kBAAkB;AAC5B,UAAM,WAAW,QAAQ,UAAU;AACnC,QAAI,CAAC,UAAU;AACb,aAAO,SAAS;AAAA,QACd;AAAA,MAAA;AAAA,IAEJ;AACA,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA,KAAK,WAAW;AAAA,MAChB;AAAA,MACA;AAAA,IAAA,EACA;AACF,WAAO,mBAAmB,MAAM,aAAa,IAAI,QAAQ,SAAS,GAAG,CAAC;AAAA,EACxE;AAEA,QAAM,WAAW;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,IACA,QAAQ;AAAA,IACR;AAAA,IACA,QAAQ;AAAA,EAAA;AAGV,QAAM,QAAQ,YAAY,IAAA;AAE1B,MAAI,iBAAiB;AACrB,QAAM,cAAqC,CAAA;AAC3C,QAAM,kCAA+B,IAAA;AACrC,aAAW,aAAa,QAAQ,UAAU,UAAU,GAAG,GAAG,YAAY,GAAG;AACvE,QAAI,cAAc,SAAS;AACzB,YAAM,aAAA;AACN;AAAA,IACF;AACA,WAAO,UAAU,SAAS,OAAO,wCAAwC;AAGzE,QAAI,iBAAiB,OAAO,GAAG;AAC7B,YAAM,QAAQ,QAAA;AAAA,IAChB;AACA,QAAI,iBAAiB,QAAQ,GAAG;AAC9B,YAAM,MAAM,CAAC;AAAA,IACf;AAEA,QAAI,OAAc,YAAY,UAAU,KAAK;AAC7C,UAAM,IAAI,UAAU,QAAQ,MAAM,KAAK,UAAU,UAAU,GAAG;AAC9D,QAAI,YAAY,IAAI,CAAC,GAAG;AACtB;AAAA,IACF;AACA;AACA,gBAAY,IAAI,CAAC;AACjB,QAAI,QAAQ,YAAY;AACtB,UAAI,CAAC,MAAM;AACT,eAAO,CAAA;AACP,oBAAY,UAAU,KAAK,IAAI;AAAA,MACjC;AACA,WAAK,KAAK,UAAU,GAAG;AAAA,IACzB;AAAA,EACF;AAEA,QAAM,MAAM,YAAY,IAAA;AACxB,MAAI,QAAQ,YAAY;AACtB,WAAO,aAAa;AAAA,EACtB;AACA,SAAO,QAAQ;AACf,SAAO,MAAM;AACb,SAAO,UAAU,MAAM;AAGvB,SAAO,iBAAiB;AACxB,SAAO,uBAAuB,KAAK,OAAO,mBAAA,KAAwB,CAAA;AAClE,MAAI,eAAe;AACnB,aAAW,KAAK,OAAO,OAAO,OAAO,oBAAoB,GAAG;AAC1D,eAAW,KAAK,OAAO,OAAO,CAAC,GAAG;AAChC,sBAAgB;AAAA,IAClB;AAAA,EACF;AACA,SAAO,eAAe;AACtB,SAAO,iBAAiB,KAAK,OAAO,gBAAA,KAAqB,CAAA;AAEzD,MAAI,QAAQ,YAAY;AACtB,WAAO,WAAW,KAAK,OAAO,cAAA;AAAA,EAChC;AACA,SAAO;AACT;"}
@@ -1 +1 @@
-{"version":3,"file":"statz.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/services/statz.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,EAAC,YAAY,EAAE,cAAc,EAAC,MAAM,SAAS,CAAC;AAM1D,OAAO,KAAK,EAAC,oBAAoB,IAAI,UAAU,EAAC,MAAM,wBAAwB,CAAC;
+{"version":3,"file":"statz.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/services/statz.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,EAAC,YAAY,EAAE,cAAc,EAAC,MAAM,SAAS,CAAC;AAM1D,OAAO,KAAK,EAAC,oBAAoB,IAAI,UAAU,EAAC,MAAM,wBAAwB,CAAC;AAuS/E,wBAAsB,kBAAkB,CACtC,EAAE,EAAE,UAAU,EACd,MAAM,EAAE,UAAU,EAClB,GAAG,EAAE,cAAc,EACnB,GAAG,EAAE,YAAY,iBAqBlB"}
@@ -230,6 +230,7 @@ function osStats(out) {
 ["total mem", os.totalmem()],
 ["free mem", os.freemem()],
 ["cpus", os.cpus().length],
+["available parallelism", os.availableParallelism()],
 ["platform", os.platform()],
 ["arch", os.arch()],
 ["release", os.release()],
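Note: `os.availableParallelism()` is a standard Node API (added in Node 18.14) that estimates the parallelism the process can actually use, which may be lower than `os.cpus().length` when the process is constrained (e.g. CPU affinity masks or container quotas). A quick way to compare the two, independent of this package:

```ts
import os from 'node:os';

// Logical CPUs visible to the OS vs. the parallelism estimate for
// this process; in constrained environments these can differ.
console.log('cpus:', os.cpus().length);
console.log('available parallelism:', os.availableParallelism());
```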
@@ -1 +1 @@
-{"version":3,"file":"statz.js","sources":["../../../../../zero-cache/src/services/statz.ts"],"sourcesContent":["… ['cpus', os.cpus().length],\n      ['platform', os.platform()], …"],"names":[],"mappings":"…"}
+{"version":3,"file":"statz.js","sources":["../../../../../zero-cache/src/services/statz.ts"],"sourcesContent":["… ['cpus', os.cpus().length],\n      ['available parallelism', os.availableParallelism()],\n      ['platform', os.platform()], …"],"names":[],"mappings":"…"}
@@ -19,7 +19,7 @@ export type CVRFlushStats = {
 };
 export declare class CVRStore {
 #private;
-constructor(lc: LogContext, cvrDb: PostgresDB,
+constructor(lc: LogContext, cvrDb: PostgresDB, shard: ShardID, taskID: string, cvrID: string, failService: (e: unknown) => void, loadAttemptIntervalMs?: number, maxLoadAttempts?: number, deferredRowFlushThreshold?: number, // somewhat arbitrary
 setTimeoutFn?: typeof setTimeout);
 load(lc: LogContext, lastConnectTime: number): Promise<CVR>;
 getRowRecords(): Promise<ReadonlyMap<RowID, RowRecord>>;
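As the declaration above shows, `CVRStore` no longer takes an upstream `PostgresDB`; after the CVR database come the shard, task, and CVR identifiers plus tuning knobs with defaults. A hypothetical call site under the new signature (all argument values below are illustrative, not taken from the package):

```ts
// Illustrative only: argument values are placeholders, not real wiring.
const store = new CVRStore(
  lc,              // LogContext
  cvrDb,           // PostgresDB holding the CVR schema
  shard,           // ShardID
  'task-1',        // taskID
  'client-group',  // cvrID
  e => lc.error?.('cvr-store failed', e), // failService
  // loadAttemptIntervalMs, maxLoadAttempts, deferredRowFlushThreshold,
  // and setTimeoutFn fall back to their defaults.
);
```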
@@ -1 +1 @@
-{"version":3,"file":"cvr-store.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/view-syncer/cvr-store.ts"],"names":[],"mappings":"…"}
+{"version":3,"file":"cvr-store.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/view-syncer/cvr-store.ts"],"names":[],"mappings":"…"}
@@ -17,12 +17,13 @@ import { TransactionPool } from "../../db/transaction-pool.js";
 import { recordRowsSynced } from "../../server/anonymous-otel-start.js";
 import { ProtocolErrorWithLevel } from "../../types/error-with-level.js";
 import { rowIDString } from "../../types/row-key.js";
-import { cvrSchema
+import { cvrSchema } from "../../types/shards.js";
 import { RowRecordCache } from "./row-record-cache.js";
 import "pg-format";
 import "../../../../shared/src/bigint-json.js";
 import { EMPTY_CVR_VERSION, versionFromString, versionString, queryRecordToQueryRow, cmpVersions } from "./schema/types.js";
 import { ttlClockFromNumber, ttlClockAsNumber } from "./ttl-clock.js";
+let flushCounter = 0;
 const tracer = trace.getTracer("cvr-store", version);
 function asQuery(row) {
 const maybeVersion = (s) => s === null ? void 0 : versionFromString(s);
@@ -67,9 +68,7 @@ class CVRStore {
 #id;
 #failService;
 #db;
-#upstreamDb;
 #writes = /* @__PURE__ */ new Set();
-#upstreamWrites = [];
 #pendingRowRecordUpdates = new CustomKeyMap(
 rowIDString
 );
@@ -77,12 +76,10 @@ class CVRStore {
 #rowCache;
 #loadAttemptIntervalMs;
 #maxLoadAttempts;
-#upstreamSchemaName;
 #rowCount = 0;
-constructor(lc, cvrDb,
+constructor(lc, cvrDb, shard, taskID, cvrID, failService, loadAttemptIntervalMs = LOAD_ATTEMPT_INTERVAL_MS, maxLoadAttempts = MAX_LOAD_ATTEMPTS, deferredRowFlushThreshold = 100, setTimeoutFn = setTimeout) {
 this.#failService = failService;
 this.#db = cvrDb;
-this.#upstreamDb = upstreamDb;
 this.#schema = cvrSchema(shard);
 this.#taskID = taskID;
 this.#id = cvrID;
@@ -97,7 +94,6 @@ class CVRStore {
 );
 this.#loadAttemptIntervalMs = loadAttemptIntervalMs;
 this.#maxLoadAttempts = maxLoadAttempts;
-this.#upstreamSchemaName = upstreamSchema(shard);
 }
 #cvr(table) {
 return this.#db(`${this.#schema}.${table}`);
@@ -117,7 +113,7 @@ class CVRStore {
 }
 return result;
 }
-assert(err);
+assert(err, "Expected error to be set after retry loop exhausted");
 throw new ClientNotFoundError(
 `max attempts exceeded waiting for CVR@${err.cvrVersion} to catch up from ${err.rowsVersion}`
 );
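This assertion and the ones in the hunks below gain explicit messages; note that some pass a plain string and some a thunk, so the message is only built when the assertion actually fails. A minimal sketch of that pattern (not the actual `shared/src/asserts.ts` implementation):

```ts
// Minimal sketch of an assert that accepts a string or a lazy message.
// The thunk form avoids paying for template interpolation on the
// (common) success path.
function assert(cond: unknown, msg?: string | (() => string)): asserts cond {
  if (!cond) {
    throw new Error(typeof msg === 'function' ? msg() : (msg ?? 'Assertion failed'));
  }
}
```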
@@ -138,28 +134,30 @@ class CVRStore {
 clientSchema: null,
 profileID: null
 };
-const [instance, clientsRows, queryRows, desiresRows] = await this.#db.begin(READONLY, (tx) =>
-
+const [instance, clientsRows, queryRows, desiresRows] = await this.#db.begin(READONLY, (tx) => {
+lc.debug?.(`CVR tx started after ${Date.now() - start} ms`);
+return [
+tx`SELECT cvr."version",
 "lastActive",
 "ttlClock",
-"replicaVersion",
-"owner",
+"replicaVersion",
+"owner",
 "grantedAt",
-"clientSchema",
+"clientSchema",
 "profileID",
 "deleted",
 rows."version" as "rowsVersion"
 FROM ${this.#cvr("instances")} AS cvr
-LEFT JOIN ${this.#cvr("rowsVersion")} AS rows
+LEFT JOIN ${this.#cvr("rowsVersion")} AS rows
 ON cvr."clientGroupID" = rows."clientGroupID"
 WHERE cvr."clientGroupID" = ${id}`,
-
-
-
+tx`SELECT "clientID" FROM ${this.#cvr(
+"clients"
+)}
 WHERE "clientGroupID" = ${id}`,
-
+tx`SELECT * FROM ${this.#cvr("queries")}
 WHERE "clientGroupID" = ${id} AND deleted IS DISTINCT FROM true`,
-
+tx`SELECT
 "clientGroupID",
 "clientID",
 "queryHash",
@@ -169,7 +167,11 @@ class CVRStore {
 "inactivatedAtMs" AS "inactivatedAt"
 FROM ${this.#cvr("desires")}
 WHERE "clientGroupID" = ${id}`
-
+];
+});
+lc.debug?.(
+`CVR tx completed after ${Date.now() - start} ms (${clientsRows.length} clients, ${queryRows.length} queries, ${desiresRows.length} desires)`
+);
 if (instance.length === 0) {
 this.putInstance({
 version: cvr.version,
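The `start`/`lc.debug?.()` bracketing added around the read transaction is the same instrumentation idiom used on the flush path further down. Distilled, with a placeholder `work` callback standing in for the wrapped operation:

```ts
// Distilled form of the debug-timing idiom added throughout this file.
// `lc.debug` is optional, so the `?.` call is a no-op unless debug
// logging is enabled.
async function timed<T>(
  lc: {debug?: (...args: unknown[]) => void},
  label: string,
  work: () => Promise<T>,
): Promise<T> {
  const start = Date.now();
  const result = await work();
  lc.debug?.(`${label} completed after ${Date.now() - start} ms`);
  return result;
}
```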
@@ -181,7 +183,10 @@ class CVRStore {
 profileID: null
 });
 } else {
-assert(
+assert(
+instance.length === 1,
+() => `Expected exactly one CVR instance, got ${instance.length}`
+);
 const {
 version: version2,
 lastActive,
@@ -313,7 +318,10 @@ class CVRStore {
 if (result.length === 0) {
 return void 0;
 }
-assert(
+assert(
+result.length === 1,
+() => `Expected exactly one rowsVersion result, got ${result.length}`
+);
 return result[0][0];
 }
 putInstance({
@@ -422,15 +430,10 @@ class CVRStore {
 deleteClient(clientID) {
 this.#writes.add({
 stats: { clients: 1 },
-write: (sql) => sql`DELETE FROM ${this.#cvr("clients")}
-WHERE "clientGroupID" = ${this.#id}
+write: (sql) => sql`DELETE FROM ${this.#cvr("clients")}
+WHERE "clientGroupID" = ${this.#id}
 AND "clientID" = ${clientID}`
 });
-this.#upstreamWrites.push(
-(sql) => sql`DELETE FROM ${sql(this.#upstreamSchemaName)}."mutations"
-WHERE "clientGroupID" = ${this.#id}
-AND "clientID" = ${clientID}`
-);
 }
 putDesiredQuery(newVersion, query, client, deleted, inactivatedAt, ttl) {
 const change = {
@@ -507,7 +510,7 @@ class CVRStore {
 const { queryHash: id } = row;
 const patch = row.deleted ? { type: "query", op: "del", id } : { type: "query", op: "put", id };
 const v2 = row.patchVersion;
-assert(v2);
+assert(v2, "patchVersion must be set for query patches");
 patches.push({ patch, toVersion: versionFromString(v2) });
 }
 for (const row of allDesires) {
@@ -523,7 +526,9 @@ class CVRStore {
 reader.setDone();
 }
 }
-async #checkVersionAndOwnership(tx, expectedCurrentVersion, lastConnectTime) {
+async #checkVersionAndOwnership(lc, tx, expectedCurrentVersion, lastConnectTime) {
+const start = Date.now();
+lc.debug?.("checking cvr version and ownership");
 const expected = versionString(expectedCurrentVersion);
 const result = await tx`SELECT "version", "owner", "grantedAt" FROM ${this.#cvr("instances")}
 WHERE "clientGroupID" = ${this.#id}
@@ -533,6 +538,9 @@ class CVRStore {
 owner: null,
 grantedAt: null
 };
+lc.debug?.(
+"checked cvr version and ownership in " + (Date.now() - start) + " ms"
+);
 if (owner !== this.#taskID && (grantedAt ?? 0) > lastConnectTime) {
 throw new OwnershipError(owner, grantedAt, lastConnectTime);
 }
@@ -540,7 +548,7 @@ class CVRStore {
 throw new ConcurrentModificationException(expected, version2);
 }
 }
-async #flush(expectedCurrentVersion, cvr, lastConnectTime) {
+async #flush(lc, expectedCurrentVersion, cvr, lastConnectTime) {
 const stats = {
 instances: 0,
 queries: 0,
@@ -574,7 +582,10 @@ class CVRStore {
 return null;
 }
 this.putInstance(cvr);
+const start = Date.now();
+lc.debug?.("flush tx beginning");
 const rowsFlushed = await this.#db.begin(READ_COMMITTED, async (tx) => {
+lc.debug?.(`flush tx begun after ${Date.now() - start} ms`);
 const pipelined = [
 // #checkVersionAndOwnership() executes a `SELECT ... FOR UPDATE`
 // query to acquire a row-level lock so that version-updating
@@ -584,29 +595,41 @@ class CVRStore {
 // to this lock and can thus commit / be-committed independently of
 // cvr.instances.
 this.#checkVersionAndOwnership(
+lc,
 tx,
 expectedCurrentVersion,
 lastConnectTime
 )
 ];
+let i = 0;
 for (const write of this.#writes) {
 stats.instances += write.stats.instances ?? 0;
 stats.queries += write.stats.queries ?? 0;
 stats.desires += write.stats.desires ?? 0;
 stats.clients += write.stats.clients ?? 0;
 stats.rows += write.stats.rows ?? 0;
-
+const writeIndex = i++;
+const writeStart = Date.now();
+pipelined.push(
+write.write(tx, lastConnectTime).execute().then(() => {
+lc.debug?.(
+`write ${writeIndex}/${this.#writes.size} completed in ${Date.now() - writeStart} ms`
+);
+})
+);
 stats.statements++;
 }
 const rowUpdates = this.#rowCache.executeRowUpdates(
 tx,
 cvr.version,
 this.#pendingRowRecordUpdates,
-"allow-defer"
+"allow-defer",
+lc
 );
 pipelined.push(...rowUpdates);
 stats.statements += rowUpdates.length;
 await Promise.all(pipelined);
+lc.debug?.(`flush tx returning after ${Date.now() - start} ms`);
 if (rowUpdates.length === 0) {
 stats.rowsDeferred = this.#pendingRowRecordUpdates.size;
 return false;
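The comment at the top of this hunk explains why `#checkVersionAndOwnership` runs first in the pipeline: its `SELECT ... FOR UPDATE` takes a row-level lock on the `instances` row, so concurrent flushes for the same client group serialize there while the remaining statements fan out in parallel. Schematically (simplified query and placeholder `otherWrites`, not the exact statements in the package):

```ts
// Schematic of the row-lock ordering: lock first, then fan out the
// remaining statements and await them together. `tx` is a postgres.js
// transaction handle; table and column names are simplified.
const pipelined = [
  tx`SELECT "version", "owner", "grantedAt"
       FROM cvr."instances"
      WHERE "clientGroupID" = ${cvrID}
        FOR UPDATE`.execute(),
  ...otherWrites.map(w => w(tx).execute()),
];
await Promise.all(pipelined);
```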
@@ -620,11 +643,6 @@ class CVRStore {
 rowsFlushed
 );
 recordRowsSynced(this.#rowCount);
-if (this.#upstreamDb) {
-await this.#upstreamDb.begin(READ_COMMITTED, async (tx) => {
-await Promise.all(this.#upstreamWrites.map((write) => write(tx)));
-});
-}
 return stats;
 }
 get rowCount() {
@@ -632,8 +650,10 @@ class CVRStore {
 }
 async flush(lc, expectedCurrentVersion, cvr, lastConnectTime) {
 const start = performance.now();
+lc = lc.withContext("cvrFlushID", flushCounter++);
 try {
 const stats = await this.#flush(
+lc,
 expectedCurrentVersion,
 cvr,
 lastConnectTime
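Tagging the `LogContext` with a per-flush counter (`cvrFlushID`) lets all of the new debug lines from one flush be grouped together in the logs. The shape of the idiom, assuming `@rocicorp/logger`'s `withContext`, which returns a derived context:

```ts
import type {LogContext} from '@rocicorp/logger';

// Each flush gets its own monotonically increasing ID; every log line
// emitted through the derived context carries it.
let flushCounter = 0;
function flushContext(lc: LogContext): LogContext {
  return lc.withContext('cvrFlushID', flushCounter++);
}
```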
@@ -651,7 +671,6 @@ class CVRStore {
 throw e;
 } finally {
 this.#writes.clear();
-this.#upstreamWrites.length = 0;
 this.#pendingRowRecordUpdates.clear();
 this.#forceUpdates.clear();
 }