@rocicorp/zero 0.25.0-canary.18 → 0.25.0-canary.21
This diff shows the content of publicly available package versions as published to their respective public registries, and is provided for informational purposes only.
- package/out/replicache/src/persist/idb-databases-store.d.ts +1 -0
- package/out/replicache/src/persist/idb-databases-store.d.ts.map +1 -1
- package/out/replicache/src/persist/idb-databases-store.js +13 -2
- package/out/replicache/src/persist/idb-databases-store.js.map +1 -1
- package/out/zero/package.json.js +1 -1
- package/out/zero/src/adapters/drizzle.d.ts +1 -1
- package/out/zero/src/adapters/drizzle.d.ts.map +1 -1
- package/out/zero/src/adapters/drizzle.js +4 -1
- package/out/zero/src/bindings.d.ts +2 -0
- package/out/zero/src/bindings.d.ts.map +1 -0
- package/out/zero/src/bindings.js +27 -0
- package/out/zero/src/bindings.js.map +1 -0
- package/out/zero/src/react.js +2 -4
- package/out/zero/src/react.js.map +1 -1
- package/out/zero/src/solid.js +2 -2
- package/out/zero/src/zero.js +3 -5
- package/out/zero-cache/src/auth/read-authorizer.d.ts +1 -1
- package/out/zero-cache/src/auth/read-authorizer.d.ts.map +1 -1
- package/out/zero-cache/src/auth/read-authorizer.js.map +1 -1
- package/out/zero-cache/src/auth/write-authorizer.d.ts.map +1 -1
- package/out/zero-cache/src/auth/write-authorizer.js +5 -4
- package/out/zero-cache/src/auth/write-authorizer.js.map +1 -1
- package/out/zero-cache/src/config/zero-config.d.ts +24 -0
- package/out/zero-cache/src/config/zero-config.d.ts.map +1 -1
- package/out/zero-cache/src/config/zero-config.js +23 -4
- package/out/zero-cache/src/config/zero-config.js.map +1 -1
- package/out/zero-cache/src/server/anonymous-otel-start.d.ts +10 -1
- package/out/zero-cache/src/server/anonymous-otel-start.d.ts.map +1 -1
- package/out/zero-cache/src/server/anonymous-otel-start.js +34 -18
- package/out/zero-cache/src/server/anonymous-otel-start.js.map +1 -1
- package/out/zero-cache/src/server/syncer.d.ts.map +1 -1
- package/out/zero-cache/src/server/syncer.js +1 -0
- package/out/zero-cache/src/server/syncer.js.map +1 -1
- package/out/zero-cache/src/services/analyze.d.ts +1 -1
- package/out/zero-cache/src/services/analyze.d.ts.map +1 -1
- package/out/zero-cache/src/services/analyze.js +5 -5
- package/out/zero-cache/src/services/analyze.js.map +1 -1
- package/out/zero-cache/src/services/mutagen/pusher.d.ts +4 -4
- package/out/zero-cache/src/services/view-syncer/active-users-gauge.d.ts +2 -1
- package/out/zero-cache/src/services/view-syncer/active-users-gauge.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/active-users-gauge.js +26 -13
- package/out/zero-cache/src/services/view-syncer/active-users-gauge.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr-purger.d.ts +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr-purger.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr-purger.js +39 -15
- package/out/zero-cache/src/services/view-syncer/cvr-purger.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr-store.d.ts +4 -1
- package/out/zero-cache/src/services/view-syncer/cvr-store.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr-store.js +31 -9
- package/out/zero-cache/src/services/view-syncer/cvr-store.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr.d.ts +3 -0
- package/out/zero-cache/src/services/view-syncer/cvr.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr.js +11 -0
- package/out/zero-cache/src/services/view-syncer/cvr.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/inspect-handler.js +1 -1
- package/out/zero-cache/src/services/view-syncer/inspect-handler.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts +1 -2
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.js +6 -6
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/schema/cvr.d.ts +1 -0
- package/out/zero-cache/src/services/view-syncer/schema/cvr.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/schema/cvr.js +23 -10
- package/out/zero-cache/src/services/view-syncer/schema/cvr.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/schema/init.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/schema/init.js +31 -1
- package/out/zero-cache/src/services/view-syncer/schema/init.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts +1 -0
- package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/view-syncer.js +20 -16
- package/out/zero-cache/src/services/view-syncer/view-syncer.js.map +1 -1
- package/out/zero-cache/src/workers/connect-params.d.ts +1 -0
- package/out/zero-cache/src/workers/connect-params.d.ts.map +1 -1
- package/out/zero-cache/src/workers/connect-params.js +2 -0
- package/out/zero-cache/src/workers/connect-params.js.map +1 -1
- package/out/zero-cache/src/workers/syncer-ws-message-handler.d.ts.map +1 -1
- package/out/zero-cache/src/workers/syncer-ws-message-handler.js +2 -0
- package/out/zero-cache/src/workers/syncer-ws-message-handler.js.map +1 -1
- package/out/zero-client/src/client/bindings.d.ts +12 -41
- package/out/zero-client/src/client/bindings.d.ts.map +1 -1
- package/out/zero-client/src/client/custom.d.ts +3 -0
- package/out/zero-client/src/client/custom.d.ts.map +1 -1
- package/out/zero-client/src/client/custom.js +3 -0
- package/out/zero-client/src/client/custom.js.map +1 -1
- package/out/zero-client/src/client/delete-clients-manager.d.ts +1 -1
- package/out/zero-client/src/client/delete-clients-manager.d.ts.map +1 -1
- package/out/zero-client/src/client/delete-clients-manager.js +30 -3
- package/out/zero-client/src/client/delete-clients-manager.js.map +1 -1
- package/out/zero-client/src/client/make-replicache-mutators.js +1 -3
- package/out/zero-client/src/client/make-replicache-mutators.js.map +1 -1
- package/out/zero-client/src/client/options.d.ts +1 -1
- package/out/zero-client/src/client/options.js.map +1 -1
- package/out/zero-client/src/client/version.js +1 -1
- package/out/zero-client/src/client/zero.d.ts +1 -0
- package/out/zero-client/src/client/zero.d.ts.map +1 -1
- package/out/zero-client/src/client/zero.js +43 -26
- package/out/zero-client/src/client/zero.js.map +1 -1
- package/out/zero-client/src/mod.d.ts +6 -4
- package/out/zero-client/src/mod.d.ts.map +1 -1
- package/out/zero-protocol/src/analyze-query-result.d.ts +2 -2
- package/out/zero-protocol/src/analyze-query-result.js +2 -2
- package/out/zero-protocol/src/analyze-query-result.js.map +1 -1
- package/out/zero-protocol/src/down.d.ts +2 -2
- package/out/zero-protocol/src/inspect-down.d.ts +6 -6
- package/out/zero-protocol/src/inspect-up.d.ts +4 -4
- package/out/zero-protocol/src/inspect-up.js +1 -1
- package/out/zero-protocol/src/inspect-up.js.map +1 -1
- package/out/zero-protocol/src/protocol-version.d.ts +1 -1
- package/out/zero-protocol/src/protocol-version.d.ts.map +1 -1
- package/out/zero-protocol/src/protocol-version.js +1 -1
- package/out/zero-protocol/src/protocol-version.js.map +1 -1
- package/out/zero-protocol/src/up.d.ts +1 -1
- package/out/zero-react/src/bindings.d.ts +2 -0
- package/out/zero-react/src/bindings.d.ts.map +1 -0
- package/out/zero-react/src/mod.d.ts +1 -10
- package/out/zero-react/src/mod.d.ts.map +1 -1
- package/out/zero-react/src/{use-zero-connection-state.d.ts → use-connection-state.d.ts} +3 -3
- package/out/zero-react/src/use-connection-state.d.ts.map +1 -0
- package/out/zero-react/src/{use-zero-connection-state.js → use-connection-state.js} +3 -3
- package/out/zero-react/src/use-connection-state.js.map +1 -0
- package/out/zero-react/src/use-query.d.ts +2 -10
- package/out/zero-react/src/use-query.d.ts.map +1 -1
- package/out/zero-react/src/use-query.js +24 -22
- package/out/zero-react/src/use-query.js.map +1 -1
- package/out/zero-react/src/use-zero-online.d.ts +1 -1
- package/out/zero-react/src/use-zero-online.js.map +1 -1
- package/out/zero-react/src/zero-provider.d.ts +1 -5
- package/out/zero-react/src/zero-provider.d.ts.map +1 -1
- package/out/zero-react/src/zero-provider.js +16 -0
- package/out/zero-react/src/zero-provider.js.map +1 -1
- package/out/zero-react/src/zero.d.ts +2 -0
- package/out/zero-react/src/zero.d.ts.map +1 -0
- package/out/zero-schema/src/permissions.d.ts.map +1 -1
- package/out/zero-schema/src/permissions.js +2 -8
- package/out/zero-schema/src/permissions.js.map +1 -1
- package/out/zero-server/src/custom.d.ts +3 -0
- package/out/zero-server/src/custom.d.ts.map +1 -1
- package/out/zero-server/src/custom.js +3 -0
- package/out/zero-server/src/custom.js.map +1 -1
- package/out/zero-solid/src/bindings.d.ts +2 -0
- package/out/zero-solid/src/bindings.d.ts.map +1 -0
- package/out/zero-solid/src/mod.d.ts +1 -8
- package/out/zero-solid/src/mod.d.ts.map +1 -1
- package/out/zero-solid/src/solid-view.d.ts +1 -8
- package/out/zero-solid/src/solid-view.d.ts.map +1 -1
- package/out/zero-solid/src/solid-view.js +31 -0
- package/out/zero-solid/src/solid-view.js.map +1 -1
- package/out/zero-solid/src/{use-zero-connection-state.d.ts → use-connection-state.d.ts} +3 -3
- package/out/zero-solid/src/use-connection-state.d.ts.map +1 -0
- package/out/zero-solid/src/{use-zero-connection-state.js → use-connection-state.js} +3 -3
- package/out/zero-solid/src/use-connection-state.js.map +1 -0
- package/out/zero-solid/src/use-query.d.ts +1 -7
- package/out/zero-solid/src/use-query.d.ts.map +1 -1
- package/out/zero-solid/src/use-query.js +43 -12
- package/out/zero-solid/src/use-query.js.map +1 -1
- package/out/zero-solid/src/use-zero-online.d.ts +1 -1
- package/out/zero-solid/src/use-zero-online.js.map +1 -1
- package/out/zero-solid/src/use-zero.d.ts +1 -5
- package/out/zero-solid/src/use-zero.d.ts.map +1 -1
- package/out/zero-solid/src/use-zero.js +16 -0
- package/out/zero-solid/src/use-zero.js.map +1 -1
- package/out/zero-solid/src/zero.d.ts +2 -0
- package/out/zero-solid/src/zero.d.ts.map +1 -0
- package/out/zql/src/ivm/flipped-join.d.ts.map +1 -1
- package/out/zql/src/ivm/flipped-join.js +29 -27
- package/out/zql/src/ivm/flipped-join.js.map +1 -1
- package/out/zql/src/ivm/join-utils.d.ts +7 -1
- package/out/zql/src/ivm/join-utils.d.ts.map +1 -1
- package/out/zql/src/ivm/join-utils.js +12 -0
- package/out/zql/src/ivm/join-utils.js.map +1 -1
- package/out/zql/src/ivm/join.d.ts.map +1 -1
- package/out/zql/src/ivm/join.js +11 -25
- package/out/zql/src/ivm/join.js.map +1 -1
- package/out/zql/src/mutate/custom.d.ts +3 -0
- package/out/zql/src/mutate/custom.d.ts.map +1 -1
- package/out/zql/src/mutate/custom.js.map +1 -1
- package/out/zql/src/mutate/mutator-registry.d.ts +2 -2
- package/out/zql/src/mutate/mutator-registry.d.ts.map +1 -1
- package/out/zql/src/mutate/mutator-registry.js.map +1 -1
- package/out/zql/src/mutate/mutator.d.ts +1 -1
- package/out/zql/src/mutate/mutator.d.ts.map +1 -1
- package/out/zql/src/mutate/mutator.js.map +1 -1
- package/out/zql/src/planner/planner-debug.d.ts +3 -3
- package/out/zql/src/planner/planner-debug.js.map +1 -1
- package/out/zql/src/query/create-builder.d.ts +2 -1
- package/out/zql/src/query/create-builder.d.ts.map +1 -1
- package/out/zql/src/query/create-builder.js +3 -0
- package/out/zql/src/query/create-builder.js.map +1 -1
- package/out/zql/src/query/query-impl.d.ts +39 -6
- package/out/zql/src/query/query-impl.d.ts.map +1 -1
- package/out/zql/src/query/query-impl.js +414 -23
- package/out/zql/src/query/query-impl.js.map +1 -1
- package/out/zql/src/query/query-registry.d.ts +2 -2
- package/out/zql/src/query/query-registry.d.ts.map +1 -1
- package/out/zql/src/query/query-registry.js.map +1 -1
- package/out/zql/src/query/runnable-query-impl.d.ts +2 -2
- package/out/zql/src/query/runnable-query-impl.d.ts.map +1 -1
- package/out/zql/src/query/runnable-query-impl.js +2 -2
- package/out/zql/src/query/runnable-query-impl.js.map +1 -1
- package/out/zql/src/query/schema-query.d.ts +4 -2
- package/out/zql/src/query/schema-query.d.ts.map +1 -1
- package/out/zql/src/query/static-query.d.ts +2 -16
- package/out/zql/src/query/static-query.d.ts.map +1 -1
- package/out/zql/src/query/static-query.js +10 -37
- package/out/zql/src/query/static-query.js.map +1 -1
- package/package.json +7 -3
- package/out/zero-client/src/client/bindings.js +0 -33
- package/out/zero-client/src/client/bindings.js.map +0 -1
- package/out/zero-react/src/components/inspector.d.ts +0 -9
- package/out/zero-react/src/components/inspector.d.ts.map +0 -1
- package/out/zero-react/src/components/inspector.js +0 -38
- package/out/zero-react/src/components/inspector.js.map +0 -1
- package/out/zero-react/src/components/mark-icon.d.ts +0 -3
- package/out/zero-react/src/components/mark-icon.d.ts.map +0 -1
- package/out/zero-react/src/components/mark-icon.js +0 -28
- package/out/zero-react/src/components/mark-icon.js.map +0 -1
- package/out/zero-react/src/components/zero-inspector.d.ts +0 -8
- package/out/zero-react/src/components/zero-inspector.d.ts.map +0 -1
- package/out/zero-react/src/components/zero-inspector.js +0 -44
- package/out/zero-react/src/components/zero-inspector.js.map +0 -1
- package/out/zero-react/src/use-zero-connection-state.d.ts.map +0 -1
- package/out/zero-react/src/use-zero-connection-state.js.map +0 -1
- package/out/zero-solid/src/use-zero-connection-state.d.ts.map +0 -1
- package/out/zero-solid/src/use-zero-connection-state.js.map +0 -1
- package/out/zql/src/query/abstract-query.d.ts +0 -42
- package/out/zql/src/query/abstract-query.d.ts.map +0 -1
- package/out/zql/src/query/abstract-query.js +0 -405
- package/out/zql/src/query/abstract-query.js.map +0 -1

package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts

@@ -38,13 +38,12 @@ export type Timer = {
     elapsedLap: () => number;
     totalElapsed: () => number;
 };
-export declare const YIELD_THRESHOLD_MS = 200;
 /**
  * Manages the state of IVM pipelines for a given ViewSyncer (i.e. client group).
  */
 export declare class PipelineDriver {
     #private;
-    constructor(lc: LogContext, logConfig: LogConfig, snapshotter: Snapshotter, shardID: ShardID, storage: ClientGroupStorage, clientGroupID: string, inspectorDelegate: InspectorDelegate, enablePlanner?: boolean);
+    constructor(lc: LogContext, logConfig: LogConfig, snapshotter: Snapshotter, shardID: ShardID, storage: ClientGroupStorage, clientGroupID: string, inspectorDelegate: InspectorDelegate, yieldThresholdMs: number, enablePlanner?: boolean);
     /**
      * Initializes the PipelineDriver to the current head of the database.
      * Queries can then be added (i.e. hydrated) with {@link addQuery()}.
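
The substantive change in the pipeline-driver.d.ts hunk above is that the yield threshold moves from the removed module constant YIELD_THRESHOLD_MS (200 ms) to a required yieldThresholdMs constructor argument, so the threshold can differ per PipelineDriver instance. A minimal TypeScript sketch of that constant-to-constructor-parameter pattern, using an illustrative class rather than the real PipelineDriver:

// Sketch only: models the shape of the change, not zero-cache's implementation.
class TimeSlicedWorker {
  readonly #yieldThresholdMs: number;

  constructor(yieldThresholdMs: number) {
    this.#yieldThresholdMs = yieldThresholdMs;
  }

  // Mirrors the idea behind #shouldYield(): give the thread back once a lap runs long.
  shouldYield(elapsedLapMs: number): boolean {
    return elapsedLapMs > this.#yieldThresholdMs;
  }
}

// Callers that want the previous hard-coded behaviour pass the old constant's value.
const worker = new TimeSlicedWorker(200);
console.log(worker.shouldYield(250)); // true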

package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts.map

@@ -1 +1 @@
-
{"version":3,"file":"pipeline-driver.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/view-syncer/pipeline-driver.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAIjD,OAAO,KAAK,EAAC,GAAG,EAAC,MAAM,sCAAsC,CAAC;AAC9D,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,gDAAgD,CAAC;AACjF,OAAO,KAAK,EAAC,GAAG,EAAC,MAAM,uCAAuC,CAAC;AAC/D,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,8CAA8C,CAAC;AAQ7E,OAAO,EAAC,KAAK,KAAK,EAAe,MAAM,qCAAqC,CAAC;AAS7E,OAAO,KAAK,EAAC,kBAAkB,EAAC,MAAM,4CAA4C,CAAC;AAInF,OAAO,EAEL,KAAK,iBAAiB,EACvB,MAAM,gCAAgC,CAAC;AACxC,OAAO,KAAK,EAAC,SAAS,EAAC,MAAM,6BAA6B,CAAC;AAO3D,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,oCAAoC,CAAC;AAC1E,OAAO,EAAC,KAAK,MAAM,EAAC,MAAM,wBAAwB,CAAC;AACnD,OAAO,KAAK,EAAC,cAAc,EAAC,MAAM,gCAAgC,CAAC;AACnE,OAAO,EAAiB,KAAK,OAAO,EAAC,MAAM,uBAAuB,CAAC;AAGnE,OAAO,KAAK,EAAC,WAAW,EAAC,MAAM,kBAAkB,CAAC;AAGlD,MAAM,MAAM,MAAM,GAAG;IACnB,QAAQ,CAAC,IAAI,EAAE,KAAK,CAAC;IACrB,QAAQ,CAAC,SAAS,EAAE,MAAM,CAAC;IAC3B,QAAQ,CAAC,KAAK,EAAE,MAAM,CAAC;IACvB,QAAQ,CAAC,MAAM,EAAE,GAAG,CAAC;IACrB,QAAQ,CAAC,GAAG,EAAE,GAAG,CAAC;CACnB,CAAC;AAEF,MAAM,MAAM,SAAS,GAAG;IACtB,QAAQ,CAAC,IAAI,EAAE,QAAQ,CAAC;IACxB,QAAQ,CAAC,SAAS,EAAE,MAAM,CAAC;IAC3B,QAAQ,CAAC,KAAK,EAAE,MAAM,CAAC;IACvB,QAAQ,CAAC,MAAM,EAAE,GAAG,CAAC;IACrB,QAAQ,CAAC,GAAG,EAAE,SAAS,CAAC;CACzB,CAAC;AAEF,MAAM,MAAM,OAAO,GAAG;IACpB,QAAQ,CAAC,IAAI,EAAE,MAAM,CAAC;IACtB,QAAQ,CAAC,SAAS,EAAE,MAAM,CAAC;IAC3B,QAAQ,CAAC,KAAK,EAAE,MAAM,CAAC;IACvB,QAAQ,CAAC,MAAM,EAAE,GAAG,CAAC;IACrB,QAAQ,CAAC,GAAG,EAAE,GAAG,CAAC;CACnB,CAAC;AAEF,MAAM,MAAM,SAAS,GAAG,MAAM,GAAG,SAAS,GAAG,OAAO,CAAC;AAqBrD,MAAM,MAAM,KAAK,GAAG;IAClB,UAAU,EAAE,MAAM,MAAM,CAAC;IACzB,YAAY,EAAE,MAAM,MAAM,CAAC;CAC5B,CAAC;
+
{"version":3,"file":"pipeline-driver.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/view-syncer/pipeline-driver.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAIjD,OAAO,KAAK,EAAC,GAAG,EAAC,MAAM,sCAAsC,CAAC;AAC9D,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,gDAAgD,CAAC;AACjF,OAAO,KAAK,EAAC,GAAG,EAAC,MAAM,uCAAuC,CAAC;AAC/D,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,8CAA8C,CAAC;AAQ7E,OAAO,EAAC,KAAK,KAAK,EAAe,MAAM,qCAAqC,CAAC;AAS7E,OAAO,KAAK,EAAC,kBAAkB,EAAC,MAAM,4CAA4C,CAAC;AAInF,OAAO,EAEL,KAAK,iBAAiB,EACvB,MAAM,gCAAgC,CAAC;AACxC,OAAO,KAAK,EAAC,SAAS,EAAC,MAAM,6BAA6B,CAAC;AAO3D,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,oCAAoC,CAAC;AAC1E,OAAO,EAAC,KAAK,MAAM,EAAC,MAAM,wBAAwB,CAAC;AACnD,OAAO,KAAK,EAAC,cAAc,EAAC,MAAM,gCAAgC,CAAC;AACnE,OAAO,EAAiB,KAAK,OAAO,EAAC,MAAM,uBAAuB,CAAC;AAGnE,OAAO,KAAK,EAAC,WAAW,EAAC,MAAM,kBAAkB,CAAC;AAGlD,MAAM,MAAM,MAAM,GAAG;IACnB,QAAQ,CAAC,IAAI,EAAE,KAAK,CAAC;IACrB,QAAQ,CAAC,SAAS,EAAE,MAAM,CAAC;IAC3B,QAAQ,CAAC,KAAK,EAAE,MAAM,CAAC;IACvB,QAAQ,CAAC,MAAM,EAAE,GAAG,CAAC;IACrB,QAAQ,CAAC,GAAG,EAAE,GAAG,CAAC;CACnB,CAAC;AAEF,MAAM,MAAM,SAAS,GAAG;IACtB,QAAQ,CAAC,IAAI,EAAE,QAAQ,CAAC;IACxB,QAAQ,CAAC,SAAS,EAAE,MAAM,CAAC;IAC3B,QAAQ,CAAC,KAAK,EAAE,MAAM,CAAC;IACvB,QAAQ,CAAC,MAAM,EAAE,GAAG,CAAC;IACrB,QAAQ,CAAC,GAAG,EAAE,SAAS,CAAC;CACzB,CAAC;AAEF,MAAM,MAAM,OAAO,GAAG;IACpB,QAAQ,CAAC,IAAI,EAAE,MAAM,CAAC;IACtB,QAAQ,CAAC,SAAS,EAAE,MAAM,CAAC;IAC3B,QAAQ,CAAC,KAAK,EAAE,MAAM,CAAC;IACvB,QAAQ,CAAC,MAAM,EAAE,GAAG,CAAC;IACrB,QAAQ,CAAC,GAAG,EAAE,GAAG,CAAC;CACnB,CAAC;AAEF,MAAM,MAAM,SAAS,GAAG,MAAM,GAAG,SAAS,GAAG,OAAO,CAAC;AAqBrD,MAAM,MAAM,KAAK,GAAG;IAClB,UAAU,EAAE,MAAM,MAAM,CAAC;IACzB,YAAY,EAAE,MAAM,MAAM,CAAC;CAC5B,CAAC;AAQF;;GAEG;AACH,qBAAa,cAAc;;gBAqCvB,EAAE,EAAE,UAAU,EACd,SAAS,EAAE,SAAS,EACpB,WAAW,EAAE,WAAW,EACxB,OAAO,EAAE,OAAO,EAChB,OAAO,EAAE,kBAAkB,EAC3B,aAAa,EAAE,MAAM,EACrB,iBAAiB,EAAE,iBAAiB,EACpC,gBAAgB,EAAE,MAAM,EACxB,aAAa,CAAC,EAAE,OAAO;IAYzB;;;;;OAKG;IACH,IAAI,CAAC,YAAY,EAAE,YAAY;IAM/B;;OAEG;IACH,WAAW,IAAI,OAAO;IAItB;;;;OAIG;IACH,KAAK,CAAC,YAAY,EAAE,YAAY;IAgChC,mFAAmF;IACnF,IAAI,cAAc,IAAI,MAAM,CAE3B;IAED;;;;OAIG;IACH,cAAc,IAAI,MAAM;IAKxB;;;;OAIG;IACH,qBAAqB,IAAI,cAAc;IAKvC;;OAEG;IACH,kBAAkB,IAAI,iBAAiB,GAAG,IAAI;IAkB9C,kBAAkB,IAAI,MAAM;IAqB5B;;;OAGG;IACH,OAAO;IAKP,6DAA6D;IAC7D,YAAY,IAAI;QACd,oBAAoB,EAAE,GAAG,CAAC,MAAM,CAAC;QACjC,cAAc,EAAE,GAAG,CACjB,MAAM,EACN;YACE,kBAAkB,EAAE,MAAM,CAAC;YAC3B,cAAc,EAAE,GAAG,CAAC;SACrB,EAAE,CACJ;KACF;IAmBD,oBAAoB,IAAI,MAAM;IAQ9B;;;;;;;;;;;;;;OAcG;IACF,QAAQ,CACP,kBAAkB,EAAE,MAAM,EAC1B,OAAO,EAAE,MAAM,EACf,KAAK,EAAE,GAAG,EACV,KAAK,EAAE,KAAK,GACX,QAAQ,CAAC,SAAS,GAAG,OAAO,CAAC;IA8FhC;;;OAGG;IACH,WAAW,CAAC,IAAI,EAAE,MAAM;IAQxB;;;;OAIG;IACH,MAAM,CAAC,KAAK,EAAE,MAAM,EAAE,EAAE,EAAE,MAAM,GAAG,GAAG,GAAG,SAAS;IAMlD;;;;;;;;;;OAUG;IACH,OAAO,CAAC,KAAK,EAAE,KAAK,GAAG;QACrB,OAAO,EAAE,MAAM,CAAC;QAChB,UAAU,EAAE,MAAM,CAAC;QACnB,OAAO,EAAE,QAAQ,CAAC,SAAS,GAAG,OAAO,CAAC,CAAC;KACxC;CAkNF;AAkID;;;;GAIG;AACH,wBAAiB,OAAO,CACtB,KAAK,EAAE,KAAK,EACZ,IAAI,EAAE,MAAM,EACZ,YAAY,EAAE,YAAY,GACzB,QAAQ,CAAC,SAAS,GAAG,OAAO,CAAC,CAQ/B;AAED,wBAAiB,eAAe,CAC9B,KAAK,EAAE,KAAK,EACZ,IAAI,EAAE,MAAM,EACZ,WAAW,EAAE,GAAG,CAAC,MAAM,EAAE,UAAU,CAAC,GACnC,QAAQ,CAAC,SAAS,GAAG,OAAO,CAAC,CAQ/B"}

package/out/zero-cache/src/services/view-syncer/pipeline-driver.js

@@ -15,8 +15,7 @@ import { upstreamSchema } from "../../types/shards.js";
 import { getSubscriptionState } from "../replicator/schema/replication-state.js";
 import { checkClientSchema } from "./client-schema.js";
 import { ResetPipelinesSignal } from "./snapshotter.js";
-const MIN_ADVANCEMENT_TIME_LIMIT_MS = 30;
-const YIELD_THRESHOLD_MS = 200;
+const MIN_ADVANCEMENT_TIME_LIMIT_MS = 50;
 class PipelineDriver {
   #tables = /* @__PURE__ */ new Map();
   // We probs need the original query hash
@@ -30,6 +29,7 @@ class PipelineDriver {
   #logConfig;
   #tableSpecs = /* @__PURE__ */ new Map();
   #costModels;
+  #yieldThresholdMs;
   #streamer = null;
   #hydrateContext = null;
   #advanceContext = null;
@@ -46,7 +46,7 @@ class PipelineDriver {
     "Number of rows deleted because they conflicted with added row"
   );
   #inspectorDelegate;
-  constructor(lc, logConfig, snapshotter, shardID, storage, clientGroupID, inspectorDelegate, enablePlanner) {
+  constructor(lc, logConfig, snapshotter, shardID, storage, clientGroupID, inspectorDelegate, yieldThresholdMs, enablePlanner) {
     this.#lc = lc.withContext("clientGroupID", clientGroupID);
     this.#snapshotter = snapshotter;
     this.#storage = storage;
@@ -54,6 +54,7 @@ class PipelineDriver {
     this.#logConfig = logConfig;
     this.#inspectorDelegate = inspectorDelegate;
     this.#costModels = enablePlanner ? /* @__PURE__ */ new WeakMap() : void 0;
+    this.#yieldThresholdMs = yieldThresholdMs;
   }
   /**
    * Initializes the PipelineDriver to the current head of the database.
@@ -435,7 +436,7 @@ class PipelineDriver {
   }
   #shouldYield() {
     if (this.#hydrateContext) {
-      return this.#hydrateContext.timer.elapsedLap() > YIELD_THRESHOLD_MS;
+      return this.#hydrateContext.timer.elapsedLap() > this.#yieldThresholdMs;
     }
     if (this.#advanceContext) {
       return this.#shouldAdvanceYieldMaybeAbortAdvance();
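
For context on the check above: the Timer handed to hydration and advancement exposes elapsedLap() and totalElapsed() (see the Timer type in the .d.ts hunk), and the driver streams a 'yield' sentinel whenever the current lap exceeds the threshold so the caller can give up the thread. A self-contained sketch of that lap-based time slicing, assuming the caller restarts the lap when it resumes; the wiring here is illustrative, not zero-cache's actual code:

// The Timer shape matches the exported type; everything else is a sketch.
type Timer = {
  elapsedLap: () => number;
  totalElapsed: () => number;
};

function makeTimer(): Timer & {startLap: () => void} {
  const start = performance.now();
  let lapStart = start;
  return {
    elapsedLap: () => performance.now() - lapStart,
    totalElapsed: () => performance.now() - start,
    startLap: () => {
      lapStart = performance.now();
    },
  };
}

// Interleaves a 'yield' sentinel with real values once the current lap runs past
// yieldThresholdMs, mirroring the Iterable<RowChange | 'yield'> streams returned
// by addQuery() and advance().
function* timeSliced<T>(
  items: Iterable<T>,
  timer: Timer,
  yieldThresholdMs: number,
): Generator<T | 'yield'> {
  for (const item of items) {
    if (timer.elapsedLap() > yieldThresholdMs) {
      yield 'yield';
    }
    yield item;
  }
}

// Caller-controlled loop: on 'yield', hand the thread back, then start a new lap.
async function consumeAll(work: number[], yieldThresholdMs: number) {
  const timer = makeTimer();
  for (const item of timeSliced(work, timer, yieldThresholdMs)) {
    if (item === 'yield') {
      await new Promise(resolve => setTimeout(resolve, 0));
      timer.startLap();
      continue;
    }
    // ...process item...
  }
}
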
@@ -468,7 +469,7 @@ class PipelineDriver {
         `Advancement exceeded timeout at ${pos} of ${numChanges} changes after ${elapsed} ms. Advancement time limited based on total hydration time of ${totalHydrationTimeMs} ms.`
       );
     }
-    return advanceTimer.elapsedLap() > YIELD_THRESHOLD_MS;
+    return advanceTimer.elapsedLap() > this.#yieldThresholdMs;
   }
   /** Implements `BuilderDelegate.createStorage()` */
   #createStorage() {
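
The abort rule that raises ResetPipelinesSignal is unchanged in shape here; only the final lap check now compares against this.#yieldThresholdMs. Per the embedded source, advancement is cancelled once it has run for at least MIN_ADVANCEMENT_TIME_LIMIT_MS and has either exceeded the client group's total hydration time, or exceeded half of it while getting through no more than half of the changes. A standalone restatement of that condition (the function form is illustrative):

// Circuit-breaker sketch for #shouldAdvanceYieldMaybeAbortAdvance().
const MIN_ADVANCEMENT_TIME_LIMIT_MS = 50; // 30 in the previous canary, per the old source map

function shouldAbortAdvance(
  elapsedMs: number,
  totalHydrationTimeMs: number,
  pos: number,        // changes processed so far
  numChanges: number, // total changes in this advancement
): boolean {
  return (
    elapsedMs > MIN_ADVANCEMENT_TIME_LIMIT_MS &&
    (elapsedMs > totalHydrationTimeMs ||
      (elapsedMs > totalHydrationTimeMs / 2 && pos <= numChanges / 2))
  );
}

// Example: 120 ms elapsed against a 100 ms total hydration time aborts the advance.
console.log(shouldAbortAdvance(120, 100, 40, 50)); // true
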
@@ -626,7 +627,6 @@ function mustGetPrimaryKey(primaryKeys, table) {
 }
 export {
   PipelineDriver,
-  YIELD_THRESHOLD_MS,
   hydrate,
   hydrateInternal
 };

package/out/zero-cache/src/services/view-syncer/pipeline-driver.js.map

@@ -1 +1 @@
-
{"version":3,"file":"pipeline-driver.js","sources":["../../../../../../zero-cache/src/services/view-syncer/pipeline-driver.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {assert, unreachable} from '../../../../shared/src/asserts.ts';\nimport {deepEqual, type JSONValue} from '../../../../shared/src/json.ts';\nimport {must} from '../../../../shared/src/must.ts';\nimport type {AST} from '../../../../zero-protocol/src/ast.ts';\nimport type {ClientSchema} from '../../../../zero-protocol/src/client-schema.ts';\nimport type {Row} from '../../../../zero-protocol/src/data.ts';\nimport type {PrimaryKey} from '../../../../zero-protocol/src/primary-key.ts';\nimport {buildPipeline} from '../../../../zql/src/builder/builder.ts';\nimport {\n Debug,\n runtimeDebugFlags,\n} from '../../../../zql/src/builder/debug-delegate.ts';\nimport type {Change} from '../../../../zql/src/ivm/change.ts';\nimport type {Node} from '../../../../zql/src/ivm/data.ts';\nimport {type Input, type Storage} from '../../../../zql/src/ivm/operator.ts';\nimport type {SourceSchema} from '../../../../zql/src/ivm/schema.ts';\nimport type {\n Source,\n SourceChange,\n SourceInput,\n} from '../../../../zql/src/ivm/source.ts';\nimport type {ConnectionCostModel} from '../../../../zql/src/planner/planner-connection.ts';\nimport {MeasurePushOperator} from '../../../../zql/src/query/measure-push-operator.ts';\nimport type {ClientGroupStorage} from '../../../../zqlite/src/database-storage.ts';\nimport type {Database} from '../../../../zqlite/src/db.ts';\nimport {createSQLiteCostModel} from '../../../../zqlite/src/sqlite-cost-model.ts';\nimport {TableSource} from '../../../../zqlite/src/table-source.ts';\nimport {\n reloadPermissionsIfChanged,\n type LoadedPermissions,\n} from '../../auth/load-permissions.ts';\nimport type {LogConfig} from '../../config/zero-config.ts';\nimport {computeZqlSpecs, mustGetTableSpec} from '../../db/lite-tables.ts';\nimport type {LiteAndZqlSpec, LiteTableSpec} from '../../db/specs.ts';\nimport {\n getOrCreateCounter,\n getOrCreateHistogram,\n} from '../../observability/metrics.ts';\nimport type {InspectorDelegate} from '../../server/inspector-delegate.ts';\nimport {type RowKey} from '../../types/row-key.ts';\nimport type {SchemaVersions} from '../../types/schema-versions.ts';\nimport {upstreamSchema, type ShardID} from '../../types/shards.ts';\nimport {getSubscriptionState} from '../replicator/schema/replication-state.ts';\nimport {checkClientSchema} from './client-schema.ts';\nimport type {Snapshotter} from './snapshotter.ts';\nimport {ResetPipelinesSignal, type SnapshotDiff} from './snapshotter.ts';\n\nexport type RowAdd = {\n readonly type: 'add';\n readonly queryHash: string;\n readonly table: string;\n readonly rowKey: Row;\n readonly row: Row;\n};\n\nexport type RowRemove = {\n readonly type: 'remove';\n readonly queryHash: string;\n readonly table: string;\n readonly rowKey: Row;\n readonly row: undefined;\n};\n\nexport type RowEdit = {\n readonly type: 'edit';\n readonly queryHash: string;\n readonly table: string;\n readonly rowKey: Row;\n readonly row: Row;\n};\n\nexport type RowChange = RowAdd | RowRemove | RowEdit;\n\ntype Pipeline = {\n readonly input: Input;\n readonly hydrationTimeMs: number;\n readonly originalHash: string;\n readonly transformedAst: AST; // Optional, only set after hydration\n readonly transformationHash: string; // The hash of the transformed AST\n};\n\ntype AdvanceContext = {\n readonly timer: Timer;\n readonly totalHydrationTimeMs: number;\n 
readonly numChanges: number;\n pos: number;\n};\n\ntype HydrateContext = {\n readonly timer: Timer;\n};\n\nexport type Timer = {\n elapsedLap: () => number;\n totalElapsed: () => number;\n};\n\n/**\n * No matter how fast hydration is, advancement is given at least this long to\n * complete before doing a pipeline reset.\n */\nconst MIN_ADVANCEMENT_TIME_LIMIT_MS = 30;\nexport const YIELD_THRESHOLD_MS = 200;\n\n/**\n * Manages the state of IVM pipelines for a given ViewSyncer (i.e. client group).\n */\nexport class PipelineDriver {\n readonly #tables = new Map<string, TableSource>();\n // We probs need the original query hash\n // so we can decide not to re-transform a custom query\n // that is already hydrated.\n readonly #pipelines = new Map<string, Pipeline>();\n\n readonly #lc: LogContext;\n readonly #snapshotter: Snapshotter;\n readonly #storage: ClientGroupStorage;\n readonly #shardID: ShardID;\n readonly #logConfig: LogConfig;\n readonly #tableSpecs = new Map<string, LiteAndZqlSpec>();\n readonly #costModels: WeakMap<Database, ConnectionCostModel> | undefined;\n #streamer: Streamer | null = null;\n #hydrateContext: HydrateContext | null = null;\n #advanceContext: AdvanceContext | null = null;\n #replicaVersion: string | null = null;\n #primaryKeys: Map<string, PrimaryKey> | null = null;\n #permissions: LoadedPermissions | null = null;\n\n readonly #advanceTime = getOrCreateHistogram('sync', 'ivm.advance-time', {\n description:\n 'Time to advance all queries for a given client group for in response to a single change.',\n unit: 's',\n });\n\n readonly #conflictRowsDeleted = getOrCreateCounter(\n 'sync',\n 'ivm.conflict-rows-deleted',\n 'Number of rows deleted because they conflicted with added row',\n );\n\n readonly #inspectorDelegate: InspectorDelegate;\n\n constructor(\n lc: LogContext,\n logConfig: LogConfig,\n snapshotter: Snapshotter,\n shardID: ShardID,\n storage: ClientGroupStorage,\n clientGroupID: string,\n inspectorDelegate: InspectorDelegate,\n enablePlanner?: boolean,\n ) {\n this.#lc = lc.withContext('clientGroupID', clientGroupID);\n this.#snapshotter = snapshotter;\n this.#storage = storage;\n this.#shardID = shardID;\n this.#logConfig = logConfig;\n this.#inspectorDelegate = inspectorDelegate;\n this.#costModels = enablePlanner ? new WeakMap() : undefined;\n }\n\n /**\n * Initializes the PipelineDriver to the current head of the database.\n * Queries can then be added (i.e. hydrated) with {@link addQuery()}.\n *\n * Must only be called once.\n */\n init(clientSchema: ClientSchema) {\n assert(!this.#snapshotter.initialized(), 'Already initialized');\n this.#snapshotter.init();\n this.#initAndResetCommon(clientSchema);\n }\n\n /**\n * @returns Whether the PipelineDriver has been initialized.\n */\n initialized(): boolean {\n return this.#snapshotter.initialized();\n }\n\n /**\n * Clears the current pipelines and TableSources, returning the PipelineDriver\n * to its initial state. 
This should be called in response to a schema change,\n * as TableSources need to be recomputed.\n */\n reset(clientSchema: ClientSchema) {\n for (const {input} of this.#pipelines.values()) {\n input.destroy();\n }\n this.#pipelines.clear();\n this.#tables.clear();\n this.#initAndResetCommon(clientSchema);\n }\n\n #initAndResetCommon(clientSchema: ClientSchema) {\n const {db} = this.#snapshotter.current();\n const fullTables = new Map<string, LiteTableSpec>();\n computeZqlSpecs(this.#lc, db.db, this.#tableSpecs, fullTables);\n checkClientSchema(\n this.#shardID,\n clientSchema,\n this.#tableSpecs,\n fullTables,\n );\n const primaryKeys = this.#primaryKeys ?? new Map<string, PrimaryKey>();\n this.#primaryKeys = primaryKeys;\n primaryKeys.clear();\n for (const [table, spec] of this.#tableSpecs.entries()) {\n if (table.startsWith(upstreamSchema(this.#shardID))) {\n primaryKeys.set(table, spec.tableSpec.primaryKey);\n }\n }\n buildPrimaryKeys(clientSchema, primaryKeys);\n const {replicaVersion} = getSubscriptionState(db);\n this.#replicaVersion = replicaVersion;\n }\n\n /** @returns The replica version. The PipelineDriver must have been initialized. */\n get replicaVersion(): string {\n return must(this.#replicaVersion, 'Not yet initialized');\n }\n\n /**\n * Returns the current version of the database. This will reflect the\n * latest version change when calling {@link advance()} once the\n * iteration has begun.\n */\n currentVersion(): string {\n assert(this.initialized(), 'Not yet initialized');\n return this.#snapshotter.current().version;\n }\n\n /**\n * Returns the current supported schema version range of the database. This\n * will reflect changes to supported schema version range when calling\n * {@link advance()} once the iteration has begun.\n */\n currentSchemaVersions(): SchemaVersions {\n assert(this.initialized(), 'Not yet initialized');\n return this.#snapshotter.current().schemaVersions;\n }\n\n /**\n * Returns the current upstream {app}.permissions, or `null` if none are defined.\n */\n currentPermissions(): LoadedPermissions | null {\n assert(this.initialized(), 'Not yet initialized');\n const res = reloadPermissionsIfChanged(\n this.#lc,\n this.#snapshotter.current().db,\n this.#shardID.appID,\n this.#permissions,\n );\n if (res.changed) {\n this.#permissions = res.permissions;\n this.#lc.debug?.(\n 'Reloaded permissions',\n JSON.stringify(this.#permissions),\n );\n }\n return this.#permissions;\n }\n\n advanceWithoutDiff(): string {\n const {db, version} = this.#snapshotter.advanceWithoutDiff().curr;\n for (const table of this.#tables.values()) {\n table.setDB(db.db);\n }\n return version;\n }\n\n #ensureCostModelExistsIfEnabled(db: Database) {\n let existing = this.#costModels?.get(db);\n if (existing) {\n return existing;\n }\n if (this.#costModels) {\n const costModel = createSQLiteCostModel(db, this.#tableSpecs);\n this.#costModels.set(db, costModel);\n return costModel;\n }\n return undefined;\n }\n\n /**\n * Clears storage used for the pipelines. Call this when the\n * PipelineDriver will no longer be used.\n */\n destroy() {\n this.#storage.destroy();\n this.#snapshotter.destroy();\n }\n\n /** @return The Set of query hashes for all added queries. 
*/\n addedQueries(): [\n transformationHashes: Set<string>,\n byOriginalHash: Map<\n string,\n {\n transformationHash: string;\n transformedAst: AST;\n }[]\n >,\n ] {\n const byOriginalHash = new Map<\n string,\n {transformationHash: string; transformedAst: AST}[]\n >();\n for (const pipeline of this.#pipelines.values()) {\n const {originalHash, transformedAst, transformationHash} = pipeline;\n\n if (!byOriginalHash.has(originalHash)) {\n byOriginalHash.set(originalHash, []);\n }\n byOriginalHash.get(originalHash)!.push({\n transformationHash,\n transformedAst,\n });\n }\n return [new Set(this.#pipelines.keys()), byOriginalHash];\n }\n\n totalHydrationTimeMs(): number {\n let total = 0;\n for (const pipeline of this.#pipelines.values()) {\n total += pipeline.hydrationTimeMs;\n }\n return total;\n }\n\n /**\n * Adds a pipeline for the query. The method will hydrate the query using the\n * driver's current snapshot of the database and return a stream of results.\n * Henceforth, updates to the query will be returned when the driver is\n * {@link advance}d. The query and its pipeline can be removed with\n * {@link removeQuery()}.\n *\n * If a query with an identical hash has already been added, this method is a\n * no-op and no RowChanges are generated.\n *\n * @param timer The caller-controlled {@link Timer} used to determine the\n * final hydration time. (The caller may pause and resume the timer\n * when yielding the thread for time-slicing).\n * @return The rows from the initial hydration of the query.\n */\n *addQuery(\n transformationHash: string,\n queryID: string,\n query: AST,\n timer: Timer,\n ): Iterable<RowChange | 'yield'> {\n assert(this.initialized());\n this.#inspectorDelegate.addQuery(transformationHash, queryID, query);\n if (this.#pipelines.has(transformationHash)) {\n this.#lc.info?.(`query ${transformationHash} already added`, query);\n return;\n }\n const debugDelegate = runtimeDebugFlags.trackRowsVended\n ? 
new Debug()\n : undefined;\n\n const costModel = this.#ensureCostModelExistsIfEnabled(\n this.#snapshotter.current().db.db,\n );\n\n const input = buildPipeline(\n query,\n {\n debug: debugDelegate,\n enableNotExists: true, // Server-side can handle NOT EXISTS\n getSource: name => this.#getSource(name),\n createStorage: () => this.#createStorage(),\n decorateSourceInput: (input: SourceInput, _queryID: string): Input =>\n new MeasurePushOperator(\n input,\n transformationHash,\n this.#inspectorDelegate,\n 'query-update-server',\n ),\n decorateInput: input => input,\n addEdge() {},\n decorateFilterInput: input => input,\n },\n queryID,\n costModel,\n );\n const schema = input.getSchema();\n input.setOutput({\n push: change => {\n const streamer = this.#streamer;\n assert(streamer, 'must #startAccumulating() before pushing changes');\n streamer.accumulate(transformationHash, schema, [change]);\n return [];\n },\n });\n\n assert(this.#advanceContext === null);\n this.#hydrateContext = {\n timer,\n };\n try {\n yield* hydrateInternal(\n input,\n transformationHash,\n must(this.#primaryKeys),\n );\n } finally {\n this.#hydrateContext = null;\n }\n\n const hydrationTimeMs = timer.totalElapsed();\n if (runtimeDebugFlags.trackRowCountsVended) {\n if (hydrationTimeMs > this.#logConfig.slowHydrateThreshold) {\n let totalRowsConsidered = 0;\n const lc = this.#lc\n .withContext('hash', transformationHash)\n .withContext('hydrationTimeMs', hydrationTimeMs);\n for (const tableName of this.#tables.keys()) {\n const entries = Object.entries(\n debugDelegate?.getVendedRowCounts()[tableName] ?? {},\n );\n totalRowsConsidered += entries.reduce(\n (acc, entry) => acc + entry[1],\n 0,\n );\n lc.info?.(tableName + ' VENDED: ', entries);\n }\n lc.info?.(`Total rows considered: ${totalRowsConsidered}`);\n }\n }\n debugDelegate?.reset();\n\n // Note: This hydrationTime is a wall-clock overestimate, as it does\n // not take time slicing into account. The view-syncer resets this\n // to a more precise processing-time measurement with setHydrationTime().\n this.#pipelines.set(transformationHash, {\n input,\n hydrationTimeMs,\n originalHash: queryID,\n transformedAst: query,\n transformationHash,\n });\n }\n\n /**\n * Removes the pipeline for the query. This is a no-op if the query\n * was not added.\n */\n removeQuery(hash: string) {\n const pipeline = this.#pipelines.get(hash);\n if (pipeline) {\n this.#pipelines.delete(hash);\n pipeline.input.destroy();\n }\n }\n\n /**\n * Returns the value of the row with the given primary key `pk`,\n * or `undefined` if there is no such row. The pipeline must have been\n * initialized.\n */\n getRow(table: string, pk: RowKey): Row | undefined {\n assert(this.initialized(), 'Not yet initialized');\n const source = must(this.#tables.get(table));\n return source.getRow(pk as Row);\n }\n\n /**\n * Advances to the new head of the database.\n *\n * @param timer The caller-controlled {@link Timer} that will be used to\n * measure the progress of the advancement and abort with a\n * {@link ResetPipelinesSignal} if it is estimated to take longer\n * than a hydration.\n * @return The resulting row changes for all added queries. 
Note that the\n * `changes` must be iterated over in their entirety in order to\n * advance the database snapshot.\n */\n advance(timer: Timer): {\n version: string;\n numChanges: number;\n changes: Iterable<RowChange | 'yield'>;\n } {\n assert(this.initialized());\n const diff = this.#snapshotter.advance(this.#tableSpecs);\n const {prev, curr, changes} = diff;\n this.#lc.debug?.(\n `advance ${prev.version} => ${curr.version}: ${changes} changes`,\n );\n\n return {\n version: curr.version,\n numChanges: changes,\n changes: this.#advance(diff, timer, changes),\n };\n }\n\n *#advance(\n diff: SnapshotDiff,\n timer: Timer,\n numChanges: number,\n ): Iterable<RowChange | 'yield'> {\n assert(this.#hydrateContext === null);\n this.#advanceContext = {\n timer,\n totalHydrationTimeMs: this.totalHydrationTimeMs(),\n numChanges,\n pos: 0,\n };\n try {\n for (const {table, prevValues, nextValue} of diff) {\n // Advance progress is checked each time a row is fetched\n // from a TableSource during push processing, but some pushes\n // don't read any rows. Check progress here before processing\n // the next change.\n if (this.#shouldAdvanceYieldMaybeAbortAdvance()) {\n yield 'yield';\n }\n const start = performance.now();\n let type;\n try {\n const tableSource = this.#tables.get(table);\n if (!tableSource) {\n // no pipelines read from this table, so no need to process the change\n continue;\n }\n const primaryKey = mustGetPrimaryKey(this.#primaryKeys, table);\n let editOldRow: Row | undefined = undefined;\n for (const prevValue of prevValues) {\n if (\n nextValue &&\n deepEqual(\n getRowKey(primaryKey, prevValue as Row) as JSONValue,\n getRowKey(primaryKey, nextValue as Row) as JSONValue,\n )\n ) {\n editOldRow = prevValue;\n } else {\n if (nextValue) {\n this.#conflictRowsDeleted.add(1);\n }\n yield* this.#push(tableSource, {\n type: 'remove',\n row: prevValue,\n });\n }\n }\n if (nextValue) {\n if (editOldRow) {\n yield* this.#push(tableSource, {\n type: 'edit',\n row: nextValue,\n oldRow: editOldRow,\n });\n } else {\n yield* this.#push(tableSource, {\n type: 'add',\n row: nextValue,\n });\n }\n }\n } finally {\n this.#advanceContext.pos++;\n }\n\n const elapsed = performance.now() - start;\n this.#advanceTime.record(elapsed / 1000, {\n table,\n type,\n });\n }\n\n // Set the new snapshot on all TableSources.\n const {curr} = diff;\n for (const table of this.#tables.values()) {\n table.setDB(curr.db.db);\n }\n this.#ensureCostModelExistsIfEnabled(curr.db.db);\n this.#lc.debug?.(`Advanced to ${curr.version}`);\n } finally {\n this.#advanceContext = null;\n }\n }\n\n /** Implements `BuilderDelegate.getSource()` */\n #getSource(tableName: string): Source {\n let source = this.#tables.get(tableName);\n if (source) {\n return source;\n }\n\n const tableSpec = mustGetTableSpec(this.#tableSpecs, tableName);\n const primaryKey = mustGetPrimaryKey(this.#primaryKeys, tableName);\n\n const {db} = this.#snapshotter.current();\n source = new TableSource(\n this.#lc,\n this.#logConfig,\n db.db,\n tableName,\n tableSpec.zqlSpec,\n primaryKey,\n () => this.#shouldYield(),\n );\n this.#tables.set(tableName, source);\n this.#lc.debug?.(`created TableSource for ${tableName}`);\n return source;\n }\n\n #shouldYield(): boolean {\n if (this.#hydrateContext) {\n return this.#hydrateContext.timer.elapsedLap() > YIELD_THRESHOLD_MS;\n }\n if (this.#advanceContext) {\n return this.#shouldAdvanceYieldMaybeAbortAdvance();\n }\n throw new Error('shouldYield called outside of hydration or advancement');\n }\n\n /**\n * Cancel 
the advancement processing, by throwing a ResetPipelinesSignal, if\n * it has taken longer than half the total hydration time to make it through\n * half of the advancement, or if processing time exceeds total hydration\n * time. This serves as both a circuit breaker for very large transactions,\n * as well as a bound on the amount of time the previous connection locks\n * the inactive WAL file (as the lock prevents WAL2 from switching to the\n * free WAL when the current one is over the size limit, which can make\n * the WAL grow continuously and compound slowness).\n * This is checked:\n * 1. before starting to process each change in an advancement is processed\n * 2. whenever a row is fetched from a TableSource during push processing\n */\n #shouldAdvanceYieldMaybeAbortAdvance(): boolean {\n const {\n pos,\n numChanges,\n timer: advanceTimer,\n totalHydrationTimeMs,\n } = must(this.#advanceContext);\n const elapsed = advanceTimer.totalElapsed();\n if (\n elapsed > MIN_ADVANCEMENT_TIME_LIMIT_MS &&\n (elapsed > totalHydrationTimeMs ||\n (elapsed > totalHydrationTimeMs / 2 && pos <= numChanges / 2))\n ) {\n throw new ResetPipelinesSignal(\n `Advancement exceeded timeout at ${pos} of ${numChanges} changes ` +\n `after ${elapsed} ms. Advancement time limited based on total ` +\n `hydration time of ${totalHydrationTimeMs} ms.`,\n );\n }\n return advanceTimer.elapsedLap() > YIELD_THRESHOLD_MS;\n }\n\n /** Implements `BuilderDelegate.createStorage()` */\n #createStorage(): Storage {\n return this.#storage.createStorage();\n }\n\n *#push(\n source: TableSource,\n change: SourceChange,\n ): Iterable<RowChange | 'yield'> {\n this.#startAccumulating();\n try {\n for (const val of source.genPush(change)) {\n if (val === 'yield') {\n yield 'yield';\n }\n for (const changeOrYield of this.#stopAccumulating().stream()) {\n yield changeOrYield;\n }\n this.#startAccumulating();\n }\n } finally {\n if (this.#streamer !== null) {\n this.#stopAccumulating();\n }\n }\n }\n\n #startAccumulating() {\n assert(this.#streamer === null);\n this.#streamer = new Streamer(must(this.#primaryKeys));\n }\n\n #stopAccumulating(): Streamer {\n const streamer = this.#streamer;\n assert(streamer);\n this.#streamer = null;\n return streamer;\n }\n}\n\nclass Streamer {\n readonly #primaryKeys: Map<string, PrimaryKey>;\n\n constructor(primaryKeys: Map<string, PrimaryKey>) {\n this.#primaryKeys = primaryKeys;\n }\n\n readonly #changes: [\n hash: string,\n schema: SourceSchema,\n changes: Iterable<Change | 'yield'>,\n ][] = [];\n\n accumulate(\n hash: string,\n schema: SourceSchema,\n changes: Iterable<Change | 'yield'>,\n ): this {\n this.#changes.push([hash, schema, changes]);\n return this;\n }\n\n *stream(): Iterable<RowChange | 'yield'> {\n for (const [hash, schema, changes] of this.#changes) {\n yield* this.#streamChanges(hash, schema, changes);\n }\n }\n\n *#streamChanges(\n queryHash: string,\n schema: SourceSchema,\n changes: Iterable<Change | 'yield'>,\n ): Iterable<RowChange | 'yield'> {\n // We do not sync rows gathered by the permissions\n // system to the client.\n if (schema.system === 'permissions') {\n return;\n }\n\n for (const change of changes) {\n if (change === 'yield') {\n yield change;\n continue;\n }\n const {type} = change;\n\n switch (type) {\n case 'add':\n case 'remove': {\n yield* this.#streamNodes(queryHash, schema, type, () => [\n change.node,\n ]);\n break;\n }\n case 'child': {\n const {child} = change;\n const childSchema = must(\n schema.relationships[child.relationshipName],\n );\n\n yield* 
this.#streamChanges(queryHash, childSchema, [child.change]);\n break;\n }\n case 'edit':\n yield* this.#streamNodes(queryHash, schema, type, () => [\n {row: change.node.row, relationships: {}},\n ]);\n break;\n default:\n unreachable(type);\n }\n }\n }\n\n *#streamNodes(\n queryHash: string,\n schema: SourceSchema,\n op: 'add' | 'remove' | 'edit',\n nodes: () => Iterable<Node | 'yield'>,\n ): Iterable<RowChange | 'yield'> {\n const {tableName: table, system} = schema;\n\n const primaryKey = must(this.#primaryKeys.get(table));\n\n // We do not sync rows gathered by the permissions\n // system to the client.\n if (system === 'permissions') {\n return;\n }\n\n for (const node of nodes()) {\n if (node === 'yield') {\n yield node;\n continue;\n }\n const {relationships, row} = node;\n const rowKey = getRowKey(primaryKey, row);\n\n yield {\n type: op,\n queryHash,\n table,\n rowKey,\n row: op === 'remove' ? undefined : row,\n } as RowChange;\n\n for (const [relationship, children] of Object.entries(relationships)) {\n const childSchema = must(schema.relationships[relationship]);\n yield* this.#streamNodes(queryHash, childSchema, op, children);\n }\n }\n }\n}\n\nfunction* toAdds(nodes: Iterable<Node | 'yield'>): Iterable<Change | 'yield'> {\n for (const node of nodes) {\n if (node === 'yield') {\n yield node;\n continue;\n }\n yield {type: 'add', node};\n }\n}\n\nfunction getRowKey(cols: PrimaryKey, row: Row): RowKey {\n return Object.fromEntries(cols.map(col => [col, must(row[col])]));\n}\n\n/**\n * Core hydration logic used by {@link PipelineDriver#addQuery}, extracted to a\n * function for reuse by bin-analyze so that bin-analyze's hydration logic\n * is as close as possible to zero-cache's real hydration logic.\n */\nexport function* hydrate(\n input: Input,\n hash: string,\n clientSchema: ClientSchema,\n): Iterable<RowChange | 'yield'> {\n const res = input.fetch({});\n const streamer = new Streamer(buildPrimaryKeys(clientSchema)).accumulate(\n hash,\n input.getSchema(),\n toAdds(res),\n );\n yield* streamer.stream();\n}\n\nexport function* hydrateInternal(\n input: Input,\n hash: string,\n primaryKeys: Map<string, PrimaryKey>,\n): Iterable<RowChange | 'yield'> {\n const res = input.fetch({});\n const streamer = new Streamer(primaryKeys).accumulate(\n hash,\n input.getSchema(),\n toAdds(res),\n );\n yield* streamer.stream();\n}\n\nfunction buildPrimaryKeys(\n clientSchema: ClientSchema,\n primaryKeys: Map<string, PrimaryKey> = new Map<string, PrimaryKey>(),\n) {\n for (const [tableName, {primaryKey}] of Object.entries(clientSchema.tables)) {\n primaryKeys.set(tableName, primaryKey as unknown as PrimaryKey);\n }\n return primaryKeys;\n}\n\nfunction mustGetPrimaryKey(\n primaryKeys: Map<string, PrimaryKey> | null,\n table: string,\n): PrimaryKey {\n const pKeys = must(primaryKeys, 'primaryKey map must be non-null');\n\n return must(\n pKeys.get(table),\n `table '${table}' is not one of: ${[...pKeys.keys()].sort()}. 
` +\n `Check the spelling and ensure that the table has a primary key.`,\n );\n}\n"],"names":["input"],"mappings":";;;;;;;;;;;;;;;;;AAsGA,MAAM,gCAAgC;AAC/B,MAAM,qBAAqB;AAK3B,MAAM,eAAe;AAAA,EACjB,8BAAc,IAAA;AAAA;AAAA;AAAA;AAAA,EAId,iCAAiB,IAAA;AAAA,EAEjB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA,kCAAkB,IAAA;AAAA,EAClB;AAAA,EACT,YAA6B;AAAA,EAC7B,kBAAyC;AAAA,EACzC,kBAAyC;AAAA,EACzC,kBAAiC;AAAA,EACjC,eAA+C;AAAA,EAC/C,eAAyC;AAAA,EAEhC,eAAe,qBAAqB,QAAQ,oBAAoB;AAAA,IACvE,aACE;AAAA,IACF,MAAM;AAAA,EAAA,CACP;AAAA,EAEQ,uBAAuB;AAAA,IAC9B;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAAA,EAGO;AAAA,EAET,YACE,IACA,WACA,aACA,SACA,SACA,eACA,mBACA,eACA;AACA,SAAK,MAAM,GAAG,YAAY,iBAAiB,aAAa;AACxD,SAAK,eAAe;AACpB,SAAK,WAAW;AAChB,SAAK,WAAW;AAChB,SAAK,aAAa;AAClB,SAAK,qBAAqB;AAC1B,SAAK,cAAc,gBAAgB,oBAAI,QAAA,IAAY;AAAA,EACrD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,KAAK,cAA4B;AAC/B,WAAO,CAAC,KAAK,aAAa,YAAA,GAAe,qBAAqB;AAC9D,SAAK,aAAa,KAAA;AAClB,SAAK,oBAAoB,YAAY;AAAA,EACvC;AAAA;AAAA;AAAA;AAAA,EAKA,cAAuB;AACrB,WAAO,KAAK,aAAa,YAAA;AAAA,EAC3B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,cAA4B;AAChC,eAAW,EAAC,MAAA,KAAU,KAAK,WAAW,UAAU;AAC9C,YAAM,QAAA;AAAA,IACR;AACA,SAAK,WAAW,MAAA;AAChB,SAAK,QAAQ,MAAA;AACb,SAAK,oBAAoB,YAAY;AAAA,EACvC;AAAA,EAEA,oBAAoB,cAA4B;AAC9C,UAAM,EAAC,GAAA,IAAM,KAAK,aAAa,QAAA;AAC/B,UAAM,iCAAiB,IAAA;AACvB,oBAAgB,KAAK,KAAK,GAAG,IAAI,KAAK,aAAa,UAAU;AAC7D;AAAA,MACE,KAAK;AAAA,MACL;AAAA,MACA,KAAK;AAAA,MACL;AAAA,IAAA;AAEF,UAAM,cAAc,KAAK,gBAAgB,oBAAI,IAAA;AAC7C,SAAK,eAAe;AACpB,gBAAY,MAAA;AACZ,eAAW,CAAC,OAAO,IAAI,KAAK,KAAK,YAAY,WAAW;AACtD,UAAI,MAAM,WAAW,eAAe,KAAK,QAAQ,CAAC,GAAG;AACnD,oBAAY,IAAI,OAAO,KAAK,UAAU,UAAU;AAAA,MAClD;AAAA,IACF;AACA,qBAAiB,cAAc,WAAW;AAC1C,UAAM,EAAC,eAAA,IAAkB,qBAAqB,EAAE;AAChD,SAAK,kBAAkB;AAAA,EACzB;AAAA;AAAA,EAGA,IAAI,iBAAyB;AAC3B,WAAO,KAAK,KAAK,iBAAiB,qBAAqB;AAAA,EACzD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,iBAAyB;AACvB,WAAO,KAAK,YAAA,GAAe,qBAAqB;AAChD,WAAO,KAAK,aAAa,QAAA,EAAU;AAAA,EACrC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,wBAAwC;AACtC,WAAO,KAAK,YAAA,GAAe,qBAAqB;AAChD,WAAO,KAAK,aAAa,QAAA,EAAU;AAAA,EACrC;AAAA;AAAA;AAAA;AAAA,EAKA,qBAA+C;AAC7C,WAAO,KAAK,YAAA,GAAe,qBAAqB;AAChD,UAAM,MAAM;AAAA,MACV,KAAK;AAAA,MACL,KAAK,aAAa,QAAA,EAAU;AAAA,MAC5B,KAAK,SAAS;AAAA,MACd,KAAK;AAAA,IAAA;AAEP,QAAI,IAAI,SAAS;AACf,WAAK,eAAe,IAAI;AACxB,WAAK,IAAI;AAAA,QACP;AAAA,QACA,KAAK,UAAU,KAAK,YAAY;AAAA,MAAA;AAAA,IAEpC;AACA,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,qBAA6B;AAC3B,UAAM,EAAC,IAAI,QAAA,IAAW,KAAK,aAAa,qBAAqB;AAC7D,eAAW,SAAS,KAAK,QAAQ,OAAA,GAAU;AACzC,YAAM,MAAM,GAAG,EAAE;AAAA,IACnB;AACA,WAAO;AAAA,EACT;AAAA,EAEA,gCAAgC,IAAc;AAC5C,QAAI,WAAW,KAAK,aAAa,IAAI,EAAE;AACvC,QAAI,UAAU;AACZ,aAAO;AAAA,IACT;AACA,QAAI,KAAK,aAAa;AACpB,YAAM,YAAY,sBAAsB,IAAI,KAAK,WAAW;AAC5D,WAAK,YAAY,IAAI,IAAI,SAAS;AAClC,aAAO;AAAA,IACT;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,UAAU;AACR,SAAK,SAAS,QAAA;AACd,SAAK,aAAa,QAAA;AAAA,EACpB;AAAA;AAAA,EAGA,eASE;AACA,UAAM,qCAAqB,IAAA;AAI3B,eAAW,YAAY,KAAK,WAAW,OAAA,GAAU;AAC/C,YAAM,EAAC,cAAc,gBAAgB,mBAAA,IAAsB;AAE3D,UAAI,CAAC,eAAe,IAAI,YAAY,GAAG;AACrC,uBAAe,IAAI,cAAc,EAAE;AAAA,MACrC;AACA,qBAAe,IAAI,YAAY,EAAG,KAAK;AAAA,QACrC;AAAA,QACA;AAAA,MAAA,CACD;AAAA,IACH;AACA,WAAO,CAAC,IAAI,IAAI,KAAK,WAAW,KAAA,CAAM,GAAG,cAAc;AAAA,EACzD;AAAA,EAEA,uBAA+B;AAC7B,QAAI,QAAQ;AACZ,eAAW,YAAY,KAAK,WAAW,OAAA,GAAU;AAC/C,eAAS,SAAS;AAAA,IACpB;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAiBA,CAAC,SACC,oBACA,SACA,OACA,OAC+B;AAC/B,WAAO,KAAK,aAAa;AACzB,SAAK,mBAAmB,SAAS,oBAAoB,SAAS,KAAK;AACnE,QAAI,KAAK,WAAW,IAAI,kBAAkB,GAAG;AAC3C,WAAK,IAAI,OAAO,SAAS,kBAAkB,kBAAkB,KAAK;AAClE;AAAA,IACF;AACA,UAAM,gBAAgB,k
BAAkB,kBACpC,IAAI,UACJ;AAEJ,UAAM,YAAY,KAAK;AAAA,MACrB,KAAK,aAAa,QAAA,EAAU,GAAG;AAAA,IAAA;AAGjC,UAAM,QAAQ;AAAA,MACZ;AAAA,MACA;AAAA,QACE,OAAO;AAAA,QACP,iBAAiB;AAAA;AAAA,QACjB,WAAW,CAAA,SAAQ,KAAK,WAAW,IAAI;AAAA,QACvC,eAAe,MAAM,KAAK,eAAA;AAAA,QAC1B,qBAAqB,CAACA,QAAoB,aACxC,IAAI;AAAA,UACFA;AAAAA,UACA;AAAA,UACA,KAAK;AAAA,UACL;AAAA,QAAA;AAAA,QAEJ,eAAe,CAAAA,WAASA;AAAAA,QACxB,UAAU;AAAA,QAAC;AAAA,QACX,qBAAqB,CAAAA,WAASA;AAAAA,MAAA;AAAA,MAEhC;AAAA,MACA;AAAA,IAAA;AAEF,UAAM,SAAS,MAAM,UAAA;AACrB,UAAM,UAAU;AAAA,MACd,MAAM,CAAA,WAAU;AACd,cAAM,WAAW,KAAK;AACtB,eAAO,UAAU,kDAAkD;AACnE,iBAAS,WAAW,oBAAoB,QAAQ,CAAC,MAAM,CAAC;AACxD,eAAO,CAAA;AAAA,MACT;AAAA,IAAA,CACD;AAED,WAAO,KAAK,oBAAoB,IAAI;AACpC,SAAK,kBAAkB;AAAA,MACrB;AAAA,IAAA;AAEF,QAAI;AACF,aAAO;AAAA,QACL;AAAA,QACA;AAAA,QACA,KAAK,KAAK,YAAY;AAAA,MAAA;AAAA,IAE1B,UAAA;AACE,WAAK,kBAAkB;AAAA,IACzB;AAEA,UAAM,kBAAkB,MAAM,aAAA;AAC9B,QAAI,kBAAkB,sBAAsB;AAC1C,UAAI,kBAAkB,KAAK,WAAW,sBAAsB;AAC1D,YAAI,sBAAsB;AAC1B,cAAM,KAAK,KAAK,IACb,YAAY,QAAQ,kBAAkB,EACtC,YAAY,mBAAmB,eAAe;AACjD,mBAAW,aAAa,KAAK,QAAQ,KAAA,GAAQ;AAC3C,gBAAM,UAAU,OAAO;AAAA,YACrB,eAAe,qBAAqB,SAAS,KAAK,CAAA;AAAA,UAAC;AAErD,iCAAuB,QAAQ;AAAA,YAC7B,CAAC,KAAK,UAAU,MAAM,MAAM,CAAC;AAAA,YAC7B;AAAA,UAAA;AAEF,aAAG,OAAO,YAAY,aAAa,OAAO;AAAA,QAC5C;AACA,WAAG,OAAO,0BAA0B,mBAAmB,EAAE;AAAA,MAC3D;AAAA,IACF;AACA,mBAAe,MAAA;AAKf,SAAK,WAAW,IAAI,oBAAoB;AAAA,MACtC;AAAA,MACA;AAAA,MACA,cAAc;AAAA,MACd,gBAAgB;AAAA,MAChB;AAAA,IAAA,CACD;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,YAAY,MAAc;AACxB,UAAM,WAAW,KAAK,WAAW,IAAI,IAAI;AACzC,QAAI,UAAU;AACZ,WAAK,WAAW,OAAO,IAAI;AAC3B,eAAS,MAAM,QAAA;AAAA,IACjB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,OAAO,OAAe,IAA6B;AACjD,WAAO,KAAK,YAAA,GAAe,qBAAqB;AAChD,UAAM,SAAS,KAAK,KAAK,QAAQ,IAAI,KAAK,CAAC;AAC3C,WAAO,OAAO,OAAO,EAAS;AAAA,EAChC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAaA,QAAQ,OAIN;AACA,WAAO,KAAK,aAAa;AACzB,UAAM,OAAO,KAAK,aAAa,QAAQ,KAAK,WAAW;AACvD,UAAM,EAAC,MAAM,MAAM,QAAA,IAAW;AAC9B,SAAK,IAAI;AAAA,MACP,WAAW,KAAK,OAAO,OAAO,KAAK,OAAO,KAAK,OAAO;AAAA,IAAA;AAGxD,WAAO;AAAA,MACL,SAAS,KAAK;AAAA,MACd,YAAY;AAAA,MACZ,SAAS,KAAK,SAAS,MAAM,OAAO,OAAO;AAAA,IAAA;AAAA,EAE/C;AAAA,EAEA,CAAC,SACC,MACA,OACA,YAC+B;AAC/B,WAAO,KAAK,oBAAoB,IAAI;AACpC,SAAK,kBAAkB;AAAA,MACrB;AAAA,MACA,sBAAsB,KAAK,qBAAA;AAAA,MAC3B;AAAA,MACA,KAAK;AAAA,IAAA;AAEP,QAAI;AACF,iBAAW,EAAC,OAAO,YAAY,UAAA,KAAc,MAAM;AAKjD,YAAI,KAAK,wCAAwC;AAC/C,gBAAM;AAAA,QACR;AACA,cAAM,QAAQ,YAAY,IAAA;AAC1B,YAAI;AACJ,YAAI;AACF,gBAAM,cAAc,KAAK,QAAQ,IAAI,KAAK;AAC1C,cAAI,CAAC,aAAa;AAEhB;AAAA,UACF;AACA,gBAAM,aAAa,kBAAkB,KAAK,cAAc,KAAK;AAC7D,cAAI,aAA8B;AAClC,qBAAW,aAAa,YAAY;AAClC,gBACE,aACA;AAAA,cACE,UAAU,YAAY,SAAgB;AAAA,cACtC,UAAU,YAAY,SAAgB;AAAA,YAAA,GAExC;AACA,2BAAa;AAAA,YACf,OAAO;AACL,kBAAI,WAAW;AACb,qBAAK,qBAAqB,IAAI,CAAC;AAAA,cACjC;AACA,qBAAO,KAAK,MAAM,aAAa;AAAA,gBAC7B,MAAM;AAAA,gBACN,KAAK;AAAA,cAAA,CACN;AAAA,YACH;AAAA,UACF;AACA,cAAI,WAAW;AACb,gBAAI,YAAY;AACd,qBAAO,KAAK,MAAM,aAAa;AAAA,gBAC7B,MAAM;AAAA,gBACN,KAAK;AAAA,gBACL,QAAQ;AAAA,cAAA,CACT;AAAA,YACH,OAAO;AACL,qBAAO,KAAK,MAAM,aAAa;AAAA,gBAC7B,MAAM;AAAA,gBACN,KAAK;AAAA,cAAA,CACN;AAAA,YACH;AAAA,UACF;AAAA,QACF,UAAA;AACE,eAAK,gBAAgB;AAAA,QACvB;AAEA,cAAM,UAAU,YAAY,IAAA,IAAQ;AACpC,aAAK,aAAa,OAAO,UAAU,KAAM;AAAA,UACvC;AAAA,UACA;AAAA,QAAA,CACD;AAAA,MACH;AAGA,YAAM,EAAC,SAAQ;AACf,iBAAW,SAAS,KAAK,QAAQ,OAAA,GAAU;AACzC,cAAM,MAAM,KAAK,GAAG,EAAE;AAAA,MACxB;AACA,WAAK,gCAAgC,KAAK,GAAG,EAAE;AAC/C,WAAK,IAAI,QAAQ,eAAe,KAAK,OAAO,EAAE;AAAA,IAChD,UAAA;AACE,WAAK,kBAAkB;AAAA,IACzB;AAAA,EACF;AAAA;AAAA,EAGA,WAAW,WAA2B;AACpC,QAAI,SAAS,KAAK,QAAQ,IAAI,SAAS;AACvC,QAAI,QAAQ;AACV,aAAO;AAAA,IACT;AAEA,UAAM,YAAY,iBAAiB,KAAK,aAAa,SAAS;AA
C9D,UAAM,aAAa,kBAAkB,KAAK,cAAc,SAAS;AAEjE,UAAM,EAAC,GAAA,IAAM,KAAK,aAAa,QAAA;AAC/B,aAAS,IAAI;AAAA,MACX,KAAK;AAAA,MACL,KAAK;AAAA,MACL,GAAG;AAAA,MACH;AAAA,MACA,UAAU;AAAA,MACV;AAAA,MACA,MAAM,KAAK,aAAA;AAAA,IAAa;AAE1B,SAAK,QAAQ,IAAI,WAAW,MAAM;AAClC,SAAK,IAAI,QAAQ,2BAA2B,SAAS,EAAE;AACvD,WAAO;AAAA,EACT;AAAA,EAEA,eAAwB;AACtB,QAAI,KAAK,iBAAiB;AACxB,aAAO,KAAK,gBAAgB,MAAM,WAAA,IAAe;AAAA,IACnD;AACA,QAAI,KAAK,iBAAiB;AACxB,aAAO,KAAK,qCAAA;AAAA,IACd;AACA,UAAM,IAAI,MAAM,wDAAwD;AAAA,EAC1E;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAeA,uCAAgD;AAC9C,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA,OAAO;AAAA,MACP;AAAA,IAAA,IACE,KAAK,KAAK,eAAe;AAC7B,UAAM,UAAU,aAAa,aAAA;AAC7B,QACE,UAAU,kCACT,UAAU,wBACR,UAAU,uBAAuB,KAAK,OAAO,aAAa,IAC7D;AACA,YAAM,IAAI;AAAA,QACR,mCAAmC,GAAG,OAAO,UAAU,kBAC5C,OAAO,kEACK,oBAAoB;AAAA,MAAA;AAAA,IAE/C;AACA,WAAO,aAAa,eAAe;AAAA,EACrC;AAAA;AAAA,EAGA,iBAA0B;AACxB,WAAO,KAAK,SAAS,cAAA;AAAA,EACvB;AAAA,EAEA,CAAC,MACC,QACA,QAC+B;AAC/B,SAAK,mBAAA;AACL,QAAI;AACF,iBAAW,OAAO,OAAO,QAAQ,MAAM,GAAG;AACxC,YAAI,QAAQ,SAAS;AACnB,gBAAM;AAAA,QACR;AACA,mBAAW,iBAAiB,KAAK,kBAAA,EAAoB,UAAU;AAC7D,gBAAM;AAAA,QACR;AACA,aAAK,mBAAA;AAAA,MACP;AAAA,IACF,UAAA;AACE,UAAI,KAAK,cAAc,MAAM;AAC3B,aAAK,kBAAA;AAAA,MACP;AAAA,IACF;AAAA,EACF;AAAA,EAEA,qBAAqB;AACnB,WAAO,KAAK,cAAc,IAAI;AAC9B,SAAK,YAAY,IAAI,SAAS,KAAK,KAAK,YAAY,CAAC;AAAA,EACvD;AAAA,EAEA,oBAA8B;AAC5B,UAAM,WAAW,KAAK;AACtB,WAAO,QAAQ;AACf,SAAK,YAAY;AACjB,WAAO;AAAA,EACT;AACF;AAEA,MAAM,SAAS;AAAA,EACJ;AAAA,EAET,YAAY,aAAsC;AAChD,SAAK,eAAe;AAAA,EACtB;AAAA,EAES,WAIH,CAAA;AAAA,EAEN,WACE,MACA,QACA,SACM;AACN,SAAK,SAAS,KAAK,CAAC,MAAM,QAAQ,OAAO,CAAC;AAC1C,WAAO;AAAA,EACT;AAAA,EAEA,CAAC,SAAwC;AACvC,eAAW,CAAC,MAAM,QAAQ,OAAO,KAAK,KAAK,UAAU;AACnD,aAAO,KAAK,eAAe,MAAM,QAAQ,OAAO;AAAA,IAClD;AAAA,EACF;AAAA,EAEA,CAAC,eACC,WACA,QACA,SAC+B;AAG/B,QAAI,OAAO,WAAW,eAAe;AACnC;AAAA,IACF;AAEA,eAAW,UAAU,SAAS;AAC5B,UAAI,WAAW,SAAS;AACtB,cAAM;AACN;AAAA,MACF;AACA,YAAM,EAAC,SAAQ;AAEf,cAAQ,MAAA;AAAA,QACN,KAAK;AAAA,QACL,KAAK,UAAU;AACb,iBAAO,KAAK,aAAa,WAAW,QAAQ,MAAM,MAAM;AAAA,YACtD,OAAO;AAAA,UAAA,CACR;AACD;AAAA,QACF;AAAA,QACA,KAAK,SAAS;AACZ,gBAAM,EAAC,UAAS;AAChB,gBAAM,cAAc;AAAA,YAClB,OAAO,cAAc,MAAM,gBAAgB;AAAA,UAAA;AAG7C,iBAAO,KAAK,eAAe,WAAW,aAAa,CAAC,MAAM,MAAM,CAAC;AACjE;AAAA,QACF;AAAA,QACA,KAAK;AACH,iBAAO,KAAK,aAAa,WAAW,QAAQ,MAAM,MAAM;AAAA,YACtD,EAAC,KAAK,OAAO,KAAK,KAAK,eAAe,CAAA,EAAC;AAAA,UAAC,CACzC;AACD;AAAA,QACF;AACE,sBAAgB;AAAA,MAAA;AAAA,IAEtB;AAAA,EACF;AAAA,EAEA,CAAC,aACC,WACA,QACA,IACA,OAC+B;AAC/B,UAAM,EAAC,WAAW,OAAO,OAAA,IAAU;AAEnC,UAAM,aAAa,KAAK,KAAK,aAAa,IAAI,KAAK,CAAC;AAIpD,QAAI,WAAW,eAAe;AAC5B;AAAA,IACF;AAEA,eAAW,QAAQ,SAAS;AAC1B,UAAI,SAAS,SAAS;AACpB,cAAM;AACN;AAAA,MACF;AACA,YAAM,EAAC,eAAe,IAAA,IAAO;AAC7B,YAAM,SAAS,UAAU,YAAY,GAAG;AAExC,YAAM;AAAA,QACJ,MAAM;AAAA,QACN;AAAA,QACA;AAAA,QACA;AAAA,QACA,KAAK,OAAO,WAAW,SAAY;AAAA,MAAA;AAGrC,iBAAW,CAAC,cAAc,QAAQ,KAAK,OAAO,QAAQ,aAAa,GAAG;AACpE,cAAM,cAAc,KAAK,OAAO,cAAc,YAAY,CAAC;AAC3D,eAAO,KAAK,aAAa,WAAW,aAAa,IAAI,QAAQ;AAAA,MAC/D;AAAA,IACF;AAAA,EACF;AACF;AAEA,UAAU,OAAO,OAA6D;AAC5E,aAAW,QAAQ,OAAO;AACxB,QAAI,SAAS,SAAS;AACpB,YAAM;AACN;AAAA,IACF;AACA,UAAM,EAAC,MAAM,OAAO,KAAA;AAAA,EACtB;AACF;AAEA,SAAS,UAAU,MAAkB,KAAkB;AACrD,SAAO,OAAO,YAAY,KAAK,IAAI,CAAA,QAAO,CAAC,KAAK,KAAK,IAAI,GAAG,CAAC,CAAC,CAAC,CAAC;AAClE;AAOO,UAAU,QACf,OACA,MACA,cAC+B;AAC/B,QAAM,MAAM,MAAM,MAAM,EAAE;AAC1B,QAAM,WAAW,IAAI,SAAS,iBAAiB,YAAY,CAAC,EAAE;AAAA,IAC5D;AAAA,IACA,MAAM,UAAA;AAAA,IACN,OAAO,GAAG;AAAA,EAAA;AAEZ,SAAO,SAAS,OAAA;AAClB;AAEO,UAAU,gBACf,OACA,MACA,aAC+B;AAC/B,QAAM,MAAM,MAAM,MAAM,EAAE;AAC1B,QAAM,WAAW,IAAI,SAAS,WAAW,EAAE;AAAA,IACzC;AAAA,IACA,MAAM,UAAA;AAAA,IACN,OAAO,GAAG;AAAA,EA
AA;AAEZ,SAAO,SAAS,OAAA;AAClB;AAEA,SAAS,iBACP,cACA,cAAuC,oBAAI,OAC3C;AACA,aAAW,CAAC,WAAW,EAAC,WAAA,CAAW,KAAK,OAAO,QAAQ,aAAa,MAAM,GAAG;AAC3E,gBAAY,IAAI,WAAW,UAAmC;AAAA,EAChE;AACA,SAAO;AACT;AAEA,SAAS,kBACP,aACA,OACY;AACZ,QAAM,QAAQ,KAAK,aAAa,iCAAiC;AAEjE,SAAO;AAAA,IACL,MAAM,IAAI,KAAK;AAAA,IACf,UAAU,KAAK,oBAAoB,CAAC,GAAG,MAAM,KAAA,CAAM,EAAE,KAAA,CAAM;AAAA,EAAA;AAG/D;"}
+
{"version":3,"file":"pipeline-driver.js","sources":["../../../../../../zero-cache/src/services/view-syncer/pipeline-driver.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {assert, unreachable} from '../../../../shared/src/asserts.ts';\nimport {deepEqual, type JSONValue} from '../../../../shared/src/json.ts';\nimport {must} from '../../../../shared/src/must.ts';\nimport type {AST} from '../../../../zero-protocol/src/ast.ts';\nimport type {ClientSchema} from '../../../../zero-protocol/src/client-schema.ts';\nimport type {Row} from '../../../../zero-protocol/src/data.ts';\nimport type {PrimaryKey} from '../../../../zero-protocol/src/primary-key.ts';\nimport {buildPipeline} from '../../../../zql/src/builder/builder.ts';\nimport {\n Debug,\n runtimeDebugFlags,\n} from '../../../../zql/src/builder/debug-delegate.ts';\nimport type {Change} from '../../../../zql/src/ivm/change.ts';\nimport type {Node} from '../../../../zql/src/ivm/data.ts';\nimport {type Input, type Storage} from '../../../../zql/src/ivm/operator.ts';\nimport type {SourceSchema} from '../../../../zql/src/ivm/schema.ts';\nimport type {\n Source,\n SourceChange,\n SourceInput,\n} from '../../../../zql/src/ivm/source.ts';\nimport type {ConnectionCostModel} from '../../../../zql/src/planner/planner-connection.ts';\nimport {MeasurePushOperator} from '../../../../zql/src/query/measure-push-operator.ts';\nimport type {ClientGroupStorage} from '../../../../zqlite/src/database-storage.ts';\nimport type {Database} from '../../../../zqlite/src/db.ts';\nimport {createSQLiteCostModel} from '../../../../zqlite/src/sqlite-cost-model.ts';\nimport {TableSource} from '../../../../zqlite/src/table-source.ts';\nimport {\n reloadPermissionsIfChanged,\n type LoadedPermissions,\n} from '../../auth/load-permissions.ts';\nimport type {LogConfig} from '../../config/zero-config.ts';\nimport {computeZqlSpecs, mustGetTableSpec} from '../../db/lite-tables.ts';\nimport type {LiteAndZqlSpec, LiteTableSpec} from '../../db/specs.ts';\nimport {\n getOrCreateCounter,\n getOrCreateHistogram,\n} from '../../observability/metrics.ts';\nimport type {InspectorDelegate} from '../../server/inspector-delegate.ts';\nimport {type RowKey} from '../../types/row-key.ts';\nimport type {SchemaVersions} from '../../types/schema-versions.ts';\nimport {upstreamSchema, type ShardID} from '../../types/shards.ts';\nimport {getSubscriptionState} from '../replicator/schema/replication-state.ts';\nimport {checkClientSchema} from './client-schema.ts';\nimport type {Snapshotter} from './snapshotter.ts';\nimport {ResetPipelinesSignal, type SnapshotDiff} from './snapshotter.ts';\n\nexport type RowAdd = {\n readonly type: 'add';\n readonly queryHash: string;\n readonly table: string;\n readonly rowKey: Row;\n readonly row: Row;\n};\n\nexport type RowRemove = {\n readonly type: 'remove';\n readonly queryHash: string;\n readonly table: string;\n readonly rowKey: Row;\n readonly row: undefined;\n};\n\nexport type RowEdit = {\n readonly type: 'edit';\n readonly queryHash: string;\n readonly table: string;\n readonly rowKey: Row;\n readonly row: Row;\n};\n\nexport type RowChange = RowAdd | RowRemove | RowEdit;\n\ntype Pipeline = {\n readonly input: Input;\n readonly hydrationTimeMs: number;\n readonly originalHash: string;\n readonly transformedAst: AST; // Optional, only set after hydration\n readonly transformationHash: string; // The hash of the transformed AST\n};\n\ntype AdvanceContext = {\n readonly timer: Timer;\n readonly totalHydrationTimeMs: number;\n 
readonly numChanges: number;\n pos: number;\n};\n\ntype HydrateContext = {\n readonly timer: Timer;\n};\n\nexport type Timer = {\n elapsedLap: () => number;\n totalElapsed: () => number;\n};\n\n/**\n * No matter how fast hydration is, advancement is given at least this long to\n * complete before doing a pipeline reset.\n */\nconst MIN_ADVANCEMENT_TIME_LIMIT_MS = 50;\n\n/**\n * Manages the state of IVM pipelines for a given ViewSyncer (i.e. client group).\n */\nexport class PipelineDriver {\n readonly #tables = new Map<string, TableSource>();\n // We probs need the original query hash\n // so we can decide not to re-transform a custom query\n // that is already hydrated.\n readonly #pipelines = new Map<string, Pipeline>();\n\n readonly #lc: LogContext;\n readonly #snapshotter: Snapshotter;\n readonly #storage: ClientGroupStorage;\n readonly #shardID: ShardID;\n readonly #logConfig: LogConfig;\n readonly #tableSpecs = new Map<string, LiteAndZqlSpec>();\n readonly #costModels: WeakMap<Database, ConnectionCostModel> | undefined;\n readonly #yieldThresholdMs: number;\n #streamer: Streamer | null = null;\n #hydrateContext: HydrateContext | null = null;\n #advanceContext: AdvanceContext | null = null;\n #replicaVersion: string | null = null;\n #primaryKeys: Map<string, PrimaryKey> | null = null;\n #permissions: LoadedPermissions | null = null;\n\n readonly #advanceTime = getOrCreateHistogram('sync', 'ivm.advance-time', {\n description:\n 'Time to advance all queries for a given client group for in response to a single change.',\n unit: 's',\n });\n\n readonly #conflictRowsDeleted = getOrCreateCounter(\n 'sync',\n 'ivm.conflict-rows-deleted',\n 'Number of rows deleted because they conflicted with added row',\n );\n\n readonly #inspectorDelegate: InspectorDelegate;\n\n constructor(\n lc: LogContext,\n logConfig: LogConfig,\n snapshotter: Snapshotter,\n shardID: ShardID,\n storage: ClientGroupStorage,\n clientGroupID: string,\n inspectorDelegate: InspectorDelegate,\n yieldThresholdMs: number,\n enablePlanner?: boolean,\n ) {\n this.#lc = lc.withContext('clientGroupID', clientGroupID);\n this.#snapshotter = snapshotter;\n this.#storage = storage;\n this.#shardID = shardID;\n this.#logConfig = logConfig;\n this.#inspectorDelegate = inspectorDelegate;\n this.#costModels = enablePlanner ? new WeakMap() : undefined;\n this.#yieldThresholdMs = yieldThresholdMs;\n }\n\n /**\n * Initializes the PipelineDriver to the current head of the database.\n * Queries can then be added (i.e. hydrated) with {@link addQuery()}.\n *\n * Must only be called once.\n */\n init(clientSchema: ClientSchema) {\n assert(!this.#snapshotter.initialized(), 'Already initialized');\n this.#snapshotter.init();\n this.#initAndResetCommon(clientSchema);\n }\n\n /**\n * @returns Whether the PipelineDriver has been initialized.\n */\n initialized(): boolean {\n return this.#snapshotter.initialized();\n }\n\n /**\n * Clears the current pipelines and TableSources, returning the PipelineDriver\n * to its initial state. 
This should be called in response to a schema change,\n * as TableSources need to be recomputed.\n */\n reset(clientSchema: ClientSchema) {\n for (const {input} of this.#pipelines.values()) {\n input.destroy();\n }\n this.#pipelines.clear();\n this.#tables.clear();\n this.#initAndResetCommon(clientSchema);\n }\n\n #initAndResetCommon(clientSchema: ClientSchema) {\n const {db} = this.#snapshotter.current();\n const fullTables = new Map<string, LiteTableSpec>();\n computeZqlSpecs(this.#lc, db.db, this.#tableSpecs, fullTables);\n checkClientSchema(\n this.#shardID,\n clientSchema,\n this.#tableSpecs,\n fullTables,\n );\n const primaryKeys = this.#primaryKeys ?? new Map<string, PrimaryKey>();\n this.#primaryKeys = primaryKeys;\n primaryKeys.clear();\n for (const [table, spec] of this.#tableSpecs.entries()) {\n if (table.startsWith(upstreamSchema(this.#shardID))) {\n primaryKeys.set(table, spec.tableSpec.primaryKey);\n }\n }\n buildPrimaryKeys(clientSchema, primaryKeys);\n const {replicaVersion} = getSubscriptionState(db);\n this.#replicaVersion = replicaVersion;\n }\n\n /** @returns The replica version. The PipelineDriver must have been initialized. */\n get replicaVersion(): string {\n return must(this.#replicaVersion, 'Not yet initialized');\n }\n\n /**\n * Returns the current version of the database. This will reflect the\n * latest version change when calling {@link advance()} once the\n * iteration has begun.\n */\n currentVersion(): string {\n assert(this.initialized(), 'Not yet initialized');\n return this.#snapshotter.current().version;\n }\n\n /**\n * Returns the current supported schema version range of the database. This\n * will reflect changes to supported schema version range when calling\n * {@link advance()} once the iteration has begun.\n */\n currentSchemaVersions(): SchemaVersions {\n assert(this.initialized(), 'Not yet initialized');\n return this.#snapshotter.current().schemaVersions;\n }\n\n /**\n * Returns the current upstream {app}.permissions, or `null` if none are defined.\n */\n currentPermissions(): LoadedPermissions | null {\n assert(this.initialized(), 'Not yet initialized');\n const res = reloadPermissionsIfChanged(\n this.#lc,\n this.#snapshotter.current().db,\n this.#shardID.appID,\n this.#permissions,\n );\n if (res.changed) {\n this.#permissions = res.permissions;\n this.#lc.debug?.(\n 'Reloaded permissions',\n JSON.stringify(this.#permissions),\n );\n }\n return this.#permissions;\n }\n\n advanceWithoutDiff(): string {\n const {db, version} = this.#snapshotter.advanceWithoutDiff().curr;\n for (const table of this.#tables.values()) {\n table.setDB(db.db);\n }\n return version;\n }\n\n #ensureCostModelExistsIfEnabled(db: Database) {\n let existing = this.#costModels?.get(db);\n if (existing) {\n return existing;\n }\n if (this.#costModels) {\n const costModel = createSQLiteCostModel(db, this.#tableSpecs);\n this.#costModels.set(db, costModel);\n return costModel;\n }\n return undefined;\n }\n\n /**\n * Clears storage used for the pipelines. Call this when the\n * PipelineDriver will no longer be used.\n */\n destroy() {\n this.#storage.destroy();\n this.#snapshotter.destroy();\n }\n\n /** @return The Set of query hashes for all added queries. 
*/\n addedQueries(): [\n transformationHashes: Set<string>,\n byOriginalHash: Map<\n string,\n {\n transformationHash: string;\n transformedAst: AST;\n }[]\n >,\n ] {\n const byOriginalHash = new Map<\n string,\n {transformationHash: string; transformedAst: AST}[]\n >();\n for (const pipeline of this.#pipelines.values()) {\n const {originalHash, transformedAst, transformationHash} = pipeline;\n\n if (!byOriginalHash.has(originalHash)) {\n byOriginalHash.set(originalHash, []);\n }\n byOriginalHash.get(originalHash)!.push({\n transformationHash,\n transformedAst,\n });\n }\n return [new Set(this.#pipelines.keys()), byOriginalHash];\n }\n\n totalHydrationTimeMs(): number {\n let total = 0;\n for (const pipeline of this.#pipelines.values()) {\n total += pipeline.hydrationTimeMs;\n }\n return total;\n }\n\n /**\n * Adds a pipeline for the query. The method will hydrate the query using the\n * driver's current snapshot of the database and return a stream of results.\n * Henceforth, updates to the query will be returned when the driver is\n * {@link advance}d. The query and its pipeline can be removed with\n * {@link removeQuery()}.\n *\n * If a query with an identical hash has already been added, this method is a\n * no-op and no RowChanges are generated.\n *\n * @param timer The caller-controlled {@link Timer} used to determine the\n * final hydration time. (The caller may pause and resume the timer\n * when yielding the thread for time-slicing).\n * @return The rows from the initial hydration of the query.\n */\n *addQuery(\n transformationHash: string,\n queryID: string,\n query: AST,\n timer: Timer,\n ): Iterable<RowChange | 'yield'> {\n assert(this.initialized());\n this.#inspectorDelegate.addQuery(transformationHash, queryID, query);\n if (this.#pipelines.has(transformationHash)) {\n this.#lc.info?.(`query ${transformationHash} already added`, query);\n return;\n }\n const debugDelegate = runtimeDebugFlags.trackRowsVended\n ? 
new Debug()\n : undefined;\n\n const costModel = this.#ensureCostModelExistsIfEnabled(\n this.#snapshotter.current().db.db,\n );\n\n const input = buildPipeline(\n query,\n {\n debug: debugDelegate,\n enableNotExists: true, // Server-side can handle NOT EXISTS\n getSource: name => this.#getSource(name),\n createStorage: () => this.#createStorage(),\n decorateSourceInput: (input: SourceInput, _queryID: string): Input =>\n new MeasurePushOperator(\n input,\n transformationHash,\n this.#inspectorDelegate,\n 'query-update-server',\n ),\n decorateInput: input => input,\n addEdge() {},\n decorateFilterInput: input => input,\n },\n queryID,\n costModel,\n );\n const schema = input.getSchema();\n input.setOutput({\n push: change => {\n const streamer = this.#streamer;\n assert(streamer, 'must #startAccumulating() before pushing changes');\n streamer.accumulate(transformationHash, schema, [change]);\n return [];\n },\n });\n\n assert(this.#advanceContext === null);\n this.#hydrateContext = {\n timer,\n };\n try {\n yield* hydrateInternal(\n input,\n transformationHash,\n must(this.#primaryKeys),\n );\n } finally {\n this.#hydrateContext = null;\n }\n\n const hydrationTimeMs = timer.totalElapsed();\n if (runtimeDebugFlags.trackRowCountsVended) {\n if (hydrationTimeMs > this.#logConfig.slowHydrateThreshold) {\n let totalRowsConsidered = 0;\n const lc = this.#lc\n .withContext('hash', transformationHash)\n .withContext('hydrationTimeMs', hydrationTimeMs);\n for (const tableName of this.#tables.keys()) {\n const entries = Object.entries(\n debugDelegate?.getVendedRowCounts()[tableName] ?? {},\n );\n totalRowsConsidered += entries.reduce(\n (acc, entry) => acc + entry[1],\n 0,\n );\n lc.info?.(tableName + ' VENDED: ', entries);\n }\n lc.info?.(`Total rows considered: ${totalRowsConsidered}`);\n }\n }\n debugDelegate?.reset();\n\n // Note: This hydrationTime is a wall-clock overestimate, as it does\n // not take time slicing into account. The view-syncer resets this\n // to a more precise processing-time measurement with setHydrationTime().\n this.#pipelines.set(transformationHash, {\n input,\n hydrationTimeMs,\n originalHash: queryID,\n transformedAst: query,\n transformationHash,\n });\n }\n\n /**\n * Removes the pipeline for the query. This is a no-op if the query\n * was not added.\n */\n removeQuery(hash: string) {\n const pipeline = this.#pipelines.get(hash);\n if (pipeline) {\n this.#pipelines.delete(hash);\n pipeline.input.destroy();\n }\n }\n\n /**\n * Returns the value of the row with the given primary key `pk`,\n * or `undefined` if there is no such row. The pipeline must have been\n * initialized.\n */\n getRow(table: string, pk: RowKey): Row | undefined {\n assert(this.initialized(), 'Not yet initialized');\n const source = must(this.#tables.get(table));\n return source.getRow(pk as Row);\n }\n\n /**\n * Advances to the new head of the database.\n *\n * @param timer The caller-controlled {@link Timer} that will be used to\n * measure the progress of the advancement and abort with a\n * {@link ResetPipelinesSignal} if it is estimated to take longer\n * than a hydration.\n * @return The resulting row changes for all added queries. 
Note that the\n * `changes` must be iterated over in their entirety in order to\n * advance the database snapshot.\n */\n advance(timer: Timer): {\n version: string;\n numChanges: number;\n changes: Iterable<RowChange | 'yield'>;\n } {\n assert(this.initialized());\n const diff = this.#snapshotter.advance(this.#tableSpecs);\n const {prev, curr, changes} = diff;\n this.#lc.debug?.(\n `advance ${prev.version} => ${curr.version}: ${changes} changes`,\n );\n\n return {\n version: curr.version,\n numChanges: changes,\n changes: this.#advance(diff, timer, changes),\n };\n }\n\n *#advance(\n diff: SnapshotDiff,\n timer: Timer,\n numChanges: number,\n ): Iterable<RowChange | 'yield'> {\n assert(this.#hydrateContext === null);\n this.#advanceContext = {\n timer,\n totalHydrationTimeMs: this.totalHydrationTimeMs(),\n numChanges,\n pos: 0,\n };\n try {\n for (const {table, prevValues, nextValue} of diff) {\n // Advance progress is checked each time a row is fetched\n // from a TableSource during push processing, but some pushes\n // don't read any rows. Check progress here before processing\n // the next change.\n if (this.#shouldAdvanceYieldMaybeAbortAdvance()) {\n yield 'yield';\n }\n const start = performance.now();\n let type;\n try {\n const tableSource = this.#tables.get(table);\n if (!tableSource) {\n // no pipelines read from this table, so no need to process the change\n continue;\n }\n const primaryKey = mustGetPrimaryKey(this.#primaryKeys, table);\n let editOldRow: Row | undefined = undefined;\n for (const prevValue of prevValues) {\n if (\n nextValue &&\n deepEqual(\n getRowKey(primaryKey, prevValue as Row) as JSONValue,\n getRowKey(primaryKey, nextValue as Row) as JSONValue,\n )\n ) {\n editOldRow = prevValue;\n } else {\n if (nextValue) {\n this.#conflictRowsDeleted.add(1);\n }\n yield* this.#push(tableSource, {\n type: 'remove',\n row: prevValue,\n });\n }\n }\n if (nextValue) {\n if (editOldRow) {\n yield* this.#push(tableSource, {\n type: 'edit',\n row: nextValue,\n oldRow: editOldRow,\n });\n } else {\n yield* this.#push(tableSource, {\n type: 'add',\n row: nextValue,\n });\n }\n }\n } finally {\n this.#advanceContext.pos++;\n }\n\n const elapsed = performance.now() - start;\n this.#advanceTime.record(elapsed / 1000, {\n table,\n type,\n });\n }\n\n // Set the new snapshot on all TableSources.\n const {curr} = diff;\n for (const table of this.#tables.values()) {\n table.setDB(curr.db.db);\n }\n this.#ensureCostModelExistsIfEnabled(curr.db.db);\n this.#lc.debug?.(`Advanced to ${curr.version}`);\n } finally {\n this.#advanceContext = null;\n }\n }\n\n /** Implements `BuilderDelegate.getSource()` */\n #getSource(tableName: string): Source {\n let source = this.#tables.get(tableName);\n if (source) {\n return source;\n }\n\n const tableSpec = mustGetTableSpec(this.#tableSpecs, tableName);\n const primaryKey = mustGetPrimaryKey(this.#primaryKeys, tableName);\n\n const {db} = this.#snapshotter.current();\n source = new TableSource(\n this.#lc,\n this.#logConfig,\n db.db,\n tableName,\n tableSpec.zqlSpec,\n primaryKey,\n () => this.#shouldYield(),\n );\n this.#tables.set(tableName, source);\n this.#lc.debug?.(`created TableSource for ${tableName}`);\n return source;\n }\n\n #shouldYield(): boolean {\n if (this.#hydrateContext) {\n return this.#hydrateContext.timer.elapsedLap() > this.#yieldThresholdMs;\n }\n if (this.#advanceContext) {\n return this.#shouldAdvanceYieldMaybeAbortAdvance();\n }\n throw new Error('shouldYield called outside of hydration or advancement');\n }\n\n /**\n * 
Cancel the advancement processing, by throwing a ResetPipelinesSignal, if\n * it has taken longer than half the total hydration time to make it through\n * half of the advancement, or if processing time exceeds total hydration\n * time. This serves as both a circuit breaker for very large transactions,\n * as well as a bound on the amount of time the previous connection locks\n * the inactive WAL file (as the lock prevents WAL2 from switching to the\n * free WAL when the current one is over the size limit, which can make\n * the WAL grow continuously and compound slowness).\n * This is checked:\n * 1. before starting to process each change in an advancement is processed\n * 2. whenever a row is fetched from a TableSource during push processing\n */\n #shouldAdvanceYieldMaybeAbortAdvance(): boolean {\n const {\n pos,\n numChanges,\n timer: advanceTimer,\n totalHydrationTimeMs,\n } = must(this.#advanceContext);\n const elapsed = advanceTimer.totalElapsed();\n if (\n elapsed > MIN_ADVANCEMENT_TIME_LIMIT_MS &&\n (elapsed > totalHydrationTimeMs ||\n (elapsed > totalHydrationTimeMs / 2 && pos <= numChanges / 2))\n ) {\n throw new ResetPipelinesSignal(\n `Advancement exceeded timeout at ${pos} of ${numChanges} changes ` +\n `after ${elapsed} ms. Advancement time limited based on total ` +\n `hydration time of ${totalHydrationTimeMs} ms.`,\n );\n }\n return advanceTimer.elapsedLap() > this.#yieldThresholdMs;\n }\n\n /** Implements `BuilderDelegate.createStorage()` */\n #createStorage(): Storage {\n return this.#storage.createStorage();\n }\n\n *#push(\n source: TableSource,\n change: SourceChange,\n ): Iterable<RowChange | 'yield'> {\n this.#startAccumulating();\n try {\n for (const val of source.genPush(change)) {\n if (val === 'yield') {\n yield 'yield';\n }\n for (const changeOrYield of this.#stopAccumulating().stream()) {\n yield changeOrYield;\n }\n this.#startAccumulating();\n }\n } finally {\n if (this.#streamer !== null) {\n this.#stopAccumulating();\n }\n }\n }\n\n #startAccumulating() {\n assert(this.#streamer === null);\n this.#streamer = new Streamer(must(this.#primaryKeys));\n }\n\n #stopAccumulating(): Streamer {\n const streamer = this.#streamer;\n assert(streamer);\n this.#streamer = null;\n return streamer;\n }\n}\n\nclass Streamer {\n readonly #primaryKeys: Map<string, PrimaryKey>;\n\n constructor(primaryKeys: Map<string, PrimaryKey>) {\n this.#primaryKeys = primaryKeys;\n }\n\n readonly #changes: [\n hash: string,\n schema: SourceSchema,\n changes: Iterable<Change | 'yield'>,\n ][] = [];\n\n accumulate(\n hash: string,\n schema: SourceSchema,\n changes: Iterable<Change | 'yield'>,\n ): this {\n this.#changes.push([hash, schema, changes]);\n return this;\n }\n\n *stream(): Iterable<RowChange | 'yield'> {\n for (const [hash, schema, changes] of this.#changes) {\n yield* this.#streamChanges(hash, schema, changes);\n }\n }\n\n *#streamChanges(\n queryHash: string,\n schema: SourceSchema,\n changes: Iterable<Change | 'yield'>,\n ): Iterable<RowChange | 'yield'> {\n // We do not sync rows gathered by the permissions\n // system to the client.\n if (schema.system === 'permissions') {\n return;\n }\n\n for (const change of changes) {\n if (change === 'yield') {\n yield change;\n continue;\n }\n const {type} = change;\n\n switch (type) {\n case 'add':\n case 'remove': {\n yield* this.#streamNodes(queryHash, schema, type, () => [\n change.node,\n ]);\n break;\n }\n case 'child': {\n const {child} = change;\n const childSchema = must(\n schema.relationships[child.relationshipName],\n 
);\n\n yield* this.#streamChanges(queryHash, childSchema, [child.change]);\n break;\n }\n case 'edit':\n yield* this.#streamNodes(queryHash, schema, type, () => [\n {row: change.node.row, relationships: {}},\n ]);\n break;\n default:\n unreachable(type);\n }\n }\n }\n\n *#streamNodes(\n queryHash: string,\n schema: SourceSchema,\n op: 'add' | 'remove' | 'edit',\n nodes: () => Iterable<Node | 'yield'>,\n ): Iterable<RowChange | 'yield'> {\n const {tableName: table, system} = schema;\n\n const primaryKey = must(this.#primaryKeys.get(table));\n\n // We do not sync rows gathered by the permissions\n // system to the client.\n if (system === 'permissions') {\n return;\n }\n\n for (const node of nodes()) {\n if (node === 'yield') {\n yield node;\n continue;\n }\n const {relationships, row} = node;\n const rowKey = getRowKey(primaryKey, row);\n\n yield {\n type: op,\n queryHash,\n table,\n rowKey,\n row: op === 'remove' ? undefined : row,\n } as RowChange;\n\n for (const [relationship, children] of Object.entries(relationships)) {\n const childSchema = must(schema.relationships[relationship]);\n yield* this.#streamNodes(queryHash, childSchema, op, children);\n }\n }\n }\n}\n\nfunction* toAdds(nodes: Iterable<Node | 'yield'>): Iterable<Change | 'yield'> {\n for (const node of nodes) {\n if (node === 'yield') {\n yield node;\n continue;\n }\n yield {type: 'add', node};\n }\n}\n\nfunction getRowKey(cols: PrimaryKey, row: Row): RowKey {\n return Object.fromEntries(cols.map(col => [col, must(row[col])]));\n}\n\n/**\n * Core hydration logic used by {@link PipelineDriver#addQuery}, extracted to a\n * function for reuse by bin-analyze so that bin-analyze's hydration logic\n * is as close as possible to zero-cache's real hydration logic.\n */\nexport function* hydrate(\n input: Input,\n hash: string,\n clientSchema: ClientSchema,\n): Iterable<RowChange | 'yield'> {\n const res = input.fetch({});\n const streamer = new Streamer(buildPrimaryKeys(clientSchema)).accumulate(\n hash,\n input.getSchema(),\n toAdds(res),\n );\n yield* streamer.stream();\n}\n\nexport function* hydrateInternal(\n input: Input,\n hash: string,\n primaryKeys: Map<string, PrimaryKey>,\n): Iterable<RowChange | 'yield'> {\n const res = input.fetch({});\n const streamer = new Streamer(primaryKeys).accumulate(\n hash,\n input.getSchema(),\n toAdds(res),\n );\n yield* streamer.stream();\n}\n\nfunction buildPrimaryKeys(\n clientSchema: ClientSchema,\n primaryKeys: Map<string, PrimaryKey> = new Map<string, PrimaryKey>(),\n) {\n for (const [tableName, {primaryKey}] of Object.entries(clientSchema.tables)) {\n primaryKeys.set(tableName, primaryKey as unknown as PrimaryKey);\n }\n return primaryKeys;\n}\n\nfunction mustGetPrimaryKey(\n primaryKeys: Map<string, PrimaryKey> | null,\n table: string,\n): PrimaryKey {\n const pKeys = must(primaryKeys, 'primaryKey map must be non-null');\n\n return must(\n pKeys.get(table),\n `table '${table}' is not one of: ${[...pKeys.keys()].sort()}. 
` +\n `Check the spelling and ensure that the table has a primary key.`,\n );\n}\n"],"names":["input"],"mappings":";;;;;;;;;;;;;;;;;AAsGA,MAAM,gCAAgC;AAK/B,MAAM,eAAe;AAAA,EACjB,8BAAc,IAAA;AAAA;AAAA;AAAA;AAAA,EAId,iCAAiB,IAAA;AAAA,EAEjB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA,kCAAkB,IAAA;AAAA,EAClB;AAAA,EACA;AAAA,EACT,YAA6B;AAAA,EAC7B,kBAAyC;AAAA,EACzC,kBAAyC;AAAA,EACzC,kBAAiC;AAAA,EACjC,eAA+C;AAAA,EAC/C,eAAyC;AAAA,EAEhC,eAAe,qBAAqB,QAAQ,oBAAoB;AAAA,IACvE,aACE;AAAA,IACF,MAAM;AAAA,EAAA,CACP;AAAA,EAEQ,uBAAuB;AAAA,IAC9B;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAAA,EAGO;AAAA,EAET,YACE,IACA,WACA,aACA,SACA,SACA,eACA,mBACA,kBACA,eACA;AACA,SAAK,MAAM,GAAG,YAAY,iBAAiB,aAAa;AACxD,SAAK,eAAe;AACpB,SAAK,WAAW;AAChB,SAAK,WAAW;AAChB,SAAK,aAAa;AAClB,SAAK,qBAAqB;AAC1B,SAAK,cAAc,gBAAgB,oBAAI,QAAA,IAAY;AACnD,SAAK,oBAAoB;AAAA,EAC3B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,KAAK,cAA4B;AAC/B,WAAO,CAAC,KAAK,aAAa,YAAA,GAAe,qBAAqB;AAC9D,SAAK,aAAa,KAAA;AAClB,SAAK,oBAAoB,YAAY;AAAA,EACvC;AAAA;AAAA;AAAA;AAAA,EAKA,cAAuB;AACrB,WAAO,KAAK,aAAa,YAAA;AAAA,EAC3B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,cAA4B;AAChC,eAAW,EAAC,MAAA,KAAU,KAAK,WAAW,UAAU;AAC9C,YAAM,QAAA;AAAA,IACR;AACA,SAAK,WAAW,MAAA;AAChB,SAAK,QAAQ,MAAA;AACb,SAAK,oBAAoB,YAAY;AAAA,EACvC;AAAA,EAEA,oBAAoB,cAA4B;AAC9C,UAAM,EAAC,GAAA,IAAM,KAAK,aAAa,QAAA;AAC/B,UAAM,iCAAiB,IAAA;AACvB,oBAAgB,KAAK,KAAK,GAAG,IAAI,KAAK,aAAa,UAAU;AAC7D;AAAA,MACE,KAAK;AAAA,MACL;AAAA,MACA,KAAK;AAAA,MACL;AAAA,IAAA;AAEF,UAAM,cAAc,KAAK,gBAAgB,oBAAI,IAAA;AAC7C,SAAK,eAAe;AACpB,gBAAY,MAAA;AACZ,eAAW,CAAC,OAAO,IAAI,KAAK,KAAK,YAAY,WAAW;AACtD,UAAI,MAAM,WAAW,eAAe,KAAK,QAAQ,CAAC,GAAG;AACnD,oBAAY,IAAI,OAAO,KAAK,UAAU,UAAU;AAAA,MAClD;AAAA,IACF;AACA,qBAAiB,cAAc,WAAW;AAC1C,UAAM,EAAC,eAAA,IAAkB,qBAAqB,EAAE;AAChD,SAAK,kBAAkB;AAAA,EACzB;AAAA;AAAA,EAGA,IAAI,iBAAyB;AAC3B,WAAO,KAAK,KAAK,iBAAiB,qBAAqB;AAAA,EACzD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,iBAAyB;AACvB,WAAO,KAAK,YAAA,GAAe,qBAAqB;AAChD,WAAO,KAAK,aAAa,QAAA,EAAU;AAAA,EACrC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,wBAAwC;AACtC,WAAO,KAAK,YAAA,GAAe,qBAAqB;AAChD,WAAO,KAAK,aAAa,QAAA,EAAU;AAAA,EACrC;AAAA;AAAA;AAAA;AAAA,EAKA,qBAA+C;AAC7C,WAAO,KAAK,YAAA,GAAe,qBAAqB;AAChD,UAAM,MAAM;AAAA,MACV,KAAK;AAAA,MACL,KAAK,aAAa,QAAA,EAAU;AAAA,MAC5B,KAAK,SAAS;AAAA,MACd,KAAK;AAAA,IAAA;AAEP,QAAI,IAAI,SAAS;AACf,WAAK,eAAe,IAAI;AACxB,WAAK,IAAI;AAAA,QACP;AAAA,QACA,KAAK,UAAU,KAAK,YAAY;AAAA,MAAA;AAAA,IAEpC;AACA,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,qBAA6B;AAC3B,UAAM,EAAC,IAAI,QAAA,IAAW,KAAK,aAAa,qBAAqB;AAC7D,eAAW,SAAS,KAAK,QAAQ,OAAA,GAAU;AACzC,YAAM,MAAM,GAAG,EAAE;AAAA,IACnB;AACA,WAAO;AAAA,EACT;AAAA,EAEA,gCAAgC,IAAc;AAC5C,QAAI,WAAW,KAAK,aAAa,IAAI,EAAE;AACvC,QAAI,UAAU;AACZ,aAAO;AAAA,IACT;AACA,QAAI,KAAK,aAAa;AACpB,YAAM,YAAY,sBAAsB,IAAI,KAAK,WAAW;AAC5D,WAAK,YAAY,IAAI,IAAI,SAAS;AAClC,aAAO;AAAA,IACT;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,UAAU;AACR,SAAK,SAAS,QAAA;AACd,SAAK,aAAa,QAAA;AAAA,EACpB;AAAA;AAAA,EAGA,eASE;AACA,UAAM,qCAAqB,IAAA;AAI3B,eAAW,YAAY,KAAK,WAAW,OAAA,GAAU;AAC/C,YAAM,EAAC,cAAc,gBAAgB,mBAAA,IAAsB;AAE3D,UAAI,CAAC,eAAe,IAAI,YAAY,GAAG;AACrC,uBAAe,IAAI,cAAc,EAAE;AAAA,MACrC;AACA,qBAAe,IAAI,YAAY,EAAG,KAAK;AAAA,QACrC;AAAA,QACA;AAAA,MAAA,CACD;AAAA,IACH;AACA,WAAO,CAAC,IAAI,IAAI,KAAK,WAAW,KAAA,CAAM,GAAG,cAAc;AAAA,EACzD;AAAA,EAEA,uBAA+B;AAC7B,QAAI,QAAQ;AACZ,eAAW,YAAY,KAAK,WAAW,OAAA,GAAU;AAC/C,eAAS,SAAS;AAAA,IACpB;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAiBA,CAAC,SACC,oBACA,SACA,OACA,OAC+B;AAC/B,WAAO,KAAK,aAAa;AACzB,SAAK,mBAAmB,SAAS,oBAAoB,SAAS,KAAK;AACnE,QAAI,KAAK,WAAW,IAAI,kBAAkB,GAAG;AAC3C,WAAK,IAAI,OAAO,SAAS,kBAAkB,kBAAkB,KAAK;AAClE;AAAA,IACF;AA
CA,UAAM,gBAAgB,kBAAkB,kBACpC,IAAI,UACJ;AAEJ,UAAM,YAAY,KAAK;AAAA,MACrB,KAAK,aAAa,QAAA,EAAU,GAAG;AAAA,IAAA;AAGjC,UAAM,QAAQ;AAAA,MACZ;AAAA,MACA;AAAA,QACE,OAAO;AAAA,QACP,iBAAiB;AAAA;AAAA,QACjB,WAAW,CAAA,SAAQ,KAAK,WAAW,IAAI;AAAA,QACvC,eAAe,MAAM,KAAK,eAAA;AAAA,QAC1B,qBAAqB,CAACA,QAAoB,aACxC,IAAI;AAAA,UACFA;AAAAA,UACA;AAAA,UACA,KAAK;AAAA,UACL;AAAA,QAAA;AAAA,QAEJ,eAAe,CAAAA,WAASA;AAAAA,QACxB,UAAU;AAAA,QAAC;AAAA,QACX,qBAAqB,CAAAA,WAASA;AAAAA,MAAA;AAAA,MAEhC;AAAA,MACA;AAAA,IAAA;AAEF,UAAM,SAAS,MAAM,UAAA;AACrB,UAAM,UAAU;AAAA,MACd,MAAM,CAAA,WAAU;AACd,cAAM,WAAW,KAAK;AACtB,eAAO,UAAU,kDAAkD;AACnE,iBAAS,WAAW,oBAAoB,QAAQ,CAAC,MAAM,CAAC;AACxD,eAAO,CAAA;AAAA,MACT;AAAA,IAAA,CACD;AAED,WAAO,KAAK,oBAAoB,IAAI;AACpC,SAAK,kBAAkB;AAAA,MACrB;AAAA,IAAA;AAEF,QAAI;AACF,aAAO;AAAA,QACL;AAAA,QACA;AAAA,QACA,KAAK,KAAK,YAAY;AAAA,MAAA;AAAA,IAE1B,UAAA;AACE,WAAK,kBAAkB;AAAA,IACzB;AAEA,UAAM,kBAAkB,MAAM,aAAA;AAC9B,QAAI,kBAAkB,sBAAsB;AAC1C,UAAI,kBAAkB,KAAK,WAAW,sBAAsB;AAC1D,YAAI,sBAAsB;AAC1B,cAAM,KAAK,KAAK,IACb,YAAY,QAAQ,kBAAkB,EACtC,YAAY,mBAAmB,eAAe;AACjD,mBAAW,aAAa,KAAK,QAAQ,KAAA,GAAQ;AAC3C,gBAAM,UAAU,OAAO;AAAA,YACrB,eAAe,qBAAqB,SAAS,KAAK,CAAA;AAAA,UAAC;AAErD,iCAAuB,QAAQ;AAAA,YAC7B,CAAC,KAAK,UAAU,MAAM,MAAM,CAAC;AAAA,YAC7B;AAAA,UAAA;AAEF,aAAG,OAAO,YAAY,aAAa,OAAO;AAAA,QAC5C;AACA,WAAG,OAAO,0BAA0B,mBAAmB,EAAE;AAAA,MAC3D;AAAA,IACF;AACA,mBAAe,MAAA;AAKf,SAAK,WAAW,IAAI,oBAAoB;AAAA,MACtC;AAAA,MACA;AAAA,MACA,cAAc;AAAA,MACd,gBAAgB;AAAA,MAChB;AAAA,IAAA,CACD;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,YAAY,MAAc;AACxB,UAAM,WAAW,KAAK,WAAW,IAAI,IAAI;AACzC,QAAI,UAAU;AACZ,WAAK,WAAW,OAAO,IAAI;AAC3B,eAAS,MAAM,QAAA;AAAA,IACjB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,OAAO,OAAe,IAA6B;AACjD,WAAO,KAAK,YAAA,GAAe,qBAAqB;AAChD,UAAM,SAAS,KAAK,KAAK,QAAQ,IAAI,KAAK,CAAC;AAC3C,WAAO,OAAO,OAAO,EAAS;AAAA,EAChC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAaA,QAAQ,OAIN;AACA,WAAO,KAAK,aAAa;AACzB,UAAM,OAAO,KAAK,aAAa,QAAQ,KAAK,WAAW;AACvD,UAAM,EAAC,MAAM,MAAM,QAAA,IAAW;AAC9B,SAAK,IAAI;AAAA,MACP,WAAW,KAAK,OAAO,OAAO,KAAK,OAAO,KAAK,OAAO;AAAA,IAAA;AAGxD,WAAO;AAAA,MACL,SAAS,KAAK;AAAA,MACd,YAAY;AAAA,MACZ,SAAS,KAAK,SAAS,MAAM,OAAO,OAAO;AAAA,IAAA;AAAA,EAE/C;AAAA,EAEA,CAAC,SACC,MACA,OACA,YAC+B;AAC/B,WAAO,KAAK,oBAAoB,IAAI;AACpC,SAAK,kBAAkB;AAAA,MACrB;AAAA,MACA,sBAAsB,KAAK,qBAAA;AAAA,MAC3B;AAAA,MACA,KAAK;AAAA,IAAA;AAEP,QAAI;AACF,iBAAW,EAAC,OAAO,YAAY,UAAA,KAAc,MAAM;AAKjD,YAAI,KAAK,wCAAwC;AAC/C,gBAAM;AAAA,QACR;AACA,cAAM,QAAQ,YAAY,IAAA;AAC1B,YAAI;AACJ,YAAI;AACF,gBAAM,cAAc,KAAK,QAAQ,IAAI,KAAK;AAC1C,cAAI,CAAC,aAAa;AAEhB;AAAA,UACF;AACA,gBAAM,aAAa,kBAAkB,KAAK,cAAc,KAAK;AAC7D,cAAI,aAA8B;AAClC,qBAAW,aAAa,YAAY;AAClC,gBACE,aACA;AAAA,cACE,UAAU,YAAY,SAAgB;AAAA,cACtC,UAAU,YAAY,SAAgB;AAAA,YAAA,GAExC;AACA,2BAAa;AAAA,YACf,OAAO;AACL,kBAAI,WAAW;AACb,qBAAK,qBAAqB,IAAI,CAAC;AAAA,cACjC;AACA,qBAAO,KAAK,MAAM,aAAa;AAAA,gBAC7B,MAAM;AAAA,gBACN,KAAK;AAAA,cAAA,CACN;AAAA,YACH;AAAA,UACF;AACA,cAAI,WAAW;AACb,gBAAI,YAAY;AACd,qBAAO,KAAK,MAAM,aAAa;AAAA,gBAC7B,MAAM;AAAA,gBACN,KAAK;AAAA,gBACL,QAAQ;AAAA,cAAA,CACT;AAAA,YACH,OAAO;AACL,qBAAO,KAAK,MAAM,aAAa;AAAA,gBAC7B,MAAM;AAAA,gBACN,KAAK;AAAA,cAAA,CACN;AAAA,YACH;AAAA,UACF;AAAA,QACF,UAAA;AACE,eAAK,gBAAgB;AAAA,QACvB;AAEA,cAAM,UAAU,YAAY,IAAA,IAAQ;AACpC,aAAK,aAAa,OAAO,UAAU,KAAM;AAAA,UACvC;AAAA,UACA;AAAA,QAAA,CACD;AAAA,MACH;AAGA,YAAM,EAAC,SAAQ;AACf,iBAAW,SAAS,KAAK,QAAQ,OAAA,GAAU;AACzC,cAAM,MAAM,KAAK,GAAG,EAAE;AAAA,MACxB;AACA,WAAK,gCAAgC,KAAK,GAAG,EAAE;AAC/C,WAAK,IAAI,QAAQ,eAAe,KAAK,OAAO,EAAE;AAAA,IAChD,UAAA;AACE,WAAK,kBAAkB;AAAA,IACzB;AAAA,EACF;AAAA;AAAA,EAGA,WAAW,WAA2B;AACpC,QAAI,SAAS,KAAK,QAAQ,IAAI,SAAS;AACvC,QAAI,QAAQ;AACV,aAAO;AAAA,IACT;AAEA,UAAM,YAAY,iBAAiB,K
AAK,aAAa,SAAS;AAC9D,UAAM,aAAa,kBAAkB,KAAK,cAAc,SAAS;AAEjE,UAAM,EAAC,GAAA,IAAM,KAAK,aAAa,QAAA;AAC/B,aAAS,IAAI;AAAA,MACX,KAAK;AAAA,MACL,KAAK;AAAA,MACL,GAAG;AAAA,MACH;AAAA,MACA,UAAU;AAAA,MACV;AAAA,MACA,MAAM,KAAK,aAAA;AAAA,IAAa;AAE1B,SAAK,QAAQ,IAAI,WAAW,MAAM;AAClC,SAAK,IAAI,QAAQ,2BAA2B,SAAS,EAAE;AACvD,WAAO;AAAA,EACT;AAAA,EAEA,eAAwB;AACtB,QAAI,KAAK,iBAAiB;AACxB,aAAO,KAAK,gBAAgB,MAAM,WAAA,IAAe,KAAK;AAAA,IACxD;AACA,QAAI,KAAK,iBAAiB;AACxB,aAAO,KAAK,qCAAA;AAAA,IACd;AACA,UAAM,IAAI,MAAM,wDAAwD;AAAA,EAC1E;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAeA,uCAAgD;AAC9C,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA,OAAO;AAAA,MACP;AAAA,IAAA,IACE,KAAK,KAAK,eAAe;AAC7B,UAAM,UAAU,aAAa,aAAA;AAC7B,QACE,UAAU,kCACT,UAAU,wBACR,UAAU,uBAAuB,KAAK,OAAO,aAAa,IAC7D;AACA,YAAM,IAAI;AAAA,QACR,mCAAmC,GAAG,OAAO,UAAU,kBAC5C,OAAO,kEACK,oBAAoB;AAAA,MAAA;AAAA,IAE/C;AACA,WAAO,aAAa,eAAe,KAAK;AAAA,EAC1C;AAAA;AAAA,EAGA,iBAA0B;AACxB,WAAO,KAAK,SAAS,cAAA;AAAA,EACvB;AAAA,EAEA,CAAC,MACC,QACA,QAC+B;AAC/B,SAAK,mBAAA;AACL,QAAI;AACF,iBAAW,OAAO,OAAO,QAAQ,MAAM,GAAG;AACxC,YAAI,QAAQ,SAAS;AACnB,gBAAM;AAAA,QACR;AACA,mBAAW,iBAAiB,KAAK,kBAAA,EAAoB,UAAU;AAC7D,gBAAM;AAAA,QACR;AACA,aAAK,mBAAA;AAAA,MACP;AAAA,IACF,UAAA;AACE,UAAI,KAAK,cAAc,MAAM;AAC3B,aAAK,kBAAA;AAAA,MACP;AAAA,IACF;AAAA,EACF;AAAA,EAEA,qBAAqB;AACnB,WAAO,KAAK,cAAc,IAAI;AAC9B,SAAK,YAAY,IAAI,SAAS,KAAK,KAAK,YAAY,CAAC;AAAA,EACvD;AAAA,EAEA,oBAA8B;AAC5B,UAAM,WAAW,KAAK;AACtB,WAAO,QAAQ;AACf,SAAK,YAAY;AACjB,WAAO;AAAA,EACT;AACF;AAEA,MAAM,SAAS;AAAA,EACJ;AAAA,EAET,YAAY,aAAsC;AAChD,SAAK,eAAe;AAAA,EACtB;AAAA,EAES,WAIH,CAAA;AAAA,EAEN,WACE,MACA,QACA,SACM;AACN,SAAK,SAAS,KAAK,CAAC,MAAM,QAAQ,OAAO,CAAC;AAC1C,WAAO;AAAA,EACT;AAAA,EAEA,CAAC,SAAwC;AACvC,eAAW,CAAC,MAAM,QAAQ,OAAO,KAAK,KAAK,UAAU;AACnD,aAAO,KAAK,eAAe,MAAM,QAAQ,OAAO;AAAA,IAClD;AAAA,EACF;AAAA,EAEA,CAAC,eACC,WACA,QACA,SAC+B;AAG/B,QAAI,OAAO,WAAW,eAAe;AACnC;AAAA,IACF;AAEA,eAAW,UAAU,SAAS;AAC5B,UAAI,WAAW,SAAS;AACtB,cAAM;AACN;AAAA,MACF;AACA,YAAM,EAAC,SAAQ;AAEf,cAAQ,MAAA;AAAA,QACN,KAAK;AAAA,QACL,KAAK,UAAU;AACb,iBAAO,KAAK,aAAa,WAAW,QAAQ,MAAM,MAAM;AAAA,YACtD,OAAO;AAAA,UAAA,CACR;AACD;AAAA,QACF;AAAA,QACA,KAAK,SAAS;AACZ,gBAAM,EAAC,UAAS;AAChB,gBAAM,cAAc;AAAA,YAClB,OAAO,cAAc,MAAM,gBAAgB;AAAA,UAAA;AAG7C,iBAAO,KAAK,eAAe,WAAW,aAAa,CAAC,MAAM,MAAM,CAAC;AACjE;AAAA,QACF;AAAA,QACA,KAAK;AACH,iBAAO,KAAK,aAAa,WAAW,QAAQ,MAAM,MAAM;AAAA,YACtD,EAAC,KAAK,OAAO,KAAK,KAAK,eAAe,CAAA,EAAC;AAAA,UAAC,CACzC;AACD;AAAA,QACF;AACE,sBAAgB;AAAA,MAAA;AAAA,IAEtB;AAAA,EACF;AAAA,EAEA,CAAC,aACC,WACA,QACA,IACA,OAC+B;AAC/B,UAAM,EAAC,WAAW,OAAO,OAAA,IAAU;AAEnC,UAAM,aAAa,KAAK,KAAK,aAAa,IAAI,KAAK,CAAC;AAIpD,QAAI,WAAW,eAAe;AAC5B;AAAA,IACF;AAEA,eAAW,QAAQ,SAAS;AAC1B,UAAI,SAAS,SAAS;AACpB,cAAM;AACN;AAAA,MACF;AACA,YAAM,EAAC,eAAe,IAAA,IAAO;AAC7B,YAAM,SAAS,UAAU,YAAY,GAAG;AAExC,YAAM;AAAA,QACJ,MAAM;AAAA,QACN;AAAA,QACA;AAAA,QACA;AAAA,QACA,KAAK,OAAO,WAAW,SAAY;AAAA,MAAA;AAGrC,iBAAW,CAAC,cAAc,QAAQ,KAAK,OAAO,QAAQ,aAAa,GAAG;AACpE,cAAM,cAAc,KAAK,OAAO,cAAc,YAAY,CAAC;AAC3D,eAAO,KAAK,aAAa,WAAW,aAAa,IAAI,QAAQ;AAAA,MAC/D;AAAA,IACF;AAAA,EACF;AACF;AAEA,UAAU,OAAO,OAA6D;AAC5E,aAAW,QAAQ,OAAO;AACxB,QAAI,SAAS,SAAS;AACpB,YAAM;AACN;AAAA,IACF;AACA,UAAM,EAAC,MAAM,OAAO,KAAA;AAAA,EACtB;AACF;AAEA,SAAS,UAAU,MAAkB,KAAkB;AACrD,SAAO,OAAO,YAAY,KAAK,IAAI,CAAA,QAAO,CAAC,KAAK,KAAK,IAAI,GAAG,CAAC,CAAC,CAAC,CAAC;AAClE;AAOO,UAAU,QACf,OACA,MACA,cAC+B;AAC/B,QAAM,MAAM,MAAM,MAAM,EAAE;AAC1B,QAAM,WAAW,IAAI,SAAS,iBAAiB,YAAY,CAAC,EAAE;AAAA,IAC5D;AAAA,IACA,MAAM,UAAA;AAAA,IACN,OAAO,GAAG;AAAA,EAAA;AAEZ,SAAO,SAAS,OAAA;AAClB;AAEO,UAAU,gBACf,OACA,MACA,aAC+B;AAC/B,QAAM,MAAM,MAAM,MAAM,EAAE;AAC1B,QAAM,WAAW,IAAI,SAAS,WAAW,EAAE;AAAA,IACzC;AAAA,IACA,MAAM,UAAA;A
AAA,IACN,OAAO,GAAG;AAAA,EAAA;AAEZ,SAAO,SAAS,OAAA;AAClB;AAEA,SAAS,iBACP,cACA,cAAuC,oBAAI,OAC3C;AACA,aAAW,CAAC,WAAW,EAAC,WAAA,CAAW,KAAK,OAAO,QAAQ,aAAa,MAAM,GAAG;AAC3E,gBAAY,IAAI,WAAW,UAAmC;AAAA,EAChE;AACA,SAAO;AACT;AAEA,SAAS,kBACP,aACA,OACY;AACZ,QAAM,QAAQ,KAAK,aAAa,iCAAiC;AAEjE,SAAO;AAAA,IACL,MAAM,IAAI,KAAK;AAAA,IACf,UAAU,KAAK,oBAAoB,CAAC,GAAG,MAAM,KAAA,CAAM,EAAE,KAAA,CAAM;AAAA,EAAA;AAG/D;"}
@@ -15,6 +15,7 @@ export type InstancesRow = {
     owner: string | null;
     grantedAt: number | null;
     clientSchema: ClientSchema | null;
+    profileID: string | null;
 };
 export declare function compareInstancesRows(a: InstancesRow, b: InstancesRow): number;
 export type ClientsRow = {
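The only API-surface change in this hunk is the nullable `profileID` added to `InstancesRow`. As a minimal sketch (the helper below is hypothetical, not part of the package), a consumer could resolve the id described by the new column's comment in the cvr.js hunk further down, where the stable profile id falls back to a `cg`-prefixed clientGroupID for old clients:

// Hypothetical helper, not part of @rocicorp/zero: resolve the id recorded in
// "profileID", assuming the fallback convention described in the column comment.
function effectiveProfileID(row: {clientGroupID: string; profileID: string | null}): string {
  // Old clients have no stable profile id; fall back to "cg" + clientGroupID.
  return row.profileID ?? `cg${row.clientGroupID}`;
}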
@@ -1 +1 @@
-
{"version":3,"file":"cvr.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/view-syncer/schema/cvr.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,QAAQ,MAAM,UAAU,CAAC;AACrC,OAAO,EACL,KAAK,UAAU,EACf,KAAK,SAAS,EAEf,MAAM,0CAA0C,CAAC;AAClD,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,mCAAmC,CAAC;AAEzE,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,mDAAmD,CAAC;AAEpF,OAAO,EAAY,KAAK,OAAO,EAAC,MAAM,0BAA0B,CAAC;AACjE,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,iBAAiB,CAAC;AAC9C,OAAO,EACL,KAAK,KAAK,EACV,KAAK,SAAS,EAGf,MAAM,YAAY,CAAC;AAWpB,MAAM,MAAM,YAAY,GAAG;IACzB,aAAa,EAAE,MAAM,CAAC;IACtB,OAAO,EAAE,MAAM,CAAC;IAChB,UAAU,EAAE,MAAM,CAAC;IACnB,QAAQ,EAAE,QAAQ,CAAC;IACnB,cAAc,EAAE,MAAM,GAAG,IAAI,CAAC;IAC9B,KAAK,EAAE,MAAM,GAAG,IAAI,CAAC;IACrB,SAAS,EAAE,MAAM,GAAG,IAAI,CAAC;IACzB,YAAY,EAAE,YAAY,GAAG,IAAI,CAAC;
+
{"version":3,"file":"cvr.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/view-syncer/schema/cvr.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,QAAQ,MAAM,UAAU,CAAC;AACrC,OAAO,EACL,KAAK,UAAU,EACf,KAAK,SAAS,EAEf,MAAM,0CAA0C,CAAC;AAClD,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,mCAAmC,CAAC;AAEzE,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,mDAAmD,CAAC;AAEpF,OAAO,EAAY,KAAK,OAAO,EAAC,MAAM,0BAA0B,CAAC;AACjE,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,iBAAiB,CAAC;AAC9C,OAAO,EACL,KAAK,KAAK,EACV,KAAK,SAAS,EAGf,MAAM,YAAY,CAAC;AAWpB,MAAM,MAAM,YAAY,GAAG;IACzB,aAAa,EAAE,MAAM,CAAC;IACtB,OAAO,EAAE,MAAM,CAAC;IAChB,UAAU,EAAE,MAAM,CAAC;IACnB,QAAQ,EAAE,QAAQ,CAAC;IACnB,cAAc,EAAE,MAAM,GAAG,IAAI,CAAC;IAC9B,KAAK,EAAE,MAAM,GAAG,IAAI,CAAC;IACrB,SAAS,EAAE,MAAM,GAAG,IAAI,CAAC;IACzB,YAAY,EAAE,YAAY,GAAG,IAAI,CAAC;IAClC,SAAS,EAAE,MAAM,GAAG,IAAI,CAAC;CAC1B,CAAC;AA+BF,wBAAgB,oBAAoB,CAAC,CAAC,EAAE,YAAY,EAAE,CAAC,EAAE,YAAY,UAEpE;AAED,MAAM,MAAM,UAAU,GAAG;IACvB,aAAa,EAAE,MAAM,CAAC;IACtB,QAAQ,EAAE,MAAM,CAAC;CAClB,CAAC;AAkBF,wBAAgB,kBAAkB,CAAC,CAAC,EAAE,UAAU,EAAE,CAAC,EAAE,UAAU,UAM9D;AAED,MAAM,MAAM,UAAU,GAAG;IACvB,aAAa,EAAE,MAAM,CAAC;IACtB,SAAS,EAAE,MAAM,CAAC;IAElB,SAAS,EAAE,SAAS,GAAG,IAAI,CAAC;IAC5B,SAAS,EAAE,MAAM,GAAG,IAAI,CAAC;IACzB,SAAS,EAAE,SAAS,iBAAiB,EAAE,GAAG,IAAI,CAAC;IAC/C,YAAY,EAAE,MAAM,GAAG,IAAI,CAAC;IAC5B,kBAAkB,EAAE,MAAM,GAAG,IAAI,CAAC;IAClC,qBAAqB,EAAE,MAAM,GAAG,IAAI,CAAC;IACrC,QAAQ,EAAE,OAAO,GAAG,IAAI,CAAC;IACzB,OAAO,EAAE,OAAO,GAAG,IAAI,CAAC;CACzB,CAAC;AA8BF,wBAAgB,kBAAkB,CAAC,CAAC,EAAE,UAAU,EAAE,CAAC,EAAE,UAAU,UAM9D;AAED,MAAM,MAAM,UAAU,GAAG;IACvB,aAAa,EAAE,MAAM,CAAC;IACtB,QAAQ,EAAE,MAAM,CAAC;IACjB,SAAS,EAAE,MAAM,CAAC;IAClB,YAAY,EAAE,MAAM,CAAC;IACrB,OAAO,EAAE,OAAO,GAAG,IAAI,CAAC;IACxB,GAAG,EAAE,MAAM,GAAG,IAAI,CAAC;IACnB,aAAa,EAAE,QAAQ,GAAG,IAAI,CAAC;CAChC,CAAC;AAgCF,wBAAgB,kBAAkB,CAAC,CAAC,EAAE,UAAU,EAAE,CAAC,EAAE,UAAU,UAU9D;AAED,MAAM,MAAM,OAAO,GAAG;IACpB,aAAa,EAAE,MAAM,CAAC;IACtB,MAAM,EAAE,MAAM,CAAC;IACf,KAAK,EAAE,MAAM,CAAC;IACd,MAAM,EAAE,UAAU,CAAC;IACnB,UAAU,EAAE,MAAM,CAAC;IACnB,YAAY,EAAE,MAAM,CAAC;IACrB,SAAS,EAAE;QAAC,CAAC,SAAS,EAAE,MAAM,GAAG,MAAM,CAAA;KAAC,GAAG,IAAI,CAAC;CACjD,CAAC;AAEF,wBAAgB,cAAc,CAAC,OAAO,EAAE,OAAO,GAAG,KAAK,CAMtD;AAED,wBAAgB,kBAAkB,CAAC,OAAO,EAAE,OAAO,GAAG,SAAS,CAO9D;AAED,wBAAgB,kBAAkB,CAChC,aAAa,EAAE,MAAM,EACrB,SAAS,EAAE,SAAS,GACnB,OAAO,CAUT;AAED,wBAAgB,eAAe,CAAC,CAAC,EAAE,OAAO,EAAE,CAAC,EAAE,OAAO,UAiBrD;AAED;;;;;;;;;;;;;;GAcG;AACH,wBAAgB,sBAAsB,CAAC,KAAK,EAAE,OAAO,UAOpD;AAqCD,MAAM,MAAM,cAAc,GAAG;IAC3B,aAAa,EAAE,MAAM,CAAC;IACtB,OAAO,EAAE,MAAM,CAAC;CACjB,CAAC;AAcF,wBAAsB,cAAc,CAClC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,QAAQ,CAAC,cAAc,EAC3B,KAAK,EAAE,OAAO,iBAIf"}
@@ -10,22 +10,35 @@ function createSchema(shard) {
   return `CREATE SCHEMA IF NOT EXISTS ${schema(shard)};`;
 }
 function createInstancesTable(shard) {
-  return `
+  return (
+    /*sql*/
+    `
 CREATE TABLE ${schema(shard)}.instances (
   "clientGroupID" TEXT PRIMARY KEY,
-  "version" TEXT NOT NULL,
-  "lastActive" TIMESTAMPTZ NOT NULL,
-  "ttlClock" DOUBLE PRECISION NOT NULL, -- The ttl clock gets "paused" when disconnected.
-  "replicaVersion" TEXT,
-  "owner" TEXT,
-  "grantedAt" TIMESTAMPTZ,
-  "clientSchema" JSONB
+  "version" TEXT NOT NULL, -- Sortable representation of CVRVersion, e.g. "5nbqa2w:09"
+  "lastActive" TIMESTAMPTZ NOT NULL, -- For garbage collection
+  "ttlClock" DOUBLE PRECISION NOT NULL DEFAULT 0, -- The ttl clock gets "paused" when disconnected.
+  "replicaVersion" TEXT, -- Identifies the replica (i.e. initial-sync point) from which the CVR data comes.
+  "owner" TEXT, -- The ID of the task / server that has been granted ownership of the CVR.
+  "grantedAt" TIMESTAMPTZ, -- The time at which the current owner was last granted ownership (most recent connection time).
+  "clientSchema" JSONB, -- ClientSchema of the client group
+  "profileID" TEXT, -- Stable profile id ("p..."), falling back to the clientGroupID ("cg{clientGroupID}") for old clients
+  "deleted" BOOL DEFAULT FALSE -- Tombstone column for deleted CVRs; instances rows are kept longer for usage stats
 );
 
 -- For garbage collection.
 CREATE INDEX instances_last_active
-  ON ${schema(shard)}.instances ("lastActive");
-`;
+  ON ${schema(shard)}.instances ("lastActive") WHERE NOT "deleted";
+CREATE INDEX tombstones_last_active
+  ON ${schema(shard)}.instances ("lastActive") WHERE "deleted";
+
+-- For usage stats; the composite index allows a
+-- SELECT COUNT(DISTINCT("profileID")) query to be answered by
+-- an index scan without additional table lookups.
+CREATE INDEX profile_ids_last_active ON ${schema(shard)}.instances ("lastActive", "profileID")
+  WHERE "profileID" IS NOT NULL;
+`
+  );
 }
 function createClientsTable(shard) {
   return `
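The SQL comments in the hunk above state the intent of the new indexes: `instances_last_active` and `tombstones_last_active` are partial indexes that split garbage-collection scans between live and tombstoned rows, and `profile_ids_last_active` lets a `COUNT(DISTINCT "profileID")` usage-stats query be answered from the index without extra table lookups. A minimal usage sketch follows, assuming a postgres.js client and a literal `cvr` schema name (the real name comes from `cvrSchema(shard)`); the 30-day retention window is illustrative, not taken from the package:

import postgres from 'postgres';

// Assumed connection string; not part of the package.
const sql = postgres('postgres://localhost:5432/zero_cvr');
const cvr = 'cvr'; // assumed schema name; zero-cache derives the real one per shard

// Usage stats: with filters on "lastActive" and "profileID" IS NOT NULL, this
// query matches the shape that profile_ids_last_active is declared for.
const [{activeProfiles}] = await sql`
  SELECT COUNT(DISTINCT "profileID") AS "activeProfiles"
    FROM ${sql(cvr)}.instances
   WHERE "lastActive" > now() - interval '30 days'
     AND "profileID" IS NOT NULL`;

// Garbage collection of live CVRs scans only non-deleted rows,
// matching the partial index instances_last_active (WHERE NOT "deleted").
const staleLive = await sql`
  SELECT "clientGroupID"
    FROM ${sql(cvr)}.instances
   WHERE NOT "deleted"
     AND "lastActive" < now() - interval '30 days'`;

console.log(activeProfiles, staleLive.length);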
@@ -1 +1 @@
-
{"version":3,"file":"cvr.js","sources":["../../../../../../../zero-cache/src/services/view-syncer/schema/cvr.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {ident} from 'pg-format';\nimport type postgres from 'postgres';\nimport {\n type JSONObject,\n type JSONValue,\n stringify,\n} from '../../../../../shared/src/bigint-json.ts';\nimport type {ReadonlyJSONValue} from '../../../../../shared/src/json.ts';\nimport {stringCompare} from '../../../../../shared/src/string-compare.ts';\nimport type {ClientSchema} from '../../../../../zero-protocol/src/client-schema.ts';\nimport {normalizedKeyOrder, type RowKey} from '../../../types/row-key.ts';\nimport {cvrSchema, type ShardID} from '../../../types/shards.ts';\nimport type {TTLClock} from '../ttl-clock.ts';\nimport {\n type RowID,\n type RowRecord,\n versionFromString,\n versionString,\n} from './types.ts';\n\n// For readability in the sql statements.\nfunction schema(shard: ShardID) {\n return ident(cvrSchema(shard));\n}\n\nfunction createSchema(shard: ShardID) {\n return `CREATE SCHEMA IF NOT EXISTS ${schema(shard)};`;\n}\n\nexport type InstancesRow = {\n clientGroupID: string;\n version: string;\n lastActive: number;\n ttlClock: TTLClock;\n replicaVersion: string | null;\n owner: string | null;\n grantedAt: number | null;\n clientSchema: ClientSchema | null;\n};\n\nfunction createInstancesTable(shard: ShardID) {\n return `\nCREATE TABLE ${schema(shard)}.instances (\n \"clientGroupID\" TEXT PRIMARY KEY,\n \"version\" TEXT NOT NULL, -- Sortable representation of CVRVersion, e.g. \"5nbqa2w:09\"\n \"lastActive\" TIMESTAMPTZ NOT NULL, -- For garbage collection\n \"ttlClock\" DOUBLE PRECISION NOT NULL, -- The ttl clock gets \"paused\" when disconnected.\n \"replicaVersion\" TEXT, -- Identifies the replica (i.e. 
initial-sync point) from which the CVR data comes.\n \"owner\" TEXT, -- The ID of the task / server that has been granted ownership of the CVR.\n \"grantedAt\" TIMESTAMPTZ, -- The time at which the current owner was last granted ownership (most recent connection time).\n \"clientSchema\" JSONB -- ClientSchema of the client group\n);\n\n-- For garbage collection.\nCREATE INDEX instances_last_active\n ON ${schema(shard)}.instances (\"lastActive\");\n`;\n}\n\nexport function compareInstancesRows(a: InstancesRow, b: InstancesRow) {\n return stringCompare(a.clientGroupID, b.clientGroupID);\n}\n\nexport type ClientsRow = {\n clientGroupID: string;\n clientID: string;\n};\n\nfunction createClientsTable(shard: ShardID) {\n return `\nCREATE TABLE ${schema(shard)}.clients (\n \"clientGroupID\" TEXT,\n \"clientID\" TEXT,\n\n PRIMARY KEY (\"clientGroupID\", \"clientID\"),\n\n CONSTRAINT fk_clients_client_group\n FOREIGN KEY(\"clientGroupID\")\n REFERENCES ${schema(shard)}.instances(\"clientGroupID\")\n ON DELETE CASCADE\n);\n\n`;\n}\nexport function compareClientsRows(a: ClientsRow, b: ClientsRow) {\n const clientGroupIDComp = stringCompare(a.clientGroupID, b.clientGroupID);\n if (clientGroupIDComp !== 0) {\n return clientGroupIDComp;\n }\n return stringCompare(a.clientID, b.clientID);\n}\n\nexport type QueriesRow = {\n clientGroupID: string;\n queryHash: string;\n // This is the client AST _AFTER_ applying server name transformations.\n clientAST: JSONValue | null;\n queryName: string | null;\n queryArgs: readonly ReadonlyJSONValue[] | null;\n patchVersion: string | null;\n transformationHash: string | null;\n transformationVersion: string | null;\n internal: boolean | null;\n deleted: boolean | null;\n};\n\nfunction createQueriesTable(shard: ShardID) {\n return `\nCREATE TABLE ${schema(shard)}.queries (\n \"clientGroupID\" TEXT,\n \"queryHash\" TEXT, -- this is the hash of the client query AST\n \"clientAST\" JSONB, -- this is nullable as custom queries will not persist an AST\n \"queryName\" TEXT, -- the name of the query if it is a custom query\n \"queryArgs\" JSON, -- the arguments of the query if it is a custom query\n \"patchVersion\" TEXT, -- NULL if only desired but not yet \"got\"\n \"transformationHash\" TEXT,\n \"transformationVersion\" TEXT,\n \"internal\" BOOL, -- If true, no need to track / send patches\n \"deleted\" BOOL, -- put vs del \"got\" query\n\n PRIMARY KEY (\"clientGroupID\", \"queryHash\"),\n\n CONSTRAINT fk_queries_client_group\n FOREIGN KEY(\"clientGroupID\")\n REFERENCES ${schema(shard)}.instances(\"clientGroupID\")\n ON DELETE CASCADE\n);\n\n-- For catchup patches.\nCREATE INDEX queries_patch_version \n ON ${schema(shard)}.queries (\"patchVersion\" NULLS FIRST);\n`;\n}\n\nexport function compareQueriesRows(a: QueriesRow, b: QueriesRow) {\n const clientGroupIDComp = stringCompare(a.clientGroupID, b.clientGroupID);\n if (clientGroupIDComp !== 0) {\n return clientGroupIDComp;\n }\n return stringCompare(a.queryHash, b.queryHash);\n}\n\nexport type DesiresRow = {\n clientGroupID: string;\n clientID: string;\n queryHash: string;\n patchVersion: string;\n deleted: boolean | null;\n ttl: number | null;\n inactivatedAt: TTLClock | null;\n};\n\nfunction createDesiresTable(shard: ShardID) {\n return `\nCREATE TABLE ${schema(shard)}.desires (\n \"clientGroupID\" TEXT,\n \"clientID\" TEXT,\n \"queryHash\" TEXT,\n \"patchVersion\" TEXT NOT NULL,\n \"deleted\" BOOL, -- put vs del \"desired\" query\n \"ttl\" INTERVAL, -- DEPRECATED: Use ttlMs instead. 
Time to live for this client\n \"ttlMs\" DOUBLE PRECISION, -- Time to live in milliseconds\n \"inactivatedAt\" TIMESTAMPTZ, -- DEPRECATED: Use inactivatedAtMs instead. Time at which this row was inactivated\n \"inactivatedAtMs\" DOUBLE PRECISION, -- Time at which this row was inactivated (milliseconds since client group start)\n\n PRIMARY KEY (\"clientGroupID\", \"clientID\", \"queryHash\"),\n\n CONSTRAINT fk_desires_query\n FOREIGN KEY(\"clientGroupID\", \"queryHash\")\n REFERENCES ${ident(cvrSchema(shard))}.queries(\"clientGroupID\", \"queryHash\")\n ON DELETE CASCADE\n);\n\n-- For catchup patches.\nCREATE INDEX desires_patch_version\n ON ${schema(shard)}.desires (\"patchVersion\");\n\nCREATE INDEX desires_inactivated_at\n ON ${schema(shard)}.desires (\"inactivatedAt\");\n`;\n}\n\nexport function compareDesiresRows(a: DesiresRow, b: DesiresRow) {\n const clientGroupIDComp = stringCompare(a.clientGroupID, b.clientGroupID);\n if (clientGroupIDComp !== 0) {\n return clientGroupIDComp;\n }\n const clientIDComp = stringCompare(a.clientID, b.clientID);\n if (clientIDComp !== 0) {\n return clientIDComp;\n }\n return stringCompare(a.queryHash, b.queryHash);\n}\n\nexport type RowsRow = {\n clientGroupID: string;\n schema: string;\n table: string;\n rowKey: JSONObject;\n rowVersion: string;\n patchVersion: string;\n refCounts: {[queryHash: string]: number} | null;\n};\n\nexport function rowsRowToRowID(rowsRow: RowsRow): RowID {\n return {\n schema: rowsRow.schema,\n table: rowsRow.table,\n rowKey: rowsRow.rowKey as Record<string, JSONValue>,\n };\n}\n\nexport function rowsRowToRowRecord(rowsRow: RowsRow): RowRecord {\n return {\n id: rowsRowToRowID(rowsRow),\n rowVersion: rowsRow.rowVersion,\n patchVersion: versionFromString(rowsRow.patchVersion),\n refCounts: rowsRow.refCounts,\n };\n}\n\nexport function rowRecordToRowsRow(\n clientGroupID: string,\n rowRecord: RowRecord,\n): RowsRow {\n return {\n clientGroupID,\n schema: rowRecord.id.schema,\n table: rowRecord.id.table,\n rowKey: rowRecord.id.rowKey as Record<string, JSONValue>,\n rowVersion: rowRecord.rowVersion,\n patchVersion: versionString(rowRecord.patchVersion),\n refCounts: rowRecord.refCounts,\n };\n}\n\nexport function compareRowsRows(a: RowsRow, b: RowsRow) {\n const clientGroupIDComp = stringCompare(a.clientGroupID, b.clientGroupID);\n if (clientGroupIDComp !== 0) {\n return clientGroupIDComp;\n }\n const schemaComp = stringCompare(a.schema, b.schema);\n if (schemaComp !== 0) {\n return schemaComp;\n }\n const tableComp = stringCompare(b.table, b.table);\n if (tableComp !== 0) {\n return tableComp;\n }\n return stringCompare(\n stringifySorted(a.rowKey as RowKey),\n stringifySorted(b.rowKey as RowKey),\n );\n}\n\n/**\n * The version of the data in the `cvr.rows` table. This may lag\n * `version` in `cvr.instances` but eventually catches up, modulo\n * exceptional circumstances like a server crash.\n *\n * The `rowsVersion` is tracked in a separate table (as opposed to\n * a column in the `cvr.instances` table) so that general `cvr` updates\n * and `row` updates can be executed independently without serialization\n * conflicts.\n *\n * Note: Although `clientGroupID` logically references the same column in\n * `cvr.instances`, a FOREIGN KEY constraint must not be declared as the\n * `cvr.rows` TABLE needs to be updated without affecting the\n * `SELECT ... 
FOR UPDATE` lock when `cvr.instances` is updated.\n */\nexport function createRowsVersionTable(shard: ShardID) {\n return `\nCREATE TABLE ${schema(shard)}.\"rowsVersion\" (\n \"clientGroupID\" TEXT PRIMARY KEY,\n \"version\" TEXT NOT NULL\n);\n`;\n}\n\n/**\n * CVR `rows` are updated asynchronously from the CVR metadata\n * (i.e. `instances`). The `rowsVersion` table is updated atomically with\n * updates to the `rows` data.\n */\nfunction createRowsTable(shard: ShardID) {\n return `\nCREATE TABLE ${schema(shard)}.rows (\n \"clientGroupID\" TEXT,\n \"schema\" TEXT,\n \"table\" TEXT,\n \"rowKey\" JSONB,\n \"rowVersion\" TEXT NOT NULL,\n \"patchVersion\" TEXT NOT NULL,\n \"refCounts\" JSONB, -- {[queryHash: string]: number}, NULL for tombstone\n\n PRIMARY KEY (\"clientGroupID\", \"schema\", \"table\", \"rowKey\"),\n\n CONSTRAINT fk_rows_client_group\n FOREIGN KEY(\"clientGroupID\")\n REFERENCES ${schema(shard)}.\"rowsVersion\" (\"clientGroupID\")\n ON DELETE CASCADE\n);\n\n-- For catchup patches.\nCREATE INDEX row_patch_version \n ON ${schema(shard)}.rows (\"patchVersion\");\n\n-- For listing rows returned by one or more query hashes. e.g.\n-- SELECT * FROM cvr_shard.rows WHERE \"refCounts\" ?| array[...queryHashes...];\nCREATE INDEX row_ref_counts ON ${schema(shard)}.rows \n USING GIN (\"refCounts\");\n`;\n}\n\nexport type RowsVersionRow = {\n clientGroupID: string;\n version: string;\n};\n\nfunction createTables(shard: ShardID) {\n return (\n createSchema(shard) +\n createInstancesTable(shard) +\n createClientsTable(shard) +\n createQueriesTable(shard) +\n createDesiresTable(shard) +\n createRowsVersionTable(shard) +\n createRowsTable(shard)\n );\n}\n\nexport async function setupCVRTables(\n lc: LogContext,\n db: postgres.TransactionSql,\n shard: ShardID,\n) {\n lc.info?.(`Setting up CVR tables`);\n await db.unsafe(createTables(shard));\n}\n\nfunction stringifySorted(r: RowKey) {\n return 
stringify(normalizedKeyOrder(r));\n}\n"],"names":[],"mappings":";;;;;AAsBA,SAAS,OAAO,OAAgB;AAC9B,SAAO,MAAM,UAAU,KAAK,CAAC;AAC/B;AAEA,SAAS,aAAa,OAAgB;AACpC,SAAO,+BAA+B,OAAO,KAAK,CAAC;AACrD;AAaA,SAAS,qBAAqB,OAAgB;AAC5C,SAAO;AAAA,eACM,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,OAarB,OAAO,KAAK,CAAC;AAAA;AAEpB;AAWA,SAAS,mBAAmB,OAAgB;AAC1C,SAAO;AAAA,eACM,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,iBAQX,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAK9B;AAuBA,SAAS,mBAAmB,OAAgB;AAC1C,SAAO;AAAA,eACM,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,iBAgBX,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,OAMvB,OAAO,KAAK,CAAC;AAAA;AAEpB;AAoBA,SAAS,mBAAmB,OAAgB;AAC1C,SAAO;AAAA,eACM,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,iBAeX,MAAM,UAAU,KAAK,CAAC,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,OAMjC,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA,OAGb,OAAO,KAAK,CAAC;AAAA;AAEpB;AAwBO,SAAS,eAAe,SAAyB;AACtD,SAAO;AAAA,IACL,QAAQ,QAAQ;AAAA,IAChB,OAAO,QAAQ;AAAA,IACf,QAAQ,QAAQ;AAAA,EAAA;AAEpB;AAEO,SAAS,mBAAmB,SAA6B;AAC9D,SAAO;AAAA,IACL,IAAI,eAAe,OAAO;AAAA,IAC1B,YAAY,QAAQ;AAAA,IACpB,cAAc,kBAAkB,QAAQ,YAAY;AAAA,IACpD,WAAW,QAAQ;AAAA,EAAA;AAEvB;AAEO,SAAS,mBACd,eACA,WACS;AACT,SAAO;AAAA,IACL;AAAA,IACA,QAAQ,UAAU,GAAG;AAAA,IACrB,OAAO,UAAU,GAAG;AAAA,IACpB,QAAQ,UAAU,GAAG;AAAA,IACrB,YAAY,UAAU;AAAA,IACtB,cAAc,cAAc,UAAU,YAAY;AAAA,IAClD,WAAW,UAAU;AAAA,EAAA;AAEzB;AAoCO,SAAS,uBAAuB,OAAgB;AACrD,SAAO;AAAA,eACM,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAK5B;AAOA,SAAS,gBAAgB,OAAgB;AACvC,SAAO;AAAA,eACM,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,iBAaX,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,OAMvB,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA,iCAIa,OAAO,KAAK,CAAC;AAAA;AAAA;AAG9C;AAOA,SAAS,aAAa,OAAgB;AACpC,SACE,aAAa,KAAK,IAClB,qBAAqB,KAAK,IAC1B,mBAAmB,KAAK,IACxB,mBAAmB,KAAK,IACxB,mBAAmB,KAAK,IACxB,uBAAuB,KAAK,IAC5B,gBAAgB,KAAK;AAEzB;AAEA,eAAsB,eACpB,IACA,IACA,OACA;AACA,KAAG,OAAO,uBAAuB;AACjC,QAAM,GAAG,OAAO,aAAa,KAAK,CAAC;AACrC;"}
+
{"version":3,"file":"cvr.js","sources":["../../../../../../../zero-cache/src/services/view-syncer/schema/cvr.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {ident} from 'pg-format';\nimport type postgres from 'postgres';\nimport {\n type JSONObject,\n type JSONValue,\n stringify,\n} from '../../../../../shared/src/bigint-json.ts';\nimport type {ReadonlyJSONValue} from '../../../../../shared/src/json.ts';\nimport {stringCompare} from '../../../../../shared/src/string-compare.ts';\nimport type {ClientSchema} from '../../../../../zero-protocol/src/client-schema.ts';\nimport {normalizedKeyOrder, type RowKey} from '../../../types/row-key.ts';\nimport {cvrSchema, type ShardID} from '../../../types/shards.ts';\nimport type {TTLClock} from '../ttl-clock.ts';\nimport {\n type RowID,\n type RowRecord,\n versionFromString,\n versionString,\n} from './types.ts';\n\n// For readability in the sql statements.\nfunction schema(shard: ShardID) {\n return ident(cvrSchema(shard));\n}\n\nfunction createSchema(shard: ShardID) {\n return `CREATE SCHEMA IF NOT EXISTS ${schema(shard)};`;\n}\n\nexport type InstancesRow = {\n clientGroupID: string;\n version: string;\n lastActive: number;\n ttlClock: TTLClock;\n replicaVersion: string | null;\n owner: string | null;\n grantedAt: number | null;\n clientSchema: ClientSchema | null;\n profileID: string | null;\n};\n\nfunction createInstancesTable(shard: ShardID) {\n return /*sql*/ `\nCREATE TABLE ${schema(shard)}.instances (\n \"clientGroupID\" TEXT PRIMARY KEY,\n \"version\" TEXT NOT NULL, -- Sortable representation of CVRVersion, e.g. \"5nbqa2w:09\"\n \"lastActive\" TIMESTAMPTZ NOT NULL, -- For garbage collection\n \"ttlClock\" DOUBLE PRECISION NOT NULL DEFAULT 0, -- The ttl clock gets \"paused\" when disconnected.\n \"replicaVersion\" TEXT, -- Identifies the replica (i.e. 
initial-sync point) from which the CVR data comes.\n \"owner\" TEXT, -- The ID of the task / server that has been granted ownership of the CVR.\n \"grantedAt\" TIMESTAMPTZ, -- The time at which the current owner was last granted ownership (most recent connection time).\n \"clientSchema\" JSONB, -- ClientSchema of the client group\n \"profileID\" TEXT, -- Stable profile id (\"p...\"), falling back to the clientGroupID (\"cg{clientGroupID}\") for old clients\n \"deleted\" BOOL DEFAULT FALSE -- Tombstone column for deleted CVRs; instances rows are kept longer for usage stats\n);\n\n-- For garbage collection.\nCREATE INDEX instances_last_active\n ON ${schema(shard)}.instances (\"lastActive\") WHERE NOT \"deleted\";\nCREATE INDEX tombstones_last_active\n ON ${schema(shard)}.instances (\"lastActive\") WHERE \"deleted\";\n\n-- For usage stats; the composite index allows a \n-- SELECT COUNT(DISTINCT(\"profileID\")) query to be answered by\n-- an index scan without additional table lookups.\nCREATE INDEX profile_ids_last_active ON ${schema(shard)}.instances (\"lastActive\", \"profileID\")\n WHERE \"profileID\" IS NOT NULL;\n`;\n}\n\nexport function compareInstancesRows(a: InstancesRow, b: InstancesRow) {\n return stringCompare(a.clientGroupID, b.clientGroupID);\n}\n\nexport type ClientsRow = {\n clientGroupID: string;\n clientID: string;\n};\n\nfunction createClientsTable(shard: ShardID) {\n return `\nCREATE TABLE ${schema(shard)}.clients (\n \"clientGroupID\" TEXT,\n \"clientID\" TEXT,\n\n PRIMARY KEY (\"clientGroupID\", \"clientID\"),\n\n CONSTRAINT fk_clients_client_group\n FOREIGN KEY(\"clientGroupID\")\n REFERENCES ${schema(shard)}.instances(\"clientGroupID\")\n ON DELETE CASCADE\n);\n\n`;\n}\nexport function compareClientsRows(a: ClientsRow, b: ClientsRow) {\n const clientGroupIDComp = stringCompare(a.clientGroupID, b.clientGroupID);\n if (clientGroupIDComp !== 0) {\n return clientGroupIDComp;\n }\n return stringCompare(a.clientID, b.clientID);\n}\n\nexport type QueriesRow = {\n clientGroupID: string;\n queryHash: string;\n // This is the client AST _AFTER_ applying server name transformations.\n clientAST: JSONValue | null;\n queryName: string | null;\n queryArgs: readonly ReadonlyJSONValue[] | null;\n patchVersion: string | null;\n transformationHash: string | null;\n transformationVersion: string | null;\n internal: boolean | null;\n deleted: boolean | null;\n};\n\nfunction createQueriesTable(shard: ShardID) {\n return `\nCREATE TABLE ${schema(shard)}.queries (\n \"clientGroupID\" TEXT,\n \"queryHash\" TEXT, -- this is the hash of the client query AST\n \"clientAST\" JSONB, -- this is nullable as custom queries will not persist an AST\n \"queryName\" TEXT, -- the name of the query if it is a custom query\n \"queryArgs\" JSON, -- the arguments of the query if it is a custom query\n \"patchVersion\" TEXT, -- NULL if only desired but not yet \"got\"\n \"transformationHash\" TEXT,\n \"transformationVersion\" TEXT,\n \"internal\" BOOL, -- If true, no need to track / send patches\n \"deleted\" BOOL, -- put vs del \"got\" query\n\n PRIMARY KEY (\"clientGroupID\", \"queryHash\"),\n\n CONSTRAINT fk_queries_client_group\n FOREIGN KEY(\"clientGroupID\")\n REFERENCES ${schema(shard)}.instances(\"clientGroupID\")\n ON DELETE CASCADE\n);\n\n-- For catchup patches.\nCREATE INDEX queries_patch_version \n ON ${schema(shard)}.queries (\"patchVersion\" NULLS FIRST);\n`;\n}\n\nexport function compareQueriesRows(a: QueriesRow, b: QueriesRow) {\n const clientGroupIDComp = stringCompare(a.clientGroupID, 
b.clientGroupID);\n if (clientGroupIDComp !== 0) {\n return clientGroupIDComp;\n }\n return stringCompare(a.queryHash, b.queryHash);\n}\n\nexport type DesiresRow = {\n clientGroupID: string;\n clientID: string;\n queryHash: string;\n patchVersion: string;\n deleted: boolean | null;\n ttl: number | null;\n inactivatedAt: TTLClock | null;\n};\n\nfunction createDesiresTable(shard: ShardID) {\n return `\nCREATE TABLE ${schema(shard)}.desires (\n \"clientGroupID\" TEXT,\n \"clientID\" TEXT,\n \"queryHash\" TEXT,\n \"patchVersion\" TEXT NOT NULL,\n \"deleted\" BOOL, -- put vs del \"desired\" query\n \"ttl\" INTERVAL, -- DEPRECATED: Use ttlMs instead. Time to live for this client\n \"ttlMs\" DOUBLE PRECISION, -- Time to live in milliseconds\n \"inactivatedAt\" TIMESTAMPTZ, -- DEPRECATED: Use inactivatedAtMs instead. Time at which this row was inactivated\n \"inactivatedAtMs\" DOUBLE PRECISION, -- Time at which this row was inactivated (milliseconds since client group start)\n\n PRIMARY KEY (\"clientGroupID\", \"clientID\", \"queryHash\"),\n\n CONSTRAINT fk_desires_query\n FOREIGN KEY(\"clientGroupID\", \"queryHash\")\n REFERENCES ${ident(cvrSchema(shard))}.queries(\"clientGroupID\", \"queryHash\")\n ON DELETE CASCADE\n);\n\n-- For catchup patches.\nCREATE INDEX desires_patch_version\n ON ${schema(shard)}.desires (\"patchVersion\");\n\nCREATE INDEX desires_inactivated_at\n ON ${schema(shard)}.desires (\"inactivatedAt\");\n`;\n}\n\nexport function compareDesiresRows(a: DesiresRow, b: DesiresRow) {\n const clientGroupIDComp = stringCompare(a.clientGroupID, b.clientGroupID);\n if (clientGroupIDComp !== 0) {\n return clientGroupIDComp;\n }\n const clientIDComp = stringCompare(a.clientID, b.clientID);\n if (clientIDComp !== 0) {\n return clientIDComp;\n }\n return stringCompare(a.queryHash, b.queryHash);\n}\n\nexport type RowsRow = {\n clientGroupID: string;\n schema: string;\n table: string;\n rowKey: JSONObject;\n rowVersion: string;\n patchVersion: string;\n refCounts: {[queryHash: string]: number} | null;\n};\n\nexport function rowsRowToRowID(rowsRow: RowsRow): RowID {\n return {\n schema: rowsRow.schema,\n table: rowsRow.table,\n rowKey: rowsRow.rowKey as Record<string, JSONValue>,\n };\n}\n\nexport function rowsRowToRowRecord(rowsRow: RowsRow): RowRecord {\n return {\n id: rowsRowToRowID(rowsRow),\n rowVersion: rowsRow.rowVersion,\n patchVersion: versionFromString(rowsRow.patchVersion),\n refCounts: rowsRow.refCounts,\n };\n}\n\nexport function rowRecordToRowsRow(\n clientGroupID: string,\n rowRecord: RowRecord,\n): RowsRow {\n return {\n clientGroupID,\n schema: rowRecord.id.schema,\n table: rowRecord.id.table,\n rowKey: rowRecord.id.rowKey as Record<string, JSONValue>,\n rowVersion: rowRecord.rowVersion,\n patchVersion: versionString(rowRecord.patchVersion),\n refCounts: rowRecord.refCounts,\n };\n}\n\nexport function compareRowsRows(a: RowsRow, b: RowsRow) {\n const clientGroupIDComp = stringCompare(a.clientGroupID, b.clientGroupID);\n if (clientGroupIDComp !== 0) {\n return clientGroupIDComp;\n }\n const schemaComp = stringCompare(a.schema, b.schema);\n if (schemaComp !== 0) {\n return schemaComp;\n }\n const tableComp = stringCompare(b.table, b.table);\n if (tableComp !== 0) {\n return tableComp;\n }\n return stringCompare(\n stringifySorted(a.rowKey as RowKey),\n stringifySorted(b.rowKey as RowKey),\n );\n}\n\n/**\n * The version of the data in the `cvr.rows` table. 
This may lag\n * `version` in `cvr.instances` but eventually catches up, modulo\n * exceptional circumstances like a server crash.\n *\n * The `rowsVersion` is tracked in a separate table (as opposed to\n * a column in the `cvr.instances` table) so that general `cvr` updates\n * and `row` updates can be executed independently without serialization\n * conflicts.\n *\n * Note: Although `clientGroupID` logically references the same column in\n * `cvr.instances`, a FOREIGN KEY constraint must not be declared as the\n * `cvr.rows` TABLE needs to be updated without affecting the\n * `SELECT ... FOR UPDATE` lock when `cvr.instances` is updated.\n */\nexport function createRowsVersionTable(shard: ShardID) {\n return `\nCREATE TABLE ${schema(shard)}.\"rowsVersion\" (\n \"clientGroupID\" TEXT PRIMARY KEY,\n \"version\" TEXT NOT NULL\n);\n`;\n}\n\n/**\n * CVR `rows` are updated asynchronously from the CVR metadata\n * (i.e. `instances`). The `rowsVersion` table is updated atomically with\n * updates to the `rows` data.\n */\nfunction createRowsTable(shard: ShardID) {\n return `\nCREATE TABLE ${schema(shard)}.rows (\n \"clientGroupID\" TEXT,\n \"schema\" TEXT,\n \"table\" TEXT,\n \"rowKey\" JSONB,\n \"rowVersion\" TEXT NOT NULL,\n \"patchVersion\" TEXT NOT NULL,\n \"refCounts\" JSONB, -- {[queryHash: string]: number}, NULL for tombstone\n\n PRIMARY KEY (\"clientGroupID\", \"schema\", \"table\", \"rowKey\"),\n\n CONSTRAINT fk_rows_client_group\n FOREIGN KEY(\"clientGroupID\")\n REFERENCES ${schema(shard)}.\"rowsVersion\" (\"clientGroupID\")\n ON DELETE CASCADE\n);\n\n-- For catchup patches.\nCREATE INDEX row_patch_version \n ON ${schema(shard)}.rows (\"patchVersion\");\n\n-- For listing rows returned by one or more query hashes. e.g.\n-- SELECT * FROM cvr_shard.rows WHERE \"refCounts\" ?| array[...queryHashes...];\nCREATE INDEX row_ref_counts ON ${schema(shard)}.rows \n USING GIN (\"refCounts\");\n`;\n}\n\nexport type RowsVersionRow = {\n clientGroupID: string;\n version: string;\n};\n\nfunction createTables(shard: ShardID) {\n return (\n createSchema(shard) +\n createInstancesTable(shard) +\n createClientsTable(shard) +\n createQueriesTable(shard) +\n createDesiresTable(shard) +\n createRowsVersionTable(shard) +\n createRowsTable(shard)\n );\n}\n\nexport async function setupCVRTables(\n lc: LogContext,\n db: postgres.TransactionSql,\n shard: ShardID,\n) {\n lc.info?.(`Setting up CVR tables`);\n await db.unsafe(createTables(shard));\n}\n\nfunction stringifySorted(r: RowKey) {\n return 
stringify(normalizedKeyOrder(r));\n}\n"],"names":[],"mappings":";;;;;AAsBA,SAAS,OAAO,OAAgB;AAC9B,SAAO,MAAM,UAAU,KAAK,CAAC;AAC/B;AAEA,SAAS,aAAa,OAAgB;AACpC,SAAO,+BAA+B,OAAO,KAAK,CAAC;AACrD;AAcA,SAAS,qBAAqB,OAAgB;AAC5C;AAAA;AAAA,IAAe;AAAA,eACF,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,OAerB,OAAO,KAAK,CAAC;AAAA;AAAA,OAEb,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA,0CAKsB,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA;AAGvD;AAWA,SAAS,mBAAmB,OAAgB;AAC1C,SAAO;AAAA,eACM,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,iBAQX,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAK9B;AAuBA,SAAS,mBAAmB,OAAgB;AAC1C,SAAO;AAAA,eACM,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,iBAgBX,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,OAMvB,OAAO,KAAK,CAAC;AAAA;AAEpB;AAoBA,SAAS,mBAAmB,OAAgB;AAC1C,SAAO;AAAA,eACM,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,iBAeX,MAAM,UAAU,KAAK,CAAC,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,OAMjC,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA,OAGb,OAAO,KAAK,CAAC;AAAA;AAEpB;AAwBO,SAAS,eAAe,SAAyB;AACtD,SAAO;AAAA,IACL,QAAQ,QAAQ;AAAA,IAChB,OAAO,QAAQ;AAAA,IACf,QAAQ,QAAQ;AAAA,EAAA;AAEpB;AAEO,SAAS,mBAAmB,SAA6B;AAC9D,SAAO;AAAA,IACL,IAAI,eAAe,OAAO;AAAA,IAC1B,YAAY,QAAQ;AAAA,IACpB,cAAc,kBAAkB,QAAQ,YAAY;AAAA,IACpD,WAAW,QAAQ;AAAA,EAAA;AAEvB;AAEO,SAAS,mBACd,eACA,WACS;AACT,SAAO;AAAA,IACL;AAAA,IACA,QAAQ,UAAU,GAAG;AAAA,IACrB,OAAO,UAAU,GAAG;AAAA,IACpB,QAAQ,UAAU,GAAG;AAAA,IACrB,YAAY,UAAU;AAAA,IACtB,cAAc,cAAc,UAAU,YAAY;AAAA,IAClD,WAAW,UAAU;AAAA,EAAA;AAEzB;AAoCO,SAAS,uBAAuB,OAAgB;AACrD,SAAO;AAAA,eACM,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAK5B;AAOA,SAAS,gBAAgB,OAAgB;AACvC,SAAO;AAAA,eACM,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,iBAaX,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,OAMvB,OAAO,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA,iCAIa,OAAO,KAAK,CAAC;AAAA;AAAA;AAG9C;AAOA,SAAS,aAAa,OAAgB;AACpC,SACE,aAAa,KAAK,IAClB,qBAAqB,KAAK,IAC1B,mBAAmB,KAAK,IACxB,mBAAmB,KAAK,IACxB,mBAAmB,KAAK,IACxB,uBAAuB,KAAK,IAC5B,gBAAgB,KAAK;AAEzB;AAEA,eAAsB,eACpB,IACA,IACA,OACA;AACA,KAAG,OAAO,uBAAuB;AACjC,QAAM,GAAG,OAAO,aAAa,KAAK,CAAC;AACrC;"}
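The new cvr.ts schema above adds instances."profileID", a "deleted" tombstone flag, and the composite profile_ids_last_active index, whose stated purpose is to let a SELECT COUNT(DISTINCT "profileID") usage-stats query be answered by an index scan alone. A minimal sketch of such a query in the postgres.js style of the surrounding code; the countActiveProfiles helper and its `since` cutoff are illustrative and not part of the package, while the table, column, and import names follow the source above.

import type postgres from 'postgres';
import {cvrSchema, type ShardID} from '../../../types/shards.ts';

// Illustrative helper (not in the package): counts distinct active profiles
// using only the ("lastActive", "profileID") columns covered by the
// profile_ids_last_active index.
async function countActiveProfiles(
  sql: postgres.Sql,
  shard: ShardID,
  since: Date,
): Promise<number> {
  const [{count}] = await sql<{count: string}[]>`
    SELECT COUNT(DISTINCT "profileID") AS count
      FROM ${sql(cvrSchema(shard))}.instances
      WHERE "lastActive" >= ${since}
        AND "profileID" IS NOT NULL`;
  return Number(count);
}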
@@ -1 +1 @@
-
{"version":3,"file":"init.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/view-syncer/schema/init.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAOjD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,sBAAsB,CAAC;AACrD,OAAO,EAAY,KAAK,OAAO,EAAC,MAAM,0BAA0B,CAAC;AAGjE,wBAAsB,oBAAoB,CACxC,GAAG,EAAE,UAAU,EACf,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,OAAO,GACb,OAAO,CAAC,IAAI,CAAC,
+
{"version":3,"file":"init.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/view-syncer/schema/init.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAOjD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,sBAAsB,CAAC;AACrD,OAAO,EAAY,KAAK,OAAO,EAAC,MAAM,0BAA0B,CAAC;AAGjE,wBAAsB,oBAAoB,CACxC,GAAG,EAAE,UAAU,EACf,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,OAAO,GACb,OAAO,CAAC,IAAI,CAAC,CAkPf"}
@@ -147,6 +147,33 @@ async function initViewSyncerSchema(log, db, shard) {
       `;
     }
   };
+  const migratedV15ToV16 = {
+    migrateSchema: async (_, sql) => {
+      await sql`ALTER TABLE ${sql(schema)}.instances ADD COLUMN "profileID" TEXT`;
+      await sql`ALTER TABLE ${sql(schema)}.instances ADD COLUMN "deleted" BOOL DEFAULT FALSE`;
+      await sql`
+        DROP INDEX IF EXISTS ${sql(schema)}.instances_last_active`;
+      await sql`
+        CREATE INDEX instances_last_active ON ${sql(schema)}.instances ("lastActive")
+          WHERE NOT "deleted"`;
+      await sql`
+        CREATE INDEX tombstones_last_active ON ${sql(schema)}.instances ("lastActive")
+          WHERE "deleted"`;
+      await sql`
+        CREATE INDEX profile_ids_last_active ON ${sql(schema)}.instances ("lastActive", "profileID")
+          WHERE "profileID" IS NOT NULL`;
+    },
+    // Backfill profileIDs to the `cg${clientGroupID}`, as is done for
+    // client groups from old zero-clients that don't send a profileID.
+    migrateData: async (lc, sql) => {
+      lc.info?.("Backfilling instance.profileIDs");
+      await sql`
+        UPDATE ${sql(schema)}.instances
+        SET "profileID" = 'cg' || "clientGroupID"
+        WHERE "profileID" IS NULL
+      `;
+    }
+  };
   const schemaVersionMigrationMap = {
     2: migrateV1toV2,
     3: migrateV2ToV3,
@@ -175,7 +202,10 @@ async function initViewSyncerSchema(log, db, shard) {
     // V15 adds desires."inactivatedAtTTLClock" to store TTLClock values
     // directly as DOUBLE PRECISION, avoiding postgres.js TIMESTAMPTZ
     // type conversion issues
-    15: migratedV14ToV15
+    15: migratedV14ToV15,
+    // V16 adds instances."profileID" and a corresponding index for estimating
+    // active user counts more accurately for apps that use memstore.
+    16: migratedV15ToV16
   };
   await runSchemaMigrations(
     log,
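For context on how the new entry plugs in: initViewSyncerSchema (visible in the init.js source map below) expresses each step as a Migration with a migrateSchema callback for DDL and an optional migrateData callback for backfills, collects them in an IncrementalMigrationMap keyed by target schema version, and passes the map to runSchemaMigrations. A minimal sketch of registering a hypothetical next version in that shape; the v17 step, its "example" column, and the literal 'cvr' schema name are invented for illustration (the real code uses `sql(schema)` with `schema = cvrSchema(shard)`).

import {
  runSchemaMigrations,
  type IncrementalMigrationMap,
  type Migration,
} from '../../../db/migration.ts';

// Hypothetical v16 -> v17 step, mirroring the shape of migratedV15ToV16.
const exampleV16ToV17: Migration = {
  migrateSchema: async (_, sql) => {
    // DDL for the new version (the "example" column is invented).
    await sql`ALTER TABLE ${sql('cvr')}.instances ADD COLUMN "example" TEXT`;
  },
  migrateData: async (lc, sql) => {
    // Optional data backfill for the new column.
    lc.info?.('backfilling instances.example');
    await sql`UPDATE ${sql('cvr')}.instances SET "example" = '' WHERE "example" IS NULL`;
  },
};

const schemaVersionMigrationMap: IncrementalMigrationMap = {
  // ...versions 2 through 16 as above...
  17: exampleV16ToV17,
};

// await runSchemaMigrations(log, 'view-syncer', cvrSchema(shard), db, setupMigration, schemaVersionMigrationMap);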
@@ -1 +1 @@
-
{"version":3,"file":"init.js","sources":["../../../../../../../zero-cache/src/services/view-syncer/schema/init.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport type {PendingQuery, Row} from 'postgres';\nimport {\n runSchemaMigrations,\n type IncrementalMigrationMap,\n type Migration,\n} from '../../../db/migration.ts';\nimport type {PostgresDB} from '../../../types/pg.ts';\nimport {cvrSchema, type ShardID} from '../../../types/shards.ts';\nimport {createRowsVersionTable, setupCVRTables} from './cvr.ts';\n\nexport async function initViewSyncerSchema(\n log: LogContext,\n db: PostgresDB,\n shard: ShardID,\n): Promise<void> {\n const schema = cvrSchema(shard);\n\n const setupMigration: Migration = {\n migrateSchema: (lc, tx) => setupCVRTables(lc, tx, shard),\n minSafeVersion: 1,\n };\n\n const migrateV1toV2: Migration = {\n migrateSchema: async (_, tx) => {\n await tx`ALTER TABLE ${tx(schema)}.instances ADD \"replicaVersion\" TEXT`;\n },\n };\n\n const migrateV2ToV3: Migration = {\n migrateSchema: async (_, tx) => {\n await tx.unsafe(createRowsVersionTable(shard));\n },\n\n /** Populates the cvr.rowsVersion table with versions from cvr.instances. */\n migrateData: async (lc, tx) => {\n const pending: PendingQuery<Row[]>[] = [];\n for await (const versions of tx<\n {clientGroupID: string; version: string}[]\n >`\n SELECT \"clientGroupID\", \"version\" FROM ${tx(schema)}.instances`.cursor(\n 5000,\n )) {\n for (const version of versions) {\n pending.push(\n tx`INSERT INTO ${tx(schema)}.\"rowsVersion\" ${tx(version)} \n ON CONFLICT (\"clientGroupID\")\n DO UPDATE SET ${tx(version)}`.execute(),\n );\n }\n }\n lc.info?.(`initializing rowsVersion for ${pending.length} cvrs`);\n await Promise.all(pending);\n },\n };\n\n const migrateV3ToV4: Migration = {\n migrateSchema: async (_, tx) => {\n await tx`ALTER TABLE ${tx(schema)}.instances ADD \"owner\" TEXT`;\n await tx`ALTER TABLE ${tx(schema)}.instances ADD \"grantedAt\" TIMESTAMPTZ`;\n },\n };\n\n const migrateV5ToV6: Migration = {\n migrateSchema: async (_, tx) => {\n await tx`\n ALTER TABLE ${tx(schema)}.\"rows\"\n DROP CONSTRAINT fk_rows_client_group`;\n await tx`\n ALTER TABLE ${tx(schema)}.\"rowsVersion\"\n DROP CONSTRAINT fk_rows_version_client_group`;\n },\n };\n\n const migrateV6ToV7: Migration = {\n migrateSchema: async (_, tx) => {\n await tx`ALTER TABLE ${tx(schema)}.desires ADD \"expiresAt\" TIMESTAMPTZ`;\n await tx`ALTER TABLE ${tx(\n schema,\n )}.desires ADD \"inactivatedAt\" TIMESTAMPTZ`;\n await tx`ALTER TABLE ${tx(schema)}.desires ADD \"ttl\" INTERVAL`;\n\n await tx`CREATE INDEX desires_expires_at ON ${tx(\n schema,\n )}.desires (\"expiresAt\")`;\n await tx`CREATE INDEX desires_inactivated_at ON ${tx(\n schema,\n )}.desires (\"inactivatedAt\")`;\n },\n };\n\n const migrateV7ToV8: Migration = {\n migrateSchema: async (_, tx) => {\n await tx`ALTER TABLE ${tx(\n schema,\n )}.\"desires\" DROP CONSTRAINT fk_desires_client`;\n },\n };\n\n const migrateV8ToV9: Migration = {\n migrateSchema: async (_, tx) => {\n await tx`ALTER TABLE ${tx(schema)}.instances ADD \"clientSchema\" JSONB`;\n },\n };\n\n const migrateV9ToV10: Migration = {\n migrateSchema: async (_, tx) => {\n await tx`ALTER TABLE ${tx(schema)}.queries ADD \"queryName\" TEXT`;\n await tx`ALTER TABLE ${tx(schema)}.queries ADD \"queryArgs\" JSONB`;\n await tx`ALTER TABLE ${tx(schema)}.queries ALTER COLUMN \"clientAST\" DROP NOT NULL`;\n },\n };\n\n const migrateV10ToV11: Migration = {\n migrateSchema: async (_, tx) => {\n await tx`DROP INDEX IF 
EXISTS ${tx(schema)}.desires_expires_at`;\n await tx`ALTER TABLE ${tx(schema)}.desires DROP COLUMN \"expiresAt\"`;\n await tx`DROP INDEX IF EXISTS ${tx(schema)}.client_patch_version`;\n await tx`ALTER TABLE ${tx(schema)}.clients DROP COLUMN \"patchVersion\"`;\n await tx`ALTER TABLE ${tx(schema)}.clients DROP COLUMN \"deleted\"`;\n },\n };\n\n const migratedV11ToV12: Migration = {\n migrateSchema: async (_, tx) => {\n await tx`ALTER TABLE ${tx(schema)}.queries ALTER COLUMN \"queryArgs\" TYPE JSON USING \"queryArgs\"::JSON`;\n },\n };\n\n const migratedV12ToV13: Migration = {\n migrateSchema: async (_, tx) => {\n await tx`ALTER TABLE ${tx(schema)}.instances ADD COLUMN \"ttlClock\" DOUBLE PRECISION NOT NULL DEFAULT 0`;\n },\n };\n\n const migratedV13ToV14: Migration = {\n migrateSchema: async (_, sql) => {\n await sql`\n CREATE INDEX instances_last_active ON ${sql(schema)}.instances (\"lastActive\");\n `;\n\n // Update / add foreign key constraints to cascade deletes.\n for (const [table, reference] of [\n ['clients', 'instances'],\n ['queries', 'instances'],\n ['rows', 'rowsVersion'],\n ] as [string, string][]) {\n const constraint = sql(`fk_${table}_client_group`);\n await sql`\n ALTER TABLE ${sql(schema)}.${sql(table)} DROP CONSTRAINT IF EXISTS ${constraint}`;\n await sql`\n ALTER TABLE ${sql(schema)}.${sql(table)} ADD CONSTRAINT ${constraint}\n FOREIGN KEY(\"clientGroupID\")\n REFERENCES ${sql(schema)}.${sql(reference)} (\"clientGroupID\")\n ON DELETE CASCADE;\n `;\n }\n },\n };\n\n const migratedV14ToV15: Migration = {\n migrateSchema: async (_, sql) => {\n // Add new columns for storing inactivatedAt and ttl in milliseconds.\n // This avoids postgres.js type conversion issues with TIMESTAMPTZ and INTERVAL.\n await sql`ALTER TABLE ${sql(schema)}.desires \n ADD COLUMN \"inactivatedAtMs\" DOUBLE PRECISION`;\n await sql`ALTER TABLE ${sql(schema)}.desires \n ADD COLUMN \"ttlMs\" DOUBLE PRECISION`;\n },\n // Migrate existing data: convert TIMESTAMPTZ to milliseconds for inactivatedAt\n // and INTERVAL to milliseconds for ttl\n // Note: EXTRACT(EPOCH FROM NULL) returns NULL, so NULL values are preserved\n migrateData: async (lc, sql) => {\n lc.info?.(\n 'Migrating desires.inactivatedAt to inactivatedAtMs and ttl to ttlMs',\n );\n await sql`\n UPDATE ${sql(schema)}.desires\n SET \"inactivatedAtMs\" = EXTRACT(EPOCH FROM \"inactivatedAt\") * 1000,\n \"ttlMs\" = EXTRACT(EPOCH FROM \"ttl\") * 1000\n `;\n },\n };\n\n const schemaVersionMigrationMap: IncrementalMigrationMap = {\n 2: migrateV1toV2,\n 3: migrateV2ToV3,\n 4: migrateV3ToV4,\n // v5 enables asynchronous row-record flushing, and thus relies on\n // the logic that updates and checks the rowsVersion table in v3.\n 5: {minSafeVersion: 3},\n 6: migrateV5ToV6,\n 7: migrateV6ToV7,\n 8: migrateV7ToV8,\n 9: migrateV8ToV9,\n // v10 adds queryName and queryArgs to the queries table to support\n // custom queries. 
clientAST is now optional to support migrating\n // off client queries.\n 10: migrateV9ToV10,\n // V11 removes the deprecated queries.\"expiresAt\", clients.\"patchVersion\",\n // clients.\"deleted\" columns.\n 11: migrateV10ToV11,\n 12: migratedV11ToV12,\n // V13 adds instances.\"ttlClock\"\n 13: migratedV12ToV13,\n // V14 adds an index on instances.\"lastActive\" and a FK constraint\n // from rows.\"clientGroupID\" to rowsVersion.\"clientGroupID\" for\n // garbage collection\n 14: migratedV13ToV14,\n // V15 adds desires.\"inactivatedAtTTLClock\" to store TTLClock values\n // directly as DOUBLE PRECISION, avoiding postgres.js TIMESTAMPTZ\n // type conversion issues\n 15: migratedV14ToV15,\n };\n\n await runSchemaMigrations(\n log,\n 'view-syncer',\n cvrSchema(shard),\n db,\n setupMigration,\n schemaVersionMigrationMap,\n );\n}\n"],"names":[],"mappings":";;;AAWA,eAAsB,qBACpB,KACA,IACA,OACe;AACf,QAAM,SAAS,UAAU,KAAK;AAE9B,QAAM,iBAA4B;AAAA,IAChC,eAAe,CAAC,IAAI,OAAO,eAAe,IAAI,IAAI,KAAK;AAAA,IACvD,gBAAgB;AAAA,EAAA;AAGlB,QAAM,gBAA2B;AAAA,IAC/B,eAAe,OAAO,GAAG,OAAO;AAC9B,YAAM,iBAAiB,GAAG,MAAM,CAAC;AAAA,IACnC;AAAA,EAAA;AAGF,QAAM,gBAA2B;AAAA,IAC/B,eAAe,OAAO,GAAG,OAAO;AAC9B,YAAM,GAAG,OAAO,uBAAuB,KAAK,CAAC;AAAA,IAC/C;AAAA;AAAA,IAGA,aAAa,OAAO,IAAI,OAAO;AAC7B,YAAM,UAAiC,CAAA;AACvC,uBAAiB,YAAY;AAAA,+CAGY,GAAG,MAAM,CAAC,aAAa;AAAA,QAC9D;AAAA,MAAA,GACC;AACD,mBAAW,WAAW,UAAU;AAC9B,kBAAQ;AAAA,YACN,iBAAiB,GAAG,MAAM,CAAC,kBAAkB,GAAG,OAAO,CAAC;AAAA;AAAA,+BAErC,GAAG,OAAO,CAAC,GAAG,QAAA;AAAA,UAAQ;AAAA,QAE7C;AAAA,MACF;AACA,SAAG,OAAO,gCAAgC,QAAQ,MAAM,OAAO;AAC/D,YAAM,QAAQ,IAAI,OAAO;AAAA,IAC3B;AAAA,EAAA;AAGF,QAAM,gBAA2B;AAAA,IAC/B,eAAe,OAAO,GAAG,OAAO;AAC9B,YAAM,iBAAiB,GAAG,MAAM,CAAC;AACjC,YAAM,iBAAiB,GAAG,MAAM,CAAC;AAAA,IACnC;AAAA,EAAA;AAGF,QAAM,gBAA2B;AAAA,IAC/B,eAAe,OAAO,GAAG,OAAO;AAC9B,YAAM;AAAA,oBACQ,GAAG,MAAM,CAAC;AAAA;AAExB,YAAM;AAAA,oBACQ,GAAG,MAAM,CAAC;AAAA;AAAA,IAE1B;AAAA,EAAA;AAGF,QAAM,gBAA2B;AAAA,IAC/B,eAAe,OAAO,GAAG,OAAO;AAC9B,YAAM,iBAAiB,GAAG,MAAM,CAAC;AACjC,YAAM,iBAAiB;AAAA,QACrB;AAAA,MAAA,CACD;AACD,YAAM,iBAAiB,GAAG,MAAM,CAAC;AAEjC,YAAM,wCAAwC;AAAA,QAC5C;AAAA,MAAA,CACD;AACD,YAAM,4CAA4C;AAAA,QAChD;AAAA,MAAA,CACD;AAAA,IACH;AAAA,EAAA;AAGF,QAAM,gBAA2B;AAAA,IAC/B,eAAe,OAAO,GAAG,OAAO;AAC9B,YAAM,iBAAiB;AAAA,QACrB;AAAA,MAAA,CACD;AAAA,IACH;AAAA,EAAA;AAGF,QAAM,gBAA2B;AAAA,IAC/B,eAAe,OAAO,GAAG,OAAO;AAC9B,YAAM,iBAAiB,GAAG,MAAM,CAAC;AAAA,IACnC;AAAA,EAAA;AAGF,QAAM,iBAA4B;AAAA,IAChC,eAAe,OAAO,GAAG,OAAO;AAC9B,YAAM,iBAAiB,GAAG,MAAM,CAAC;AACjC,YAAM,iBAAiB,GAAG,MAAM,CAAC;AACjC,YAAM,iBAAiB,GAAG,MAAM,CAAC;AAAA,IACnC;AAAA,EAAA;AAGF,QAAM,kBAA6B;AAAA,IACjC,eAAe,OAAO,GAAG,OAAO;AAC9B,YAAM,0BAA0B,GAAG,MAAM,CAAC;AAC1C,YAAM,iBAAiB,GAAG,MAAM,CAAC;AACjC,YAAM,0BAA0B,GAAG,MAAM,CAAC;AAC1C,YAAM,iBAAiB,GAAG,MAAM,CAAC;AACjC,YAAM,iBAAiB,GAAG,MAAM,CAAC;AAAA,IACnC;AAAA,EAAA;AAGF,QAAM,mBAA8B;AAAA,IAClC,eAAe,OAAO,GAAG,OAAO;AAC9B,YAAM,iBAAiB,GAAG,MAAM,CAAC;AAAA,IACnC;AAAA,EAAA;AAGF,QAAM,mBAA8B;AAAA,IAClC,eAAe,OAAO,GAAG,OAAO;AAC9B,YAAM,iBAAiB,GAAG,MAAM,CAAC;AAAA,IACnC;AAAA,EAAA;AAGF,QAAM,mBAA8B;AAAA,IAClC,eAAe,OAAO,GAAG,QAAQ;AAC/B,YAAM;AAAA,gDACoC,IAAI,MAAM,CAAC;AAAA;AAIrD,iBAAW,CAAC,OAAO,SAAS,KAAK;AAAA,QAC/B,CAAC,WAAW,WAAW;AAAA,QACvB,CAAC,WAAW,WAAW;AAAA,QACvB,CAAC,QAAQ,aAAa;AAAA,MAAA,GACC;AACvB,cAAM,aAAa,IAAI,MAAM,KAAK,eAAe;AACjD,cAAM;AAAA,wBACU,IAAI,MAAM,CAAC,IAAI,IAAI,KAAK,CAAC,8BAA8B,UAAU;AACjF,cAAM;AAAA,wBACU,IAAI,MAAM,CAAC,IAAI,IAAI,KAAK,CAAC,mBAAmB,UAAU;AAAA;AAAA,yBAErD,IAAI,MAAM,CAAC,IAAI,IAAI,SAAS,CAAC;AAAA;AAAA;AAAA,MAGhD;AAAA,IACF;AAAA,EAAA;AAGF,QAAM,mBAA8B;AAAA,IAClC,eAAe,OAAO,GAAG,QAAQ;AAG/B,YAAM,kBAAkB,IAAI,MAAM,CAAC;AAAA;AAEnC,YAAM,kB
AAkB,IAAI,MAAM,CAAC;AAAA;AAAA,IAErC;AAAA;AAAA;AAAA;AAAA,IAIA,aAAa,OAAO,IAAI,QAAQ;AAC9B,SAAG;AAAA,QACD;AAAA,MAAA;AAEF,YAAM;AAAA,iBACK,IAAI,MAAM,CAAC;AAAA;AAAA;AAAA;AAAA,IAIxB;AAAA,EAAA;AAGF,QAAM,4BAAqD;AAAA,IACzD,GAAG;AAAA,IACH,GAAG;AAAA,IACH,GAAG;AAAA;AAAA;AAAA,IAGH,GAAG,EAAC,gBAAgB,EAAA;AAAA,IACpB,GAAG;AAAA,IACH,GAAG;AAAA,IACH,GAAG;AAAA,IACH,GAAG;AAAA;AAAA;AAAA;AAAA,IAIH,IAAI;AAAA;AAAA;AAAA,IAGJ,IAAI;AAAA,IACJ,IAAI;AAAA;AAAA,IAEJ,IAAI;AAAA;AAAA;AAAA;AAAA,IAIJ,IAAI;AAAA;AAAA;AAAA;AAAA,IAIJ,IAAI;AAAA,EAAA;AAGN,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA,UAAU,KAAK;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAEJ;"}
+
{"version":3,"file":"init.js","sources":["../../../../../../../zero-cache/src/services/view-syncer/schema/init.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport type {PendingQuery, Row} from 'postgres';\nimport {\n runSchemaMigrations,\n type IncrementalMigrationMap,\n type Migration,\n} from '../../../db/migration.ts';\nimport type {PostgresDB} from '../../../types/pg.ts';\nimport {cvrSchema, type ShardID} from '../../../types/shards.ts';\nimport {createRowsVersionTable, setupCVRTables} from './cvr.ts';\n\nexport async function initViewSyncerSchema(\n log: LogContext,\n db: PostgresDB,\n shard: ShardID,\n): Promise<void> {\n const schema = cvrSchema(shard);\n\n const setupMigration: Migration = {\n migrateSchema: (lc, tx) => setupCVRTables(lc, tx, shard),\n minSafeVersion: 1,\n };\n\n const migrateV1toV2: Migration = {\n migrateSchema: async (_, tx) => {\n await tx`ALTER TABLE ${tx(schema)}.instances ADD \"replicaVersion\" TEXT`;\n },\n };\n\n const migrateV2ToV3: Migration = {\n migrateSchema: async (_, tx) => {\n await tx.unsafe(createRowsVersionTable(shard));\n },\n\n /** Populates the cvr.rowsVersion table with versions from cvr.instances. */\n migrateData: async (lc, tx) => {\n const pending: PendingQuery<Row[]>[] = [];\n for await (const versions of tx<\n {clientGroupID: string; version: string}[]\n >`\n SELECT \"clientGroupID\", \"version\" FROM ${tx(schema)}.instances`.cursor(\n 5000,\n )) {\n for (const version of versions) {\n pending.push(\n tx`INSERT INTO ${tx(schema)}.\"rowsVersion\" ${tx(version)} \n ON CONFLICT (\"clientGroupID\")\n DO UPDATE SET ${tx(version)}`.execute(),\n );\n }\n }\n lc.info?.(`initializing rowsVersion for ${pending.length} cvrs`);\n await Promise.all(pending);\n },\n };\n\n const migrateV3ToV4: Migration = {\n migrateSchema: async (_, tx) => {\n await tx`ALTER TABLE ${tx(schema)}.instances ADD \"owner\" TEXT`;\n await tx`ALTER TABLE ${tx(schema)}.instances ADD \"grantedAt\" TIMESTAMPTZ`;\n },\n };\n\n const migrateV5ToV6: Migration = {\n migrateSchema: async (_, tx) => {\n await tx`\n ALTER TABLE ${tx(schema)}.\"rows\"\n DROP CONSTRAINT fk_rows_client_group`;\n await tx`\n ALTER TABLE ${tx(schema)}.\"rowsVersion\"\n DROP CONSTRAINT fk_rows_version_client_group`;\n },\n };\n\n const migrateV6ToV7: Migration = {\n migrateSchema: async (_, tx) => {\n await tx`ALTER TABLE ${tx(schema)}.desires ADD \"expiresAt\" TIMESTAMPTZ`;\n await tx`ALTER TABLE ${tx(\n schema,\n )}.desires ADD \"inactivatedAt\" TIMESTAMPTZ`;\n await tx`ALTER TABLE ${tx(schema)}.desires ADD \"ttl\" INTERVAL`;\n\n await tx`CREATE INDEX desires_expires_at ON ${tx(\n schema,\n )}.desires (\"expiresAt\")`;\n await tx`CREATE INDEX desires_inactivated_at ON ${tx(\n schema,\n )}.desires (\"inactivatedAt\")`;\n },\n };\n\n const migrateV7ToV8: Migration = {\n migrateSchema: async (_, tx) => {\n await tx`ALTER TABLE ${tx(\n schema,\n )}.\"desires\" DROP CONSTRAINT fk_desires_client`;\n },\n };\n\n const migrateV8ToV9: Migration = {\n migrateSchema: async (_, tx) => {\n await tx`ALTER TABLE ${tx(schema)}.instances ADD \"clientSchema\" JSONB`;\n },\n };\n\n const migrateV9ToV10: Migration = {\n migrateSchema: async (_, tx) => {\n await tx`ALTER TABLE ${tx(schema)}.queries ADD \"queryName\" TEXT`;\n await tx`ALTER TABLE ${tx(schema)}.queries ADD \"queryArgs\" JSONB`;\n await tx`ALTER TABLE ${tx(schema)}.queries ALTER COLUMN \"clientAST\" DROP NOT NULL`;\n },\n };\n\n const migrateV10ToV11: Migration = {\n migrateSchema: async (_, tx) => {\n await tx`DROP INDEX IF 
EXISTS ${tx(schema)}.desires_expires_at`;\n await tx`ALTER TABLE ${tx(schema)}.desires DROP COLUMN \"expiresAt\"`;\n await tx`DROP INDEX IF EXISTS ${tx(schema)}.client_patch_version`;\n await tx`ALTER TABLE ${tx(schema)}.clients DROP COLUMN \"patchVersion\"`;\n await tx`ALTER TABLE ${tx(schema)}.clients DROP COLUMN \"deleted\"`;\n },\n };\n\n const migratedV11ToV12: Migration = {\n migrateSchema: async (_, tx) => {\n await tx`ALTER TABLE ${tx(schema)}.queries ALTER COLUMN \"queryArgs\" TYPE JSON USING \"queryArgs\"::JSON`;\n },\n };\n\n const migratedV12ToV13: Migration = {\n migrateSchema: async (_, tx) => {\n await tx`ALTER TABLE ${tx(schema)}.instances ADD COLUMN \"ttlClock\" DOUBLE PRECISION NOT NULL DEFAULT 0`;\n },\n };\n\n const migratedV13ToV14: Migration = {\n migrateSchema: async (_, sql) => {\n await sql`\n CREATE INDEX instances_last_active ON ${sql(schema)}.instances (\"lastActive\");\n `;\n\n // Update / add foreign key constraints to cascade deletes.\n for (const [table, reference] of [\n ['clients', 'instances'],\n ['queries', 'instances'],\n ['rows', 'rowsVersion'],\n ] as [string, string][]) {\n const constraint = sql(`fk_${table}_client_group`);\n await sql`\n ALTER TABLE ${sql(schema)}.${sql(table)} DROP CONSTRAINT IF EXISTS ${constraint}`;\n await sql`\n ALTER TABLE ${sql(schema)}.${sql(table)} ADD CONSTRAINT ${constraint}\n FOREIGN KEY(\"clientGroupID\")\n REFERENCES ${sql(schema)}.${sql(reference)} (\"clientGroupID\")\n ON DELETE CASCADE;\n `;\n }\n },\n };\n\n const migratedV14ToV15: Migration = {\n migrateSchema: async (_, sql) => {\n // Add new columns for storing inactivatedAt and ttl in milliseconds.\n // This avoids postgres.js type conversion issues with TIMESTAMPTZ and INTERVAL.\n await sql`ALTER TABLE ${sql(schema)}.desires \n ADD COLUMN \"inactivatedAtMs\" DOUBLE PRECISION`;\n await sql`ALTER TABLE ${sql(schema)}.desires \n ADD COLUMN \"ttlMs\" DOUBLE PRECISION`;\n },\n // Migrate existing data: convert TIMESTAMPTZ to milliseconds for inactivatedAt\n // and INTERVAL to milliseconds for ttl\n // Note: EXTRACT(EPOCH FROM NULL) returns NULL, so NULL values are preserved\n migrateData: async (lc, sql) => {\n lc.info?.(\n 'Migrating desires.inactivatedAt to inactivatedAtMs and ttl to ttlMs',\n );\n await sql`\n UPDATE ${sql(schema)}.desires\n SET \"inactivatedAtMs\" = EXTRACT(EPOCH FROM \"inactivatedAt\") * 1000,\n \"ttlMs\" = EXTRACT(EPOCH FROM \"ttl\") * 1000\n `;\n },\n };\n\n const migratedV15ToV16: Migration = {\n migrateSchema: async (_, sql) => {\n await sql`ALTER TABLE ${sql(schema)}.instances ADD COLUMN \"profileID\" TEXT`;\n await sql`ALTER TABLE ${sql(schema)}.instances ADD COLUMN \"deleted\" BOOL DEFAULT FALSE`;\n\n // Recreate the instances_last_active index to exclude tombstones\n await sql`\n DROP INDEX IF EXISTS ${sql(schema)}.instances_last_active`;\n await sql`\n CREATE INDEX instances_last_active ON ${sql(schema)}.instances (\"lastActive\")\n WHERE NOT \"deleted\"`;\n await sql`\n CREATE INDEX tombstones_last_active ON ${sql(schema)}.instances (\"lastActive\")\n WHERE \"deleted\"`;\n await sql`\n CREATE INDEX profile_ids_last_active ON ${sql(schema)}.instances (\"lastActive\", \"profileID\")\n WHERE \"profileID\" IS NOT NULL`;\n },\n\n // Backfill profileIDs to the `cg${clientGroupID}`, as is done for\n // client groups from old zero-clients that don't send a profileID.\n migrateData: async (lc, sql) => {\n lc.info?.('Backfilling instance.profileIDs');\n await sql`\n UPDATE ${sql(schema)}.instances\n SET \"profileID\" = 'cg' || 
\"clientGroupID\"\n WHERE \"profileID\" IS NULL\n `;\n },\n };\n\n const schemaVersionMigrationMap: IncrementalMigrationMap = {\n 2: migrateV1toV2,\n 3: migrateV2ToV3,\n 4: migrateV3ToV4,\n // v5 enables asynchronous row-record flushing, and thus relies on\n // the logic that updates and checks the rowsVersion table in v3.\n 5: {minSafeVersion: 3},\n 6: migrateV5ToV6,\n 7: migrateV6ToV7,\n 8: migrateV7ToV8,\n 9: migrateV8ToV9,\n // v10 adds queryName and queryArgs to the queries table to support\n // custom queries. clientAST is now optional to support migrating\n // off client queries.\n 10: migrateV9ToV10,\n // V11 removes the deprecated queries.\"expiresAt\", clients.\"patchVersion\",\n // clients.\"deleted\" columns.\n 11: migrateV10ToV11,\n 12: migratedV11ToV12,\n // V13 adds instances.\"ttlClock\"\n 13: migratedV12ToV13,\n // V14 adds an index on instances.\"lastActive\" and a FK constraint\n // from rows.\"clientGroupID\" to rowsVersion.\"clientGroupID\" for\n // garbage collection\n 14: migratedV13ToV14,\n // V15 adds desires.\"inactivatedAtTTLClock\" to store TTLClock values\n // directly as DOUBLE PRECISION, avoiding postgres.js TIMESTAMPTZ\n // type conversion issues\n 15: migratedV14ToV15,\n // V16 adds instances.\"profileID\" and a corresponding index for estimating\n // active user counts more accurately for apps that use memstore.\n 16: migratedV15ToV16,\n };\n\n await runSchemaMigrations(\n log,\n 'view-syncer',\n cvrSchema(shard),\n db,\n setupMigration,\n schemaVersionMigrationMap,\n );\n}\n"],"names":[],"mappings":";;;AAWA,eAAsB,qBACpB,KACA,IACA,OACe;AACf,QAAM,SAAS,UAAU,KAAK;AAE9B,QAAM,iBAA4B;AAAA,IAChC,eAAe,CAAC,IAAI,OAAO,eAAe,IAAI,IAAI,KAAK;AAAA,IACvD,gBAAgB;AAAA,EAAA;AAGlB,QAAM,gBAA2B;AAAA,IAC/B,eAAe,OAAO,GAAG,OAAO;AAC9B,YAAM,iBAAiB,GAAG,MAAM,CAAC;AAAA,IACnC;AAAA,EAAA;AAGF,QAAM,gBAA2B;AAAA,IAC/B,eAAe,OAAO,GAAG,OAAO;AAC9B,YAAM,GAAG,OAAO,uBAAuB,KAAK,CAAC;AAAA,IAC/C;AAAA;AAAA,IAGA,aAAa,OAAO,IAAI,OAAO;AAC7B,YAAM,UAAiC,CAAA;AACvC,uBAAiB,YAAY;AAAA,+CAGY,GAAG,MAAM,CAAC,aAAa;AAAA,QAC9D;AAAA,MAAA,GACC;AACD,mBAAW,WAAW,UAAU;AAC9B,kBAAQ;AAAA,YACN,iBAAiB,GAAG,MAAM,CAAC,kBAAkB,GAAG,OAAO,CAAC;AAAA;AAAA,+BAErC,GAAG,OAAO,CAAC,GAAG,QAAA;AAAA,UAAQ;AAAA,QAE7C;AAAA,MACF;AACA,SAAG,OAAO,gCAAgC,QAAQ,MAAM,OAAO;AAC/D,YAAM,QAAQ,IAAI,OAAO;AAAA,IAC3B;AAAA,EAAA;AAGF,QAAM,gBAA2B;AAAA,IAC/B,eAAe,OAAO,GAAG,OAAO;AAC9B,YAAM,iBAAiB,GAAG,MAAM,CAAC;AACjC,YAAM,iBAAiB,GAAG,MAAM,CAAC;AAAA,IACnC;AAAA,EAAA;AAGF,QAAM,gBAA2B;AAAA,IAC/B,eAAe,OAAO,GAAG,OAAO;AAC9B,YAAM;AAAA,oBACQ,GAAG,MAAM,CAAC;AAAA;AAExB,YAAM;AAAA,oBACQ,GAAG,MAAM,CAAC;AAAA;AAAA,IAE1B;AAAA,EAAA;AAGF,QAAM,gBAA2B;AAAA,IAC/B,eAAe,OAAO,GAAG,OAAO;AAC9B,YAAM,iBAAiB,GAAG,MAAM,CAAC;AACjC,YAAM,iBAAiB;AAAA,QACrB;AAAA,MAAA,CACD;AACD,YAAM,iBAAiB,GAAG,MAAM,CAAC;AAEjC,YAAM,wCAAwC;AAAA,QAC5C;AAAA,MAAA,CACD;AACD,YAAM,4CAA4C;AAAA,QAChD;AAAA,MAAA,CACD;AAAA,IACH;AAAA,EAAA;AAGF,QAAM,gBAA2B;AAAA,IAC/B,eAAe,OAAO,GAAG,OAAO;AAC9B,YAAM,iBAAiB;AAAA,QACrB;AAAA,MAAA,CACD;AAAA,IACH;AAAA,EAAA;AAGF,QAAM,gBAA2B;AAAA,IAC/B,eAAe,OAAO,GAAG,OAAO;AAC9B,YAAM,iBAAiB,GAAG,MAAM,CAAC;AAAA,IACnC;AAAA,EAAA;AAGF,QAAM,iBAA4B;AAAA,IAChC,eAAe,OAAO,GAAG,OAAO;AAC9B,YAAM,iBAAiB,GAAG,MAAM,CAAC;AACjC,YAAM,iBAAiB,GAAG,MAAM,CAAC;AACjC,YAAM,iBAAiB,GAAG,MAAM,CAAC;AAAA,IACnC;AAAA,EAAA;AAGF,QAAM,kBAA6B;AAAA,IACjC,eAAe,OAAO,GAAG,OAAO;AAC9B,YAAM,0BAA0B,GAAG,MAAM,CAAC;AAC1C,YAAM,iBAAiB,GAAG,MAAM,CAAC;AACjC,YAAM,0BAA0B,GAAG,MAAM,CAAC;AAC1C,YAAM,iBAAiB,GAAG,MAAM,CAAC;AACjC,YAAM,iBAAiB,GAAG,MAAM,CAAC;AAAA,IACnC;AAAA,EAAA;AAGF,QAAM,mBAA8B;AAAA,IAClC,eAAe,OAAO,GAAG,OAAO;AAC9B,YAAM,iBAAiB,GAAG,MAAM,CAAC;AAAA,IACnC;A
AAA,EAAA;AAGF,QAAM,mBAA8B;AAAA,IAClC,eAAe,OAAO,GAAG,OAAO;AAC9B,YAAM,iBAAiB,GAAG,MAAM,CAAC;AAAA,IACnC;AAAA,EAAA;AAGF,QAAM,mBAA8B;AAAA,IAClC,eAAe,OAAO,GAAG,QAAQ;AAC/B,YAAM;AAAA,gDACoC,IAAI,MAAM,CAAC;AAAA;AAIrD,iBAAW,CAAC,OAAO,SAAS,KAAK;AAAA,QAC/B,CAAC,WAAW,WAAW;AAAA,QACvB,CAAC,WAAW,WAAW;AAAA,QACvB,CAAC,QAAQ,aAAa;AAAA,MAAA,GACC;AACvB,cAAM,aAAa,IAAI,MAAM,KAAK,eAAe;AACjD,cAAM;AAAA,wBACU,IAAI,MAAM,CAAC,IAAI,IAAI,KAAK,CAAC,8BAA8B,UAAU;AACjF,cAAM;AAAA,wBACU,IAAI,MAAM,CAAC,IAAI,IAAI,KAAK,CAAC,mBAAmB,UAAU;AAAA;AAAA,yBAErD,IAAI,MAAM,CAAC,IAAI,IAAI,SAAS,CAAC;AAAA;AAAA;AAAA,MAGhD;AAAA,IACF;AAAA,EAAA;AAGF,QAAM,mBAA8B;AAAA,IAClC,eAAe,OAAO,GAAG,QAAQ;AAG/B,YAAM,kBAAkB,IAAI,MAAM,CAAC;AAAA;AAEnC,YAAM,kBAAkB,IAAI,MAAM,CAAC;AAAA;AAAA,IAErC;AAAA;AAAA;AAAA;AAAA,IAIA,aAAa,OAAO,IAAI,QAAQ;AAC9B,SAAG;AAAA,QACD;AAAA,MAAA;AAEF,YAAM;AAAA,iBACK,IAAI,MAAM,CAAC;AAAA;AAAA;AAAA;AAAA,IAIxB;AAAA,EAAA;AAGF,QAAM,mBAA8B;AAAA,IAClC,eAAe,OAAO,GAAG,QAAQ;AAC/B,YAAM,kBAAkB,IAAI,MAAM,CAAC;AACnC,YAAM,kBAAkB,IAAI,MAAM,CAAC;AAGnC,YAAM;AAAA,+BACmB,IAAI,MAAM,CAAC;AACpC,YAAM;AAAA,gDACoC,IAAI,MAAM,CAAC;AAAA;AAErD,YAAM;AAAA,iDACqC,IAAI,MAAM,CAAC;AAAA;AAEtD,YAAM;AAAA,kDACsC,IAAI,MAAM,CAAC;AAAA;AAAA,IAEzD;AAAA;AAAA;AAAA,IAIA,aAAa,OAAO,IAAI,QAAQ;AAC9B,SAAG,OAAO,iCAAiC;AAC3C,YAAM;AAAA,iBACK,IAAI,MAAM,CAAC;AAAA;AAAA;AAAA;AAAA,IAIxB;AAAA,EAAA;AAGF,QAAM,4BAAqD;AAAA,IACzD,GAAG;AAAA,IACH,GAAG;AAAA,IACH,GAAG;AAAA;AAAA;AAAA,IAGH,GAAG,EAAC,gBAAgB,EAAA;AAAA,IACpB,GAAG;AAAA,IACH,GAAG;AAAA,IACH,GAAG;AAAA,IACH,GAAG;AAAA;AAAA;AAAA;AAAA,IAIH,IAAI;AAAA;AAAA;AAAA,IAGJ,IAAI;AAAA,IACJ,IAAI;AAAA;AAAA,IAEJ,IAAI;AAAA;AAAA;AAAA;AAAA,IAIJ,IAAI;AAAA;AAAA;AAAA;AAAA,IAIJ,IAAI;AAAA;AAAA;AAAA,IAGJ,IAAI;AAAA,EAAA;AAGN,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA,UAAU,KAAK;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAEJ;"}
@@ -24,6 +24,7 @@ export type TokenData = {
 export type SyncContext = {
   readonly clientID: string;
   readonly wsID: string;
+  readonly profileID: string | null;
   readonly baseCookie: string | null;
   readonly protocolVersion: number;
   readonly schemaVersion: number | null;
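SyncContext now carries the nullable profileID that newer zero-clients send. Per the instances."profileID" column comment and the V16 backfill above, clients that send nothing are keyed by a "cg" + clientGroupID fallback instead. A tiny illustrative helper showing that fallback rule; the function name and signature are invented for this sketch.

// Illustrative only: derive the value used for usage stats when a client
// predates profileID support, matching the V16 backfill ('cg' || "clientGroupID").
function effectiveProfileID(
  profileID: string | null,
  clientGroupID: string,
): string {
  return profileID ?? `cg${clientGroupID}`;
}

// effectiveProfileID(null, 'abc123') === 'cgabc123'
// effectiveProfileID('p-42', 'abc123') === 'p-42'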
@@ -1 +1 @@
-
{"version":3,"file":"view-syncer.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/view-syncer/view-syncer.ts"],"names":[],"mappings":"AAEA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,MAAM,CAAC;AAcrC,OAAO,KAAK,EAAC,2BAA2B,EAAC,MAAM,yDAAyD,CAAC;AACzG,OAAO,KAAK,EAEV,qBAAqB,EACtB,MAAM,0CAA0C,CAAC;AAElD,OAAO,KAAK,EAAC,oBAAoB,EAAC,MAAM,iDAAiD,CAAC;AAC1F,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,uCAAuC,CAAC;AAOtE,OAAO,KAAK,EAEV,gBAAgB,EACjB,MAAM,6CAA6C,CAAC;AAMrD,OAAO,KAAK,EAAC,oBAAoB,EAAC,MAAM,2BAA2B,CAAC;AAEpE,OAAO,KAAK,EAAC,sBAAsB,EAAC,MAAM,yCAAyC,CAAC;AAOpF,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,oCAAoC,CAAC;AAK1E,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,mBAAmB,CAAC;AAElD,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,uBAAuB,CAAC;AACnD,OAAO,KAAK,EAAC,MAAM,EAAC,MAAM,wBAAwB,CAAC;AACnD,OAAO,EAAC,YAAY,EAAC,MAAM,6BAA6B,CAAC;AACzD,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,6BAA6B,CAAC;AAE9D,OAAO,KAAK,EAAC,oBAAoB,EAAC,MAAM,eAAe,CAAC;AAiBxD,OAAO,KAAK,EAAC,gBAAgB,EAAC,MAAM,wBAAwB,CAAC;AAE7D,OAAO,KAAK,EAAC,cAAc,EAAC,MAAM,sBAAsB,CAAC;AAuBzD,MAAM,MAAM,SAAS,GAAG;IACtB,QAAQ,CAAC,GAAG,EAAE,MAAM,CAAC;IACrB,kBAAkB;IAClB,QAAQ,CAAC,OAAO,EAAE,UAAU,CAAC;CAC9B,CAAC;AAEF,MAAM,MAAM,WAAW,GAAG;IACxB,QAAQ,CAAC,QAAQ,EAAE,MAAM,CAAC;IAC1B,QAAQ,CAAC,IAAI,EAAE,MAAM,CAAC;IACtB,QAAQ,CAAC,UAAU,EAAE,MAAM,GAAG,IAAI,CAAC;IACnC,QAAQ,CAAC,eAAe,EAAE,MAAM,CAAC;IACjC,QAAQ,CAAC,aAAa,EAAE,MAAM,GAAG,IAAI,CAAC;IACtC,QAAQ,CAAC,SAAS,EAAE,SAAS,GAAG,SAAS,CAAC;IAC1C,QAAQ,CAAC,UAAU,EAAE,MAAM,GAAG,SAAS,CAAC;CACzC,CAAC;AAMF,MAAM,WAAW,UAAU;IACzB,cAAc,CACZ,GAAG,EAAE,WAAW,EAChB,GAAG,EAAE,qBAAqB,GACzB,MAAM,CAAC,UAAU,CAAC,CAAC;IAEtB,oBAAoB,CAClB,GAAG,EAAE,WAAW,EAChB,GAAG,EAAE,2BAA2B,GAC/B,OAAO,CAAC,IAAI,CAAC,CAAC;IAEjB,aAAa,CAAC,GAAG,EAAE,WAAW,EAAE,GAAG,EAAE,oBAAoB,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;IAC1E,OAAO,CAAC,OAAO,EAAE,WAAW,EAAE,GAAG,EAAE,gBAAgB,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;CACrE;AAQD,KAAK,UAAU,GAAG,CAChB,EAAE,EAAE,CAAC,GAAG,IAAI,EAAE,OAAO,EAAE,KAAK,IAAI,EAChC,KAAK,CAAC,EAAE,MAAM,KACX,UAAU,CAAC,OAAO,UAAU,CAAC,CAAC;AAEnC;;;;GAIG;AACH,eAAO,MAAM,kBAAkB,QAAS,CAAC;AAEzC;;;;;GAKG;AACH,eAAO,MAAM,oBAAoB,KAAK,CAAC;AAEvC,qBAAa,iBAAkB,YAAW,UAAU,EAAE,oBAAoB;;IACxE,QAAQ,CAAC,EAAE,EAAE,MAAM,CAAC;IAUpB,YAAY,CAAC,EAAE,MAAM,GAAG,SAAS,CAAC;
+
{"version":3,"file":"view-syncer.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/view-syncer/view-syncer.ts"],"names":[],"mappings":"AAEA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,MAAM,CAAC;AAcrC,OAAO,KAAK,EAAC,2BAA2B,EAAC,MAAM,yDAAyD,CAAC;AACzG,OAAO,KAAK,EAEV,qBAAqB,EACtB,MAAM,0CAA0C,CAAC;AAElD,OAAO,KAAK,EAAC,oBAAoB,EAAC,MAAM,iDAAiD,CAAC;AAC1F,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,uCAAuC,CAAC;AAOtE,OAAO,KAAK,EAEV,gBAAgB,EACjB,MAAM,6CAA6C,CAAC;AAMrD,OAAO,KAAK,EAAC,oBAAoB,EAAC,MAAM,2BAA2B,CAAC;AAEpE,OAAO,KAAK,EAAC,sBAAsB,EAAC,MAAM,yCAAyC,CAAC;AAOpF,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,oCAAoC,CAAC;AAK1E,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,mBAAmB,CAAC;AAElD,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,uBAAuB,CAAC;AACnD,OAAO,KAAK,EAAC,MAAM,EAAC,MAAM,wBAAwB,CAAC;AACnD,OAAO,EAAC,YAAY,EAAC,MAAM,6BAA6B,CAAC;AACzD,OAAO,KAAK,EAAC,YAAY,EAAC,MAAM,6BAA6B,CAAC;AAE9D,OAAO,KAAK,EAAC,oBAAoB,EAAC,MAAM,eAAe,CAAC;AAiBxD,OAAO,KAAK,EAAC,gBAAgB,EAAC,MAAM,wBAAwB,CAAC;AAE7D,OAAO,KAAK,EAAC,cAAc,EAAC,MAAM,sBAAsB,CAAC;AAuBzD,MAAM,MAAM,SAAS,GAAG;IACtB,QAAQ,CAAC,GAAG,EAAE,MAAM,CAAC;IACrB,kBAAkB;IAClB,QAAQ,CAAC,OAAO,EAAE,UAAU,CAAC;CAC9B,CAAC;AAEF,MAAM,MAAM,WAAW,GAAG;IACxB,QAAQ,CAAC,QAAQ,EAAE,MAAM,CAAC;IAC1B,QAAQ,CAAC,IAAI,EAAE,MAAM,CAAC;IACtB,QAAQ,CAAC,SAAS,EAAE,MAAM,GAAG,IAAI,CAAC;IAClC,QAAQ,CAAC,UAAU,EAAE,MAAM,GAAG,IAAI,CAAC;IACnC,QAAQ,CAAC,eAAe,EAAE,MAAM,CAAC;IACjC,QAAQ,CAAC,aAAa,EAAE,MAAM,GAAG,IAAI,CAAC;IACtC,QAAQ,CAAC,SAAS,EAAE,SAAS,GAAG,SAAS,CAAC;IAC1C,QAAQ,CAAC,UAAU,EAAE,MAAM,GAAG,SAAS,CAAC;CACzC,CAAC;AAMF,MAAM,WAAW,UAAU;IACzB,cAAc,CACZ,GAAG,EAAE,WAAW,EAChB,GAAG,EAAE,qBAAqB,GACzB,MAAM,CAAC,UAAU,CAAC,CAAC;IAEtB,oBAAoB,CAClB,GAAG,EAAE,WAAW,EAChB,GAAG,EAAE,2BAA2B,GAC/B,OAAO,CAAC,IAAI,CAAC,CAAC;IAEjB,aAAa,CAAC,GAAG,EAAE,WAAW,EAAE,GAAG,EAAE,oBAAoB,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;IAC1E,OAAO,CAAC,OAAO,EAAE,WAAW,EAAE,GAAG,EAAE,gBAAgB,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;CACrE;AAQD,KAAK,UAAU,GAAG,CAChB,EAAE,EAAE,CAAC,GAAG,IAAI,EAAE,OAAO,EAAE,KAAK,IAAI,EAChC,KAAK,CAAC,EAAE,MAAM,KACX,UAAU,CAAC,OAAO,UAAU,CAAC,CAAC;AAEnC;;;;GAIG;AACH,eAAO,MAAM,kBAAkB,QAAS,CAAC;AAEzC;;;;;GAKG;AACH,eAAO,MAAM,oBAAoB,KAAK,CAAC;AAEvC,qBAAa,iBAAkB,YAAW,UAAU,EAAE,oBAAoB;;IACxE,QAAQ,CAAC,EAAE,EAAE,MAAM,CAAC;IAUpB,YAAY,CAAC,EAAE,MAAM,GAAG,SAAS,CAAC;gBAoHhC,MAAM,EAAE,oBAAoB,EAC5B,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,OAAO,EACd,MAAM,EAAE,MAAM,EACd,aAAa,EAAE,MAAM,EACrB,KAAK,EAAE,UAAU,EACjB,UAAU,EAAE,UAAU,GAAG,SAAS,EAClC,cAAc,EAAE,cAAc,EAC9B,cAAc,EAAE,YAAY,CAAC,YAAY,CAAC,EAC1C,gBAAgB,EAAE,gBAAgB,EAClC,oBAAoB,EAAE,MAAM,EAC5B,iBAAiB,EAAE,iBAAiB,EACpC,sBAAsB,EAAE,sBAAsB,GAAG,SAAS,EAC1D,WAAW,SAAuB,EAClC,YAAY,GAAE,UAAwC;IA2FxD,UAAU,IAAI,OAAO,CAAC,aAAa,GAAG,UAAU,CAAC;IAO3C,GAAG,IAAI,OAAO,CAAC,IAAI,CAAC;IAiH1B;;;;;;;;OAQG;IACH,SAAS,IAAI,OAAO;IAwEpB,cAAc,CACZ,GAAG,EAAE,WAAW,EAChB,qBAAqB,EAAE,qBAAqB,GAC3C,MAAM,CAAC,UAAU,CAAC;IAuHf,oBAAoB,CACxB,GAAG,EAAE,WAAW,EAChB,GAAG,EAAE,2BAA2B,GAC/B,OAAO,CAAC,IAAI,CAAC;IAIV,aAAa,CACjB,GAAG,EAAE,WAAW,EAChB,GAAG,EAAE,oBAAoB,GACxB,OAAO,CAAC,IAAI,CAAC;IAyuChB,OAAO,CAAC,OAAO,EAAE,WAAW,EAAE,GAAG,EAAE,gBAAgB,GAAG,OAAO,CAAC,IAAI,CAAC;IA2BnE,IAAI,IAAI,OAAO,CAAC,IAAI,CAAC;IAqBrB;;;OAGG;IACH,eAAe;CAGhB;AAuED,wBAAgB,SAAS,CACvB,EAAE,EAAE,UAAU,EACd,aAAa,EAAE,SAAS,GAAG,SAAS,EACpC,QAAQ,EAAE,SAAS,GAAG,SAAS,yBAkDhC;AAyCD,qBAAa,cAAc;;IAInB,KAAK;IAOX,oBAAoB;IAMd,YAAY,CAAC,cAAc,CAAC,EAAE,MAAM;IAW1C,UAAU;IAWV,sCAAsC;IACtC,IAAI,IAAI,MAAM;IAKd;;;OAGG;IACH,YAAY,IAAI,MAAM;CAKvB"}