@rocicorp/zero 1.4.0-canary.5 → 1.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  var package_default = {
  name: "@rocicorp/zero",
- version: "1.4.0-canary.5",
+ version: "1.4.0",
  description: "Zero is a web framework for serverless web development.",
  homepage: "https://zero.rocicorp.dev",
  bugs: { "url": "https://bugs.rocicorp.dev" },
@@ -1 +1 @@
- {"version":3,"file":"package.js","names":[],"sources":["../../package.json"],"sourcesContent":["{\n \"name\": \"@rocicorp/zero\",\n \"version\": \"1.4.0-canary.5\",\n \"description\": \"Zero is a web framework for serverless web development.\",\n \"homepage\": \"https://zero.rocicorp.dev\",\n \"bugs\": {\n \"url\": \"https://bugs.rocicorp.dev\"\n },\n \"license\": \"Apache-2.0\",\n \"author\": \"Rocicorp, Inc.\",\n \"repository\": {\n \"type\": \"git\",\n \"url\": \"git+https://github.com/rocicorp/mono.git\",\n \"directory\": \"packages/zero\"\n },\n \"bin\": {\n \"analyze-query\": \"./out/zero/src/analyze-query.js\",\n \"ast-to-zql\": \"./out/zero/src/ast-to-zql.js\",\n \"transform-query\": \"./out/zero/src/transform-query.js\",\n \"zero-build-schema\": \"./out/zero/src/build-schema.js\",\n \"zero-cache\": \"./out/zero/src/cli.js\",\n \"zero-cache-dev\": \"./out/zero/src/zero-cache-dev.js\",\n \"zero-deploy-permissions\": \"./out/zero/src/deploy-permissions.js\",\n \"zero-out\": \"./out/zero/src/zero-out.js\"\n },\n \"files\": [\n \"out\",\n \"!*.tsbuildinfo\"\n ],\n \"type\": \"module\",\n \"main\": \"out/zero/src/zero.js\",\n \"module\": \"out/zero/src/zero.js\",\n \"types\": \"out/zero/src/zero.d.ts\",\n \"exports\": {\n \".\": {\n \"types\": \"./out/zero/src/zero.d.ts\",\n \"default\": \"./out/zero/src/zero.js\"\n },\n \"./analyze\": {\n \"types\": \"./out/zero/src/analyze.d.ts\",\n \"default\": \"./out/zero/src/analyze.js\"\n },\n \"./bindings\": {\n \"types\": \"./out/zero/src/bindings.d.ts\",\n \"default\": \"./out/zero/src/bindings.js\"\n },\n \"./change-protocol/v0\": {\n \"types\": \"./out/zero/src/change-protocol/v0.d.ts\",\n \"default\": \"./out/zero/src/change-protocol/v0.js\"\n },\n \"./expo-sqlite\": {\n \"types\": \"./out/zero/src/expo-sqlite.d.ts\",\n \"default\": \"./out/zero/src/expo-sqlite.js\"\n },\n \"./op-sqlite\": {\n \"types\": \"./out/zero/src/op-sqlite.d.ts\",\n \"default\": \"./out/zero/src/op-sqlite.js\"\n },\n \"./pg\": {\n \"types\": \"./out/zero/src/pg.d.ts\",\n \"default\": \"./out/zero/src/pg.js\"\n },\n \"./react\": {\n \"types\": \"./out/zero/src/react.d.ts\",\n \"default\": \"./out/zero/src/react.js\"\n },\n \"./react-native\": {\n \"types\": \"./out/zero/src/react-native.d.ts\",\n \"default\": \"./out/zero/src/react-native.js\"\n },\n \"./server\": {\n \"types\": \"./out/zero/src/server.d.ts\",\n \"default\": \"./out/zero/src/server.js\"\n },\n \"./server/adapters/drizzle\": {\n \"types\": \"./out/zero/src/adapters/drizzle.d.ts\",\n \"default\": \"./out/zero/src/adapters/drizzle.js\"\n },\n \"./server/adapters/kysely\": {\n \"types\": \"./out/zero/src/adapters/kysely.d.ts\",\n \"default\": \"./out/zero/src/adapters/kysely.js\"\n },\n \"./server/adapters/prisma\": {\n \"types\": \"./out/zero/src/adapters/prisma.d.ts\",\n \"default\": \"./out/zero/src/adapters/prisma.js\"\n },\n \"./server/adapters/pg\": {\n \"types\": \"./out/zero/src/adapters/pg.d.ts\",\n \"default\": \"./out/zero/src/adapters/pg.js\"\n },\n \"./server/adapters/postgresjs\": {\n \"types\": \"./out/zero/src/adapters/postgresjs.d.ts\",\n \"default\": \"./out/zero/src/adapters/postgresjs.js\"\n },\n \"./solid\": {\n \"types\": \"./out/zero/src/solid.d.ts\",\n \"default\": \"./out/zero/src/solid.js\"\n },\n \"./sqlite\": {\n \"types\": \"./out/zero/src/sqlite.d.ts\",\n \"default\": \"./out/zero/src/sqlite.js\"\n },\n \"./zqlite\": {\n \"types\": \"./out/zero/src/zqlite.d.ts\",\n \"default\": \"./out/zero/src/zqlite.js\"\n }\n },\n \"scripts\": {\n \"build\": \"node 
--experimental-strip-types --no-warnings tool/build.ts\",\n \"build:watch\": \"node --experimental-strip-types --no-warnings tool/build.ts --watch\",\n \"check-types\": \"tsc -p tsconfig.client.json && tsc -p tsconfig.server.json\",\n \"check-types:client:watch\": \"tsc -p tsconfig.client.json --watch\",\n \"check-types:server:watch\": \"tsc -p tsconfig.server.json --watch\",\n \"format\": \"oxfmt .\",\n \"check-format\": \"oxfmt --check .\",\n \"lint\": \"oxlint --type-aware src/\",\n \"docs\": \"node --experimental-strip-types --no-warnings tool/generate-docs.ts\",\n \"docs:server\": \"node --watch --experimental-strip-types --no-warnings tool/generate-docs.ts --server\",\n \"fmt\": \"oxfmt .\",\n \"check-fmt\": \"oxfmt --check .\"\n },\n \"dependencies\": {\n \"@badrap/valita\": \"0.3.11\",\n \"@databases/escape-identifier\": \"^1.0.3\",\n \"@databases/sql\": \"^3.3.0\",\n \"@dotenvx/dotenvx\": \"^1.39.0\",\n \"@drdgvhbh/postgres-error-codes\": \"^0.0.6\",\n \"@fastify/cors\": \"^10.0.0\",\n \"@fastify/websocket\": \"^11.0.0\",\n \"@google-cloud/precise-date\": \"^4.0.0\",\n \"@opentelemetry/api\": \"^1.9.0\",\n \"@opentelemetry/api-logs\": \"^0.203.0\",\n \"@opentelemetry/auto-instrumentations-node\": \"^0.62.0\",\n \"@opentelemetry/exporter-metrics-otlp-http\": \"^0.203.0\",\n \"@opentelemetry/resources\": \"^2.0.1\",\n \"@opentelemetry/sdk-metrics\": \"^2.0.1\",\n \"@opentelemetry/sdk-node\": \"^0.203.0\",\n \"@opentelemetry/sdk-trace-node\": \"^2.0.1\",\n \"@postgresql-typed/oids\": \"^0.2.0\",\n \"@rocicorp/lock\": \"^1.0.4\",\n \"@rocicorp/logger\": \"^5.4.0\",\n \"@rocicorp/resolver\": \"^1.0.2\",\n \"@rocicorp/zero-sqlite3\": \"^1.0.17\",\n \"@standard-schema/spec\": \"^1.0.0\",\n \"@types/basic-auth\": \"^1.1.8\",\n \"@types/ws\": \"^8.5.12\",\n \"basic-auth\": \"^2.0.1\",\n \"chalk-template\": \"^1.1.0\",\n \"chokidar\": \"^4.0.1\",\n \"cloudevents\": \"^10.0.0\",\n \"command-line-args\": \"^6.0.1\",\n \"command-line-usage\": \"^7.0.3\",\n \"compare-utf8\": \"^0.2.0\",\n \"defu\": \"^6.1.4\",\n \"eventemitter3\": \"^5.0.1\",\n \"fastify\": \"^5.0.0\",\n \"is-in-subnet\": \"^4.0.1\",\n \"jose\": \"^5.9.3\",\n \"js-xxhash\": \"^4.0.0\",\n \"json-custom-numbers\": \"^3.1.1\",\n \"kasi\": \"^1.1.0\",\n \"nanoid\": \"^5.1.2\",\n \"oxfmt\": \"^0.45.0\",\n \"parse-prometheus-text-format\": \"^1.1.1\",\n \"pg-format\": \"npm:pg-format-fix@^1.0.5\",\n \"postgres\": \"3.4.7\",\n \"semver\": \"^7.5.4\",\n \"tsx\": \"^4.21.0\",\n \"url-pattern\": \"^1.0.3\",\n \"urlpattern-polyfill\": \"^10.1.0\",\n \"ws\": \"^8.18.1\"\n },\n \"devDependencies\": {\n \"@op-engineering/op-sqlite\": \">=15\",\n \"@vitest/runner\": \"4.1.3\",\n \"analyze-query\": \"0.0.0\",\n \"ast-to-zql\": \"0.0.0\",\n \"expo-sqlite\": \">=15\",\n \"replicache\": \"15.2.1\",\n \"shared\": \"0.0.0\",\n \"syncpack\": \"^14.3.0\",\n \"typedoc\": \"^0.28.17\",\n \"typedoc-plugin-markdown\": \"^4.10.0\",\n \"typescript\": \"~6.0.2\",\n \"vite\": \"8.0.3\",\n \"vitest\": \"4.1.3\",\n \"zero-cache\": \"0.0.0\",\n \"zero-client\": \"0.0.0\",\n \"zero-pg\": \"0.0.0\",\n \"zero-protocol\": \"0.0.0\",\n \"zero-react\": \"0.0.0\",\n \"zero-server\": \"0.0.0\",\n \"zero-solid\": \"0.0.0\",\n \"zqlite\": \"0.0.0\"\n },\n \"peerDependencies\": {\n \"@op-engineering/op-sqlite\": \">=15\",\n \"expo-sqlite\": \">=15\",\n \"kysely\": \"^0.28.16\"\n },\n \"peerDependenciesMeta\": {\n \"kysely\": {\n \"optional\": true\n },\n \"expo-sqlite\": {\n \"optional\": true\n },\n \"@op-engineering/op-sqlite\": {\n \"optional\": true\n }\n },\n 
\"engines\": {\n \"node\": \">=22\"\n }\n}"],"mappings":""}
+ {"version":3,"file":"package.js","names":[],"sources":["../../package.json"],"sourcesContent":["{\n \"name\": \"@rocicorp/zero\",\n \"version\": \"1.4.0\",\n \"description\": \"Zero is a web framework for serverless web development.\",\n \"homepage\": \"https://zero.rocicorp.dev\",\n \"bugs\": {\n \"url\": \"https://bugs.rocicorp.dev\"\n },\n \"license\": \"Apache-2.0\",\n \"author\": \"Rocicorp, Inc.\",\n \"repository\": {\n \"type\": \"git\",\n \"url\": \"git+https://github.com/rocicorp/mono.git\",\n \"directory\": \"packages/zero\"\n },\n \"bin\": {\n \"analyze-query\": \"./out/zero/src/analyze-query.js\",\n \"ast-to-zql\": \"./out/zero/src/ast-to-zql.js\",\n \"transform-query\": \"./out/zero/src/transform-query.js\",\n \"zero-build-schema\": \"./out/zero/src/build-schema.js\",\n \"zero-cache\": \"./out/zero/src/cli.js\",\n \"zero-cache-dev\": \"./out/zero/src/zero-cache-dev.js\",\n \"zero-deploy-permissions\": \"./out/zero/src/deploy-permissions.js\",\n \"zero-out\": \"./out/zero/src/zero-out.js\"\n },\n \"files\": [\n \"out\",\n \"!*.tsbuildinfo\"\n ],\n \"type\": \"module\",\n \"main\": \"out/zero/src/zero.js\",\n \"module\": \"out/zero/src/zero.js\",\n \"types\": \"out/zero/src/zero.d.ts\",\n \"exports\": {\n \".\": {\n \"types\": \"./out/zero/src/zero.d.ts\",\n \"default\": \"./out/zero/src/zero.js\"\n },\n \"./analyze\": {\n \"types\": \"./out/zero/src/analyze.d.ts\",\n \"default\": \"./out/zero/src/analyze.js\"\n },\n \"./bindings\": {\n \"types\": \"./out/zero/src/bindings.d.ts\",\n \"default\": \"./out/zero/src/bindings.js\"\n },\n \"./change-protocol/v0\": {\n \"types\": \"./out/zero/src/change-protocol/v0.d.ts\",\n \"default\": \"./out/zero/src/change-protocol/v0.js\"\n },\n \"./expo-sqlite\": {\n \"types\": \"./out/zero/src/expo-sqlite.d.ts\",\n \"default\": \"./out/zero/src/expo-sqlite.js\"\n },\n \"./op-sqlite\": {\n \"types\": \"./out/zero/src/op-sqlite.d.ts\",\n \"default\": \"./out/zero/src/op-sqlite.js\"\n },\n \"./pg\": {\n \"types\": \"./out/zero/src/pg.d.ts\",\n \"default\": \"./out/zero/src/pg.js\"\n },\n \"./react\": {\n \"types\": \"./out/zero/src/react.d.ts\",\n \"default\": \"./out/zero/src/react.js\"\n },\n \"./react-native\": {\n \"types\": \"./out/zero/src/react-native.d.ts\",\n \"default\": \"./out/zero/src/react-native.js\"\n },\n \"./server\": {\n \"types\": \"./out/zero/src/server.d.ts\",\n \"default\": \"./out/zero/src/server.js\"\n },\n \"./server/adapters/drizzle\": {\n \"types\": \"./out/zero/src/adapters/drizzle.d.ts\",\n \"default\": \"./out/zero/src/adapters/drizzle.js\"\n },\n \"./server/adapters/kysely\": {\n \"types\": \"./out/zero/src/adapters/kysely.d.ts\",\n \"default\": \"./out/zero/src/adapters/kysely.js\"\n },\n \"./server/adapters/prisma\": {\n \"types\": \"./out/zero/src/adapters/prisma.d.ts\",\n \"default\": \"./out/zero/src/adapters/prisma.js\"\n },\n \"./server/adapters/pg\": {\n \"types\": \"./out/zero/src/adapters/pg.d.ts\",\n \"default\": \"./out/zero/src/adapters/pg.js\"\n },\n \"./server/adapters/postgresjs\": {\n \"types\": \"./out/zero/src/adapters/postgresjs.d.ts\",\n \"default\": \"./out/zero/src/adapters/postgresjs.js\"\n },\n \"./solid\": {\n \"types\": \"./out/zero/src/solid.d.ts\",\n \"default\": \"./out/zero/src/solid.js\"\n },\n \"./sqlite\": {\n \"types\": \"./out/zero/src/sqlite.d.ts\",\n \"default\": \"./out/zero/src/sqlite.js\"\n },\n \"./zqlite\": {\n \"types\": \"./out/zero/src/zqlite.d.ts\",\n \"default\": \"./out/zero/src/zqlite.js\"\n }\n },\n \"scripts\": {\n \"build\": \"node --experimental-strip-types 
--no-warnings tool/build.ts\",\n \"build:watch\": \"node --experimental-strip-types --no-warnings tool/build.ts --watch\",\n \"check-types\": \"tsc -p tsconfig.client.json && tsc -p tsconfig.server.json\",\n \"check-types:client:watch\": \"tsc -p tsconfig.client.json --watch\",\n \"check-types:server:watch\": \"tsc -p tsconfig.server.json --watch\",\n \"format\": \"oxfmt .\",\n \"check-format\": \"oxfmt --check .\",\n \"lint\": \"oxlint --type-aware src/\",\n \"docs\": \"node --experimental-strip-types --no-warnings tool/generate-docs.ts\",\n \"docs:server\": \"node --watch --experimental-strip-types --no-warnings tool/generate-docs.ts --server\",\n \"fmt\": \"oxfmt .\",\n \"check-fmt\": \"oxfmt --check .\"\n },\n \"dependencies\": {\n \"@badrap/valita\": \"0.3.11\",\n \"@databases/escape-identifier\": \"^1.0.3\",\n \"@databases/sql\": \"^3.3.0\",\n \"@dotenvx/dotenvx\": \"^1.39.0\",\n \"@drdgvhbh/postgres-error-codes\": \"^0.0.6\",\n \"@fastify/cors\": \"^10.0.0\",\n \"@fastify/websocket\": \"^11.0.0\",\n \"@google-cloud/precise-date\": \"^4.0.0\",\n \"@opentelemetry/api\": \"^1.9.0\",\n \"@opentelemetry/api-logs\": \"^0.203.0\",\n \"@opentelemetry/auto-instrumentations-node\": \"^0.62.0\",\n \"@opentelemetry/exporter-metrics-otlp-http\": \"^0.203.0\",\n \"@opentelemetry/resources\": \"^2.0.1\",\n \"@opentelemetry/sdk-metrics\": \"^2.0.1\",\n \"@opentelemetry/sdk-node\": \"^0.203.0\",\n \"@opentelemetry/sdk-trace-node\": \"^2.0.1\",\n \"@postgresql-typed/oids\": \"^0.2.0\",\n \"@rocicorp/lock\": \"^1.0.4\",\n \"@rocicorp/logger\": \"^5.4.0\",\n \"@rocicorp/resolver\": \"^1.0.2\",\n \"@rocicorp/zero-sqlite3\": \"^1.0.17\",\n \"@standard-schema/spec\": \"^1.0.0\",\n \"@types/basic-auth\": \"^1.1.8\",\n \"@types/ws\": \"^8.5.12\",\n \"basic-auth\": \"^2.0.1\",\n \"chalk-template\": \"^1.1.0\",\n \"chokidar\": \"^4.0.1\",\n \"cloudevents\": \"^10.0.0\",\n \"command-line-args\": \"^6.0.1\",\n \"command-line-usage\": \"^7.0.3\",\n \"compare-utf8\": \"^0.2.0\",\n \"defu\": \"^6.1.4\",\n \"eventemitter3\": \"^5.0.1\",\n \"fastify\": \"^5.0.0\",\n \"is-in-subnet\": \"^4.0.1\",\n \"jose\": \"^5.9.3\",\n \"js-xxhash\": \"^4.0.0\",\n \"json-custom-numbers\": \"^3.1.1\",\n \"kasi\": \"^1.1.0\",\n \"nanoid\": \"^5.1.2\",\n \"oxfmt\": \"^0.45.0\",\n \"parse-prometheus-text-format\": \"^1.1.1\",\n \"pg-format\": \"npm:pg-format-fix@^1.0.5\",\n \"postgres\": \"3.4.7\",\n \"semver\": \"^7.5.4\",\n \"tsx\": \"^4.21.0\",\n \"url-pattern\": \"^1.0.3\",\n \"urlpattern-polyfill\": \"^10.1.0\",\n \"ws\": \"^8.18.1\"\n },\n \"devDependencies\": {\n \"@op-engineering/op-sqlite\": \">=15\",\n \"@vitest/runner\": \"4.1.3\",\n \"analyze-query\": \"0.0.0\",\n \"ast-to-zql\": \"0.0.0\",\n \"expo-sqlite\": \">=15\",\n \"replicache\": \"15.2.1\",\n \"shared\": \"0.0.0\",\n \"syncpack\": \"^14.3.0\",\n \"typedoc\": \"^0.28.17\",\n \"typedoc-plugin-markdown\": \"^4.10.0\",\n \"typescript\": \"~6.0.2\",\n \"vite\": \"8.0.3\",\n \"vitest\": \"4.1.3\",\n \"zero-cache\": \"0.0.0\",\n \"zero-client\": \"0.0.0\",\n \"zero-pg\": \"0.0.0\",\n \"zero-protocol\": \"0.0.0\",\n \"zero-react\": \"0.0.0\",\n \"zero-server\": \"0.0.0\",\n \"zero-solid\": \"0.0.0\",\n \"zqlite\": \"0.0.0\"\n },\n \"peerDependencies\": {\n \"@op-engineering/op-sqlite\": \">=15\",\n \"expo-sqlite\": \">=15\",\n \"kysely\": \"^0.28.16\"\n },\n \"peerDependenciesMeta\": {\n \"kysely\": {\n \"optional\": true\n },\n \"expo-sqlite\": {\n \"optional\": true\n },\n \"@op-engineering/op-sqlite\": {\n \"optional\": true\n }\n },\n \"engines\": {\n \"node\": 
\">=22\"\n }\n}"],"mappings":""}
@@ -1 +1 @@
- {"version":3,"file":"shard.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/shard.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAIjD,OAAO,EAGL,KAAK,UAAU,EAChB,MAAM,6CAA6C,CAAC;AACrD,OAAO,KAAK,CAAC,MAAM,wCAAwC,CAAC;AAE5D,OAAO,KAAK,EAAC,UAAU,EAAE,mBAAmB,EAAC,MAAM,yBAAyB,CAAC;AAC7E,OAAO,KAAK,EAAC,KAAK,EAAE,WAAW,EAAE,OAAO,EAAC,MAAM,6BAA6B,CAAC;AAI7E,OAAO,EAGL,KAAK,eAAe,EACpB,KAAK,eAAe,EACrB,MAAM,gBAAgB,CAAC;AASxB;;;;GAIG;AACH,wBAAgB,uBAAuB,CAAC,IAAI,EAAE,MAAM,GAAG,IAAI,CAY1D;AAED,wBAAgB,yBAAyB,CAAC,EAAC,KAAK,EAAC,EAAE,KAAK,UAEvD;AAED,wBAAgB,qBAAqB,CAAC,EAAC,KAAK,EAAE,QAAQ,EAAC,EAAE,OAAO,UAE/D;AAED,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,OAAO,UAGnD;AAED;;;GAGG;AACH,wBAAgB,yBAAyB,CAAC,KAAK,EAAE,OAAO,UAIvD;AAED,wBAAgB,kBAAkB,CAAC,KAAK,EAAE,OAAO,UAEhD;AAMD,wBAAgB,uBAAuB,CACrC,KAAK,EAAE,MAAM,EACb,OAAO,EAAE,MAAM,GAAG,MAAM,UAGzB;AAoCD,wBAAsB,kBAAkB,CAAC,EAAE,EAAE,UAAU,EAAE,KAAK,EAAE,KAAK,iBAEpE;AAED,wBAAgB,yBAAyB,CAAC,MAAM,EAAE,MAAM,UASvD;AAED;;;;;;;GAOG;AACH,wBAAgB,2BAA2B,CAAC,MAAM,EAAE,MAAM,UASzD;AAED,eAAO,MAAM,kBAAkB,gBAAgB,CAAC;AAEhD,wBAAgB,UAAU,CACxB,WAAW,EAAE,WAAW,EACxB,mBAAmB,EAAE,MAAM,GAC1B,MAAM,CA4CR;AAED,wBAAgB,SAAS,CAAC,KAAK,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,GAAG,MAAM,GAAG,MAAM,CAYzE;AAED,QAAA,MAAM,yBAAyB;;;aAG7B,CAAC;AAEH,MAAM,MAAM,mBAAmB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,yBAAyB,CAAC,CAAC;AAE5E,QAAA,MAAM,aAAa;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;aAMjB,CAAC;AAEH,MAAM,MAAM,OAAO,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,aAAa,CAAC,CAAC;AAcpD,wBAAsB,UAAU,CAC9B,GAAG,EAAE,UAAU,EACf,KAAK,EAAE,OAAO,EACd,IAAI,EAAE,MAAM,EACZ,cAAc,EAAE,MAAM,EACtB,EAAC,MAAM,EAAE,OAAO,EAAC,EAAE,eAAe,EAClC,kBAAkB,EAAE,UAAU,iBAQ/B;AAED,wBAAsB,mBAAmB,CACvC,EAAE,EAAE,UAAU,EACd,GAAG,EAAE,UAAU,EACf,KAAK,EAAE,OAAO,EACd,cAAc,EAAE,MAAM,EACtB,OAAO,CAAC,EAAE,UAAU,GACnB,OAAO,CAAC,OAAO,GAAG,IAAI,CAAC,CAmBzB;AAED,wBAAsB,sBAAsB,CAC1C,GAAG,EAAE,UAAU,EACf,KAAK,EAAE,OAAO,GACb,OAAO,CAAC,mBAAmB,CAAC,CAU9B;AAED;;;GAGG;AACH,wBAAsB,yBAAyB,CAC7C,EAAE,EAAE,UAAU,EACd,GAAG,EAAE,mBAAmB,EACxB,SAAS,EAAE,WAAW,iBAyDvB;AAED,wBAAsB,aAAa,CACjC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,mBAAmB,EACvB,KAAK,EAAE,WAAW,iBA6BnB;AAED,wBAAgB,oBAAoB,CAClC,EAAE,EAAE,UAAU,EACd,SAAS,EAAE,eAAe,QAkB3B;AAED,KAAK,iBAAiB,GAAG;IACvB,KAAK,CAAC,EAAE,EAAE,UAAU,EAAE,EAAE,EAAE,UAAU,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;CACtD,CAAC;AAEF,wBAAgB,4CAA4C,CAC1D,IAAI,EAAE,eAAe,GACpB,iBAAiB,GAAG,SAAS,CA+C/B"}
+ {"version":3,"file":"shard.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/shard.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAIjD,OAAO,EAGL,KAAK,UAAU,EAChB,MAAM,6CAA6C,CAAC;AACrD,OAAO,KAAK,CAAC,MAAM,wCAAwC,CAAC;AAE5D,OAAO,KAAK,EAAC,UAAU,EAAE,mBAAmB,EAAC,MAAM,yBAAyB,CAAC;AAC7E,OAAO,KAAK,EAAC,KAAK,EAAE,WAAW,EAAE,OAAO,EAAC,MAAM,6BAA6B,CAAC;AAI7E,OAAO,EAGL,KAAK,eAAe,EACpB,KAAK,eAAe,EACrB,MAAM,gBAAgB,CAAC;AASxB;;;;GAIG;AACH,wBAAgB,uBAAuB,CAAC,IAAI,EAAE,MAAM,GAAG,IAAI,CAY1D;AAED,wBAAgB,yBAAyB,CAAC,EAAC,KAAK,EAAC,EAAE,KAAK,UAEvD;AAED,wBAAgB,qBAAqB,CAAC,EAAC,KAAK,EAAE,QAAQ,EAAC,EAAE,OAAO,UAE/D;AAED,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,OAAO,UAGnD;AAED;;;GAGG;AACH,wBAAgB,yBAAyB,CAAC,KAAK,EAAE,OAAO,UAIvD;AAED,wBAAgB,kBAAkB,CAAC,KAAK,EAAE,OAAO,UAEhD;AAMD,wBAAgB,uBAAuB,CACrC,KAAK,EAAE,MAAM,EACb,OAAO,EAAE,MAAM,GAAG,MAAM,UAGzB;AAoCD,wBAAsB,kBAAkB,CAAC,EAAE,EAAE,UAAU,EAAE,KAAK,EAAE,KAAK,iBAEpE;AAED,wBAAgB,yBAAyB,CAAC,MAAM,EAAE,MAAM,UASvD;AAED;;;;;;;GAOG;AACH,wBAAgB,2BAA2B,CAAC,MAAM,EAAE,MAAM,UASzD;AAED,eAAO,MAAM,kBAAkB,gBAAgB,CAAC;AAEhD,wBAAgB,UAAU,CACxB,WAAW,EAAE,WAAW,EACxB,mBAAmB,EAAE,MAAM,GAC1B,MAAM,CA4CR;AAED,wBAAgB,SAAS,CAAC,KAAK,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,GAAG,MAAM,GAAG,MAAM,CAYzE;AAED,QAAA,MAAM,yBAAyB;;;aAG7B,CAAC;AAEH,MAAM,MAAM,mBAAmB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,yBAAyB,CAAC,CAAC;AAE5E,QAAA,MAAM,aAAa;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;aAMjB,CAAC;AAEH,MAAM,MAAM,OAAO,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,aAAa,CAAC,CAAC;AAcpD,wBAAsB,UAAU,CAC9B,GAAG,EAAE,UAAU,EACf,KAAK,EAAE,OAAO,EACd,IAAI,EAAE,MAAM,EACZ,cAAc,EAAE,MAAM,EACtB,EAAC,MAAM,EAAE,OAAO,EAAC,EAAE,eAAe,EAClC,kBAAkB,EAAE,UAAU,iBAQ/B;AAED,wBAAsB,mBAAmB,CACvC,EAAE,EAAE,UAAU,EACd,GAAG,EAAE,UAAU,EACf,KAAK,EAAE,OAAO,EACd,cAAc,EAAE,MAAM,EACtB,OAAO,CAAC,EAAE,UAAU,GACnB,OAAO,CAAC,OAAO,GAAG,IAAI,CAAC,CA2BzB;AAED,wBAAsB,sBAAsB,CAC1C,GAAG,EAAE,UAAU,EACf,KAAK,EAAE,OAAO,GACb,OAAO,CAAC,mBAAmB,CAAC,CAU9B;AAED;;;GAGG;AACH,wBAAsB,yBAAyB,CAC7C,EAAE,EAAE,UAAU,EACd,GAAG,EAAE,mBAAmB,EACxB,SAAS,EAAE,WAAW,iBAyDvB;AAED,wBAAsB,aAAa,CACjC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,mBAAmB,EACvB,KAAK,EAAE,WAAW,iBA6BnB;AAED,wBAAgB,oBAAoB,CAClC,EAAE,EAAE,UAAU,EACd,SAAS,EAAE,eAAe,QAkB3B;AAED,KAAK,iBAAiB,GAAG;IACvB,KAAK,CAAC,EAAE,EAAE,UAAU,EAAE,EAAE,EAAE,UAAU,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;CACtD,CAAC;AAEF,wBAAgB,4CAA4C,CAC1D,IAAI,EAAE,eAAe,GACpB,iBAAiB,GAAG,SAAS,CA+C/B"}
@@ -191,7 +191,15 @@ async function addReplica(sql, shard, slot, replicaVersion, { tables, indexes },
  async function getReplicaAtVersion(lc, sql, shard, replicaVersion, context) {
  const schema = sql(upstreamSchema(shard));
  const result = await sql`
- SELECT * FROM ${schema}.replicas JOIN ${schema}."shardConfig" ON true
+ SELECT
+ replicas."slot",
+ replicas."version",
+ replicas."initialSchema",
+ replicas."initialSyncContext",
+ replicas."subscriberContext",
+ "shardConfig"."publications",
+ "shardConfig"."ddlDetection"
+ FROM ${schema}.replicas JOIN ${schema}."shardConfig" ON true
  WHERE version = ${replicaVersion};
  `;
  if (result.length === 0) {
@@ -1 +1 @@
- {"version":3,"file":"shard.js","names":[],"sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/shard.ts"],"sourcesContent":["import {PG_INSUFFICIENT_PRIVILEGE} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport {literal} from 'pg-format';\nimport postgres from 'postgres';\nimport {assert} from '../../../../../../shared/src/asserts.ts';\nimport {\n jsonObjectSchema,\n stringify,\n type JSONObject,\n} from '../../../../../../shared/src/bigint-json.ts';\nimport * as v from '../../../../../../shared/src/valita.ts';\nimport {Default} from '../../../../db/postgres-replica-identity-enum.ts';\nimport type {PostgresDB, PostgresTransaction} from '../../../../types/pg.ts';\nimport type {AppID, ShardConfig, ShardID} from '../../../../types/shards.ts';\nimport {appSchema, check, upstreamSchema} from '../../../../types/shards.ts';\nimport {id} from '../../../../types/sql.ts';\nimport {createEventTriggerStatements} from './ddl.ts';\nimport {\n getPublicationInfo,\n publishedSchema,\n type PublicationInfo,\n type PublishedSchema,\n} from './published.ts';\nimport {validate} from './validation.ts';\n\n/**\n * PostgreSQL unquoted identifiers must start with a letter or underscore\n * and contain only letters, digits, and underscores.\n */\nconst VALID_PUBLICATION_NAME = /^[a-zA-Z_][a-zA-Z0-9_]*$/;\n\n/**\n * Validates that a publication name is a valid PostgreSQL identifier.\n * This provides defense-in-depth against SQL injection when publication\n * names are used in replication commands.\n */\nexport function validatePublicationName(name: string): void {\n if (!VALID_PUBLICATION_NAME.test(name)) {\n throw new Error(\n `Invalid publication name \"${name}\". Publication names must start with a letter or underscore ` +\n `and contain only letters, digits, and underscores.`,\n );\n }\n if (name.length > 63) {\n throw new Error(\n `Publication name \"${name}\" exceeds PostgreSQL's 63-character identifier limit.`,\n );\n }\n}\n\nexport function internalPublicationPrefix({appID}: AppID) {\n return `_${appID}_`;\n}\n\nexport function legacyReplicationSlot({appID, shardNum}: ShardID) {\n return `${appID}_${shardNum}`;\n}\n\nexport function replicationSlotPrefix(shard: ShardID) {\n const {appID, shardNum} = check(shard);\n return `${appID}_${shardNum}_`;\n}\n\n/**\n * An expression used to match replication slots in the shard\n * in a Postgres `LIKE` operator.\n */\nexport function replicationSlotExpression(shard: ShardID) {\n // Underscores have a special meaning in LIKE values\n // so they have to be escaped.\n return `${replicationSlotPrefix(shard)}%`.replaceAll('_', '\\\\_');\n}\n\nexport function newReplicationSlot(shard: ShardID) {\n return replicationSlotPrefix(shard) + Date.now();\n}\n\nfunction defaultPublicationName(appID: string, shardID: string | number) {\n return `_${appID}_public_${shardID}`;\n}\n\nexport function metadataPublicationName(\n appID: string,\n shardID: string | number,\n) {\n return `_${appID}_metadata_${shardID}`;\n}\n\n// The GLOBAL_SETUP must be idempotent as it can be run multiple times for different shards.\nfunction globalSetup(appID: AppID): string {\n const app = id(appSchema(appID));\n\n return /*sql*/ `\n CREATE SCHEMA IF NOT EXISTS ${app};\n\n CREATE TABLE IF NOT EXISTS ${app}.permissions (\n \"permissions\" JSONB,\n \"hash\" TEXT,\n\n -- Ensure that there is only a single row in the table.\n -- Application code can be agnostic to this column, and\n -- simply invoke UPDATE statements on the 
version columns.\n \"lock\" BOOL PRIMARY KEY DEFAULT true CHECK (lock)\n );\n\n CREATE OR REPLACE FUNCTION ${app}.set_permissions_hash()\n RETURNS TRIGGER AS $$\n BEGIN\n NEW.hash = md5(NEW.permissions::text);\n RETURN NEW;\n END;\n $$ LANGUAGE plpgsql;\n\n CREATE OR REPLACE TRIGGER on_set_permissions \n BEFORE INSERT OR UPDATE ON ${app}.permissions\n FOR EACH ROW\n EXECUTE FUNCTION ${app}.set_permissions_hash();\n\n INSERT INTO ${app}.permissions (permissions) VALUES (NULL) ON CONFLICT DO NOTHING;\n`;\n}\n\nexport async function ensureGlobalTables(db: PostgresDB, appID: AppID) {\n await db.unsafe(globalSetup(appID));\n}\n\nexport function getClientsTableDefinition(schema: string) {\n return /*sql*/ `\n CREATE TABLE ${schema}.\"clients\" (\n \"clientGroupID\" TEXT NOT NULL,\n \"clientID\" TEXT NOT NULL,\n \"lastMutationID\" BIGINT NOT NULL,\n \"userID\" TEXT,\n PRIMARY KEY(\"clientGroupID\", \"clientID\")\n );`;\n}\n\n/**\n * Tracks the results of mutations.\n * 1. It is an error for the same mutation ID to be used twice.\n * 2. The result is JSONB to allow for arbitrary results.\n *\n * The tables must be cleaned up as the clients\n * receive the mutation responses and as clients are removed.\n */\nexport function getMutationsTableDefinition(schema: string) {\n return /*sql*/ `\n CREATE TABLE ${schema}.\"mutations\" (\n \"clientGroupID\" TEXT NOT NULL,\n \"clientID\" TEXT NOT NULL,\n \"mutationID\" BIGINT NOT NULL,\n \"result\" JSON NOT NULL,\n PRIMARY KEY(\"clientGroupID\", \"clientID\", \"mutationID\")\n );`;\n}\n\nexport const SHARD_CONFIG_TABLE = 'shardConfig';\n\nexport function shardSetup(\n shardConfig: ShardConfig,\n metadataPublication: string,\n): string {\n const app = id(appSchema(shardConfig));\n const shard = id(upstreamSchema(shardConfig));\n\n const pubs = shardConfig.publications.toSorted();\n assert(\n pubs.includes(metadataPublication),\n () => `Publications must include ${metadataPublication}`,\n );\n\n return /*sql*/ `\n CREATE SCHEMA IF NOT EXISTS ${shard};\n\n ${getClientsTableDefinition(shard)}\n ${getMutationsTableDefinition(shard)}\n\n DROP PUBLICATION IF EXISTS ${id(metadataPublication)};\n CREATE PUBLICATION ${id(metadataPublication)}\n FOR TABLE ${app}.\"permissions\", TABLE ${shard}.\"clients\", ${shard}.\"mutations\";\n\n CREATE TABLE ${shard}.\"${SHARD_CONFIG_TABLE}\" (\n \"publications\" TEXT[] NOT NULL,\n \"ddlDetection\" BOOL NOT NULL,\n\n -- Ensure that there is only a single row in the table.\n \"lock\" BOOL PRIMARY KEY DEFAULT true CHECK (lock)\n );\n\n INSERT INTO ${shard}.\"${SHARD_CONFIG_TABLE}\" (\n \"publications\",\n \"ddlDetection\" \n ) VALUES (\n ARRAY[${literal(pubs)}], \n false -- set in SAVEPOINT with triggerSetup() statements\n );\n\n CREATE TABLE ${shard}.replicas (\n \"slot\" TEXT PRIMARY KEY,\n \"version\" TEXT NOT NULL,\n \"initialSchema\" JSON NOT NULL,\n \"initialSyncContext\" JSON,\n \"subscriberContext\" JSON\n );\n `;\n}\n\nexport function dropShard(appID: string, shardID: string | number): string {\n const schema = `${appID}_${shardID}`;\n const metadataPublication = metadataPublicationName(appID, shardID);\n const defaultPublication = defaultPublicationName(appID, shardID);\n\n // DROP SCHEMA ... 
CASCADE does not drop dependent PUBLICATIONS,\n // so PUBLICATIONs must be dropped explicitly.\n return /*sql*/ `\n DROP PUBLICATION IF EXISTS ${id(defaultPublication)};\n DROP PUBLICATION IF EXISTS ${id(metadataPublication)};\n DROP SCHEMA IF EXISTS ${id(schema)} CASCADE;\n `;\n}\n\nconst internalShardConfigSchema = v.object({\n publications: v.array(v.string()),\n ddlDetection: v.boolean(),\n});\n\nexport type InternalShardConfig = v.Infer<typeof internalShardConfigSchema>;\n\nconst replicaSchema = internalShardConfigSchema.extend({\n slot: v.string(),\n version: v.string(),\n initialSchema: publishedSchema,\n initialSyncContext: jsonObjectSchema.nullable(),\n subscriberContext: jsonObjectSchema.nullable(),\n});\n\nexport type Replica = v.Infer<typeof replicaSchema>;\n\n// triggerSetup is run separately in a sub-transaction (i.e. SAVEPOINT) so\n// that a failure (e.g. due to lack of superuser permissions) can be handled\n// by continuing in a degraded mode (ddlDetection = false).\nfunction triggerSetup(shard: ShardConfig): string {\n const schema = id(upstreamSchema(shard));\n return (\n createEventTriggerStatements(shard) +\n /*sql*/ `UPDATE ${schema}.\"shardConfig\" SET \"ddlDetection\" = true;`\n );\n}\n\n// Called in initial-sync to store the exact schema that was initially synced.\nexport async function addReplica(\n sql: PostgresDB,\n shard: ShardID,\n slot: string,\n replicaVersion: string,\n {tables, indexes}: PublishedSchema,\n initialSyncContext: JSONObject,\n) {\n const schema = upstreamSchema(shard);\n const synced: PublishedSchema = {tables, indexes};\n await sql`\n INSERT INTO ${sql(schema)}.replicas\n (\"slot\", \"version\", \"initialSchema\", \"initialSyncContext\")\n VALUES (${slot}, ${replicaVersion}, ${synced}, ${initialSyncContext})`;\n}\n\nexport async function getReplicaAtVersion(\n lc: LogContext,\n sql: PostgresDB,\n shard: ShardID,\n replicaVersion: string,\n context?: JSONObject,\n): Promise<Replica | null> {\n const schema = sql(upstreamSchema(shard));\n const result = await sql`\n SELECT * FROM ${schema}.replicas JOIN ${schema}.\"shardConfig\" ON true\n WHERE version = ${replicaVersion};\n `;\n if (result.length === 0) {\n // log out all the replicas and the joined shardConfig\n const allReplicas = await sql`\n SELECT slot, version, \"initialSyncContext\", \"subscriberContext\" \n FROM ${schema}.replicas`;\n lc.info?.(\n `Replica ${replicaVersion} ` +\n (context ? 
`(context: ${stringify(context)}) ` : '') +\n `not found in: ${stringify(allReplicas)}`,\n );\n return null;\n }\n return v.parse(result[0], replicaSchema, 'passthrough');\n}\n\nexport async function getInternalShardConfig(\n sql: PostgresDB,\n shard: ShardID,\n): Promise<InternalShardConfig> {\n const result = await sql`\n SELECT \"publications\", \"ddlDetection\"\n FROM ${sql(upstreamSchema(shard))}.\"shardConfig\";\n `;\n assert(\n result.length === 1,\n () => `Expected exactly one shardConfig row, got ${result.length}`,\n );\n return v.parse(result[0], internalShardConfigSchema, 'passthrough');\n}\n\n/**\n * Sets up and returns all publications (including internal ones) for\n * the given shard.\n */\nexport async function setupTablesAndReplication(\n lc: LogContext,\n sql: PostgresTransaction,\n requested: ShardConfig,\n) {\n const {publications} = requested;\n // Validate requested publications.\n for (const pub of publications) {\n validatePublicationName(pub);\n if (pub.startsWith('_')) {\n throw new Error(\n `Publication names starting with \"_\" are reserved for internal use.\\n` +\n `Please use a different name for publication \"${pub}\".`,\n );\n }\n }\n const allPublications: string[] = [];\n\n // Setup application publications.\n if (publications.length) {\n const results = await sql<{pubname: string}[]>`\n SELECT pubname from pg_publication WHERE pubname IN ${sql(\n publications,\n )}`.values();\n\n if (results.length !== publications.length) {\n throw new Error(\n `Unknown or invalid publications. Specified: [${publications}]. Found: [${results.flat()}]`,\n );\n }\n allPublications.push(...publications);\n } else {\n const defaultPublication = defaultPublicationName(\n requested.appID,\n requested.shardNum,\n );\n await sql`\n DROP PUBLICATION IF EXISTS ${sql(defaultPublication)}`;\n await sql`\n CREATE PUBLICATION ${sql(defaultPublication)} \n FOR TABLES IN SCHEMA public\n WITH (publish_via_partition_root = true)`;\n allPublications.push(defaultPublication);\n }\n\n const metadataPublication = metadataPublicationName(\n requested.appID,\n requested.shardNum,\n );\n allPublications.push(metadataPublication);\n\n const shard = {...requested, publications: allPublications};\n\n // Setup the global tables and shard tables / publications.\n await sql.unsafe(globalSetup(shard) + shardSetup(shard, metadataPublication));\n\n const pubs = await getPublicationInfo(sql, allPublications);\n await replicaIdentitiesForTablesWithoutPrimaryKeys(pubs)?.apply(lc, sql);\n\n await setupTriggers(lc, sql, shard);\n}\n\nexport async function setupTriggers(\n lc: LogContext,\n tx: PostgresTransaction,\n shard: ShardConfig,\n) {\n const schema = upstreamSchema(shard);\n const [{ddlDetection}] = await tx<InternalShardConfig[]> /*sql*/ `\n SELECT \"ddlDetection\" FROM ${tx(schema)}.\"shardConfig\"`;\n try {\n await tx.savepoint(sub => sub.unsafe(triggerSetup(shard)));\n } catch (e) {\n if (ddlDetection) {\n // If ddlDetection has already been enabled, subsequent failures to\n // upgrade the trigger should be propagated rather than swallowed.\n throw e;\n }\n if (\n !(\n e instanceof postgres.PostgresError &&\n e.code === PG_INSUFFICIENT_PRIVILEGE\n )\n ) {\n throw e;\n }\n // If triggerSetup() fails, replication continues in ddlDetection=false mode.\n lc.warn?.(\n `Unable to create event triggers for schema change detection:\\n\\n` +\n `\"${e.hint ?? 
e.message}\"\\n\\n` +\n `Proceeding in degraded mode: schema changes will halt replication,\\n` +\n `requiring the replica to be reset (manually or with --auto-reset).`,\n );\n }\n}\n\nexport function validatePublications(\n lc: LogContext,\n published: PublicationInfo,\n) {\n // Verify that all publications export the proper events.\n published.publications.forEach(pub => {\n if (\n !pub.pubinsert ||\n !pub.pubupdate ||\n !pub.pubdelete ||\n !pub.pubtruncate\n ) {\n // TODO: Make APIError?\n throw new Error(\n `PUBLICATION ${pub.pubname} must publish insert, update, delete, and truncate`,\n );\n }\n });\n\n published.tables.forEach(table => validate(lc, table));\n}\n\ntype ReplicaIdentities = {\n apply(lc: LogContext, db: PostgresDB): Promise<void>;\n};\n\nexport function replicaIdentitiesForTablesWithoutPrimaryKeys(\n pubs: PublishedSchema,\n): ReplicaIdentities | undefined {\n const replicaIdentities: {\n schema: string;\n tableName: string;\n indexName: string;\n }[] = [];\n for (const table of pubs.tables) {\n if (!table.primaryKey?.length && table.replicaIdentity === Default) {\n // Look for an index that can serve as the REPLICA IDENTITY USING INDEX. It must be:\n // - UNIQUE\n // - NOT NULL columns\n // - not deferrable (i.e. isImmediate)\n // - not partial (are already filtered out)\n //\n // https://www.postgresql.org/docs/current/sql-altertable.html#SQL-ALTERTABLE-REPLICA-IDENTITY\n const {schema, name: tableName} = table;\n for (const {columns, name: indexName} of pubs.indexes.filter(\n idx =>\n idx.schema === schema &&\n idx.tableName === tableName &&\n idx.unique &&\n idx.isImmediate,\n )) {\n if (Object.keys(columns).some(col => !table.columns[col].notNull)) {\n continue; // Only indexes with all NOT NULL columns are suitable.\n }\n replicaIdentities.push({schema, tableName, indexName});\n break;\n }\n }\n }\n\n if (replicaIdentities.length === 0) {\n return undefined;\n }\n return {\n apply: async (lc: LogContext, sql: PostgresDB) => {\n for (const {schema, tableName, indexName} of replicaIdentities) {\n lc.info?.(\n `setting \"${indexName}\" as the REPLICA IDENTITY for \"${tableName}\"`,\n );\n await sql`\n ALTER TABLE ${sql(schema)}.${sql(tableName)} \n REPLICA IDENTITY USING INDEX ${sql(indexName)}`;\n }\n },\n 
};\n}\n"],"mappings":";;;;;;;;;;;;;;;;AA6BA,IAAM,yBAAyB;;;;;;AAO/B,SAAgB,wBAAwB,MAAoB;AAC1D,KAAI,CAAC,uBAAuB,KAAK,KAAK,CACpC,OAAM,IAAI,MACR,6BAA6B,KAAK,gHAEnC;AAEH,KAAI,KAAK,SAAS,GAChB,OAAM,IAAI,MACR,qBAAqB,KAAK,uDAC3B;;AAIL,SAAgB,0BAA0B,EAAC,SAAe;AACxD,QAAO,IAAI,MAAM;;AAGnB,SAAgB,sBAAsB,EAAC,OAAO,YAAoB;AAChE,QAAO,GAAG,MAAM,GAAG;;AAGrB,SAAgB,sBAAsB,OAAgB;CACpD,MAAM,EAAC,OAAO,aAAY,MAAM,MAAM;AACtC,QAAO,GAAG,MAAM,GAAG,SAAS;;;;;;AAO9B,SAAgB,0BAA0B,OAAgB;AAGxD,QAAO,GAAG,sBAAsB,MAAM,CAAC,GAAG,WAAW,KAAK,MAAM;;AAGlE,SAAgB,mBAAmB,OAAgB;AACjD,QAAO,sBAAsB,MAAM,GAAG,KAAK,KAAK;;AAGlD,SAAS,uBAAuB,OAAe,SAA0B;AACvE,QAAO,IAAI,MAAM,UAAU;;AAG7B,SAAgB,wBACd,OACA,SACA;AACA,QAAO,IAAI,MAAM,YAAY;;AAI/B,SAAS,YAAY,OAAsB;CACzC,MAAM,MAAM,GAAG,UAAU,MAAM,CAAC;AAEhC,QAAe;gCACe,IAAI;;+BAEL,IAAI;;;;;;;;;;+BAUJ,IAAI;;;;;;;;;iCASF,IAAI;;uBAEd,IAAI;;gBAEX,IAAI;;;AAIpB,eAAsB,mBAAmB,IAAgB,OAAc;AACrE,OAAM,GAAG,OAAO,YAAY,MAAM,CAAC;;AAGrC,SAAgB,0BAA0B,QAAgB;AACxD,QAAe;iBACA,OAAO;;;;;;;;;;;;;;;;AAiBxB,SAAgB,4BAA4B,QAAgB;AAC1D,QAAe;iBACA,OAAO;;;;;;;;AASxB,IAAa,qBAAqB;AAElC,SAAgB,WACd,aACA,qBACQ;CACR,MAAM,MAAM,GAAG,UAAU,YAAY,CAAC;CACtC,MAAM,QAAQ,GAAG,eAAe,YAAY,CAAC;CAE7C,MAAM,OAAO,YAAY,aAAa,UAAU;AAChD,QACE,KAAK,SAAS,oBAAoB,QAC5B,6BAA6B,sBACpC;AAED,QAAe;gCACe,MAAM;;IAElC,0BAA0B,MAAM,CAAC;IACjC,4BAA4B,MAAM,CAAC;;+BAER,GAAG,oBAAoB,CAAC;uBAChC,GAAG,oBAAoB,CAAC;gBAC/B,IAAI,wBAAwB,MAAM,cAAc,MAAM;;iBAErD,MAAM,IAAI,mBAAmB;;;;;;;;gBAQ9B,MAAM,IAAI,mBAAmB;;;;cAI/B,QAAQ,KAAK,CAAC;;;;iBAIX,MAAM;;;;;;;;;AAUvB,SAAgB,UAAU,OAAe,SAAkC;CACzE,MAAM,SAAS,GAAG,MAAM,GAAG;CAC3B,MAAM,sBAAsB,wBAAwB,OAAO,QAAQ;AAKnE,QAAe;iCACgB,GALJ,uBAAuB,OAAO,QAAQ,CAKZ,CAAC;iCACvB,GAAG,oBAAoB,CAAC;4BAC7B,GAAG,OAAO,CAAC;;;AAIvC,IAAM,4BAA4B,eAAE,OAAO;CACzC,cAAc,eAAE,MAAM,eAAE,QAAQ,CAAC;CACjC,cAAc,eAAE,SAAS;CAC1B,CAAC;AAIF,IAAM,gBAAgB,0BAA0B,OAAO;CACrD,MAAM,eAAE,QAAQ;CAChB,SAAS,eAAE,QAAQ;CACnB,eAAe;CACf,oBAAoB,iBAAiB,UAAU;CAC/C,mBAAmB,iBAAiB,UAAU;CAC/C,CAAC;AAOF,SAAS,aAAa,OAA4B;CAChD,MAAM,SAAS,GAAG,eAAe,MAAM,CAAC;AACxC,QACE,6BAA6B,MAAM,GAC3B,UAAU,OAAO;;AAK7B,eAAsB,WACpB,KACA,OACA,MACA,gBACA,EAAC,QAAQ,WACT,oBACA;CACA,MAAM,SAAS,eAAe,MAAM;CACpC,MAAM,SAA0B;EAAC;EAAQ;EAAQ;AACjD,OAAM,GAAG;kBACO,IAAI,OAAO,CAAC;;gBAEd,KAAK,IAAI,eAAe,IAAI,OAAO,IAAI,mBAAmB;;AAG1E,eAAsB,oBACpB,IACA,KACA,OACA,gBACA,SACyB;CACzB,MAAM,SAAS,IAAI,eAAe,MAAM,CAAC;CACzC,MAAM,SAAS,MAAM,GAAG;oBACN,OAAO,iBAAiB,OAAO;wBAC3B,eAAe;;AAErC,KAAI,OAAO,WAAW,GAAG;EAEvB,MAAM,cAAc,MAAM,GAAG;;eAElB,OAAO;AAClB,KAAG,OACD,WAAW,eAAe,MACvB,UAAU,aAAa,UAAU,QAAQ,CAAC,MAAM,MACjD,iBAAiB,UAAU,YAAY,GAC1C;AACD,SAAO;;AAET,QAAO,MAAQ,OAAO,IAAI,eAAe,cAAc;;AAGzD,eAAsB,uBACpB,KACA,OAC8B;CAC9B,MAAM,SAAS,MAAM,GAAG;;aAEb,IAAI,eAAe,MAAM,CAAC,CAAC;;AAEtC,QACE,OAAO,WAAW,SACZ,6CAA6C,OAAO,SAC3D;AACD,QAAO,MAAQ,OAAO,IAAI,2BAA2B,cAAc;;;;;;AAOrE,eAAsB,0BACpB,IACA,KACA,WACA;CACA,MAAM,EAAC,iBAAgB;AAEvB,MAAK,MAAM,OAAO,cAAc;AAC9B,0BAAwB,IAAI;AAC5B,MAAI,IAAI,WAAW,IAAI,CACrB,OAAM,IAAI,MACR,oHACkD,IAAI,IACvD;;CAGL,MAAM,kBAA4B,EAAE;AAGpC,KAAI,aAAa,QAAQ;EACvB,MAAM,UAAU,MAAM,GAAwB;0DACQ,IACpD,aACD,GAAG,QAAQ;AAEZ,MAAI,QAAQ,WAAW,aAAa,OAClC,OAAM,IAAI,MACR,gDAAgD,aAAa,aAAa,QAAQ,MAAM,CAAC,GAC1F;AAEH,kBAAgB,KAAK,GAAG,aAAa;QAChC;EACL,MAAM,qBAAqB,uBACzB,UAAU,OACV,UAAU,SACX;AACD,QAAM,GAAG;mCACsB,IAAI,mBAAmB;AACtD,QAAM,GAAG;2BACc,IAAI,mBAAmB,CAAC;;;AAG/C,kBAAgB,KAAK,mBAAmB;;CAG1C,MAAM,sBAAsB,wBAC1B,UAAU,OACV,UAAU,SACX;AACD,iBAAgB,KAAK,oBAAoB;CAEzC,MAAM,QAAQ;EAAC,GAAG;EAAW,cAAc;EAAgB;AAG3D,OAAM,IAAI,OAAO,YAAY,MAAM,GAAG,WAAW,OAAO,oBAAoB,CAAC;AAG7E,OAAM,6CADO,MAAM,mBAAmB,KAAK,gBAAgB,CACH,EAAE,MAAM,IAAI,IAAI;AAExE,OAAM,cAAc,IAAI,KAAK,MAAM;;AAGrC,eAAsB,cACpB,IACA,IACA,OACA;CAEA,MAAM,CAA
C,EAAC,kBAAiB,MAAM,EAAkC;iCAClC,GAFhB,eAAe,MAAM,CAEK,CAAC;AAC1C,KAAI;AACF,QAAM,GAAG,WAAU,QAAO,IAAI,OAAO,aAAa,MAAM,CAAC,CAAC;UACnD,GAAG;AACV,MAAI,aAGF,OAAM;AAER,MACE,EACE,aAAa,SAAS,iBACtB,EAAE,SAAS,2BAGb,OAAM;AAGR,KAAG,OACD,oEACM,EAAE,QAAQ,EAAE,QAAQ,6IAG3B;;;AAIL,SAAgB,qBACd,IACA,WACA;AAEA,WAAU,aAAa,SAAQ,QAAO;AACpC,MACE,CAAC,IAAI,aACL,CAAC,IAAI,aACL,CAAC,IAAI,aACL,CAAC,IAAI,YAGL,OAAM,IAAI,MACR,eAAe,IAAI,QAAQ,oDAC5B;GAEH;AAEF,WAAU,OAAO,SAAQ,UAAS,SAAS,IAAI,MAAM,CAAC;;AAOxD,SAAgB,6CACd,MAC+B;CAC/B,MAAM,oBAIA,EAAE;AACR,MAAK,MAAM,SAAS,KAAK,OACvB,KAAI,CAAC,MAAM,YAAY,UAAU,MAAM,oBAAA,KAA6B;EAQlE,MAAM,EAAC,QAAQ,MAAM,cAAa;AAClC,OAAK,MAAM,EAAC,SAAS,MAAM,eAAc,KAAK,QAAQ,QACpD,QACE,IAAI,WAAW,UACf,IAAI,cAAc,aAClB,IAAI,UACJ,IAAI,YACP,EAAE;AACD,OAAI,OAAO,KAAK,QAAQ,CAAC,MAAK,QAAO,CAAC,MAAM,QAAQ,KAAK,QAAQ,CAC/D;AAEF,qBAAkB,KAAK;IAAC;IAAQ;IAAW;IAAU,CAAC;AACtD;;;AAKN,KAAI,kBAAkB,WAAW,EAC/B;AAEF,QAAO,EACL,OAAO,OAAO,IAAgB,QAAoB;AAChD,OAAK,MAAM,EAAC,QAAQ,WAAW,eAAc,mBAAmB;AAC9D,MAAG,OACD,YAAY,UAAU,iCAAiC,UAAU,GAClE;AACD,SAAM,GAAG;sBACK,IAAI,OAAO,CAAC,GAAG,IAAI,UAAU,CAAC;yCACX,IAAI,UAAU;;IAGpD"}
+ {"version":3,"file":"shard.js","names":[],"sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/shard.ts"],"sourcesContent":["import {PG_INSUFFICIENT_PRIVILEGE} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport {literal} from 'pg-format';\nimport postgres from 'postgres';\nimport {assert} from '../../../../../../shared/src/asserts.ts';\nimport {\n jsonObjectSchema,\n stringify,\n type JSONObject,\n} from '../../../../../../shared/src/bigint-json.ts';\nimport * as v from '../../../../../../shared/src/valita.ts';\nimport {Default} from '../../../../db/postgres-replica-identity-enum.ts';\nimport type {PostgresDB, PostgresTransaction} from '../../../../types/pg.ts';\nimport type {AppID, ShardConfig, ShardID} from '../../../../types/shards.ts';\nimport {appSchema, check, upstreamSchema} from '../../../../types/shards.ts';\nimport {id} from '../../../../types/sql.ts';\nimport {createEventTriggerStatements} from './ddl.ts';\nimport {\n getPublicationInfo,\n publishedSchema,\n type PublicationInfo,\n type PublishedSchema,\n} from './published.ts';\nimport {validate} from './validation.ts';\n\n/**\n * PostgreSQL unquoted identifiers must start with a letter or underscore\n * and contain only letters, digits, and underscores.\n */\nconst VALID_PUBLICATION_NAME = /^[a-zA-Z_][a-zA-Z0-9_]*$/;\n\n/**\n * Validates that a publication name is a valid PostgreSQL identifier.\n * This provides defense-in-depth against SQL injection when publication\n * names are used in replication commands.\n */\nexport function validatePublicationName(name: string): void {\n if (!VALID_PUBLICATION_NAME.test(name)) {\n throw new Error(\n `Invalid publication name \"${name}\". Publication names must start with a letter or underscore ` +\n `and contain only letters, digits, and underscores.`,\n );\n }\n if (name.length > 63) {\n throw new Error(\n `Publication name \"${name}\" exceeds PostgreSQL's 63-character identifier limit.`,\n );\n }\n}\n\nexport function internalPublicationPrefix({appID}: AppID) {\n return `_${appID}_`;\n}\n\nexport function legacyReplicationSlot({appID, shardNum}: ShardID) {\n return `${appID}_${shardNum}`;\n}\n\nexport function replicationSlotPrefix(shard: ShardID) {\n const {appID, shardNum} = check(shard);\n return `${appID}_${shardNum}_`;\n}\n\n/**\n * An expression used to match replication slots in the shard\n * in a Postgres `LIKE` operator.\n */\nexport function replicationSlotExpression(shard: ShardID) {\n // Underscores have a special meaning in LIKE values\n // so they have to be escaped.\n return `${replicationSlotPrefix(shard)}%`.replaceAll('_', '\\\\_');\n}\n\nexport function newReplicationSlot(shard: ShardID) {\n return replicationSlotPrefix(shard) + Date.now();\n}\n\nfunction defaultPublicationName(appID: string, shardID: string | number) {\n return `_${appID}_public_${shardID}`;\n}\n\nexport function metadataPublicationName(\n appID: string,\n shardID: string | number,\n) {\n return `_${appID}_metadata_${shardID}`;\n}\n\n// The GLOBAL_SETUP must be idempotent as it can be run multiple times for different shards.\nfunction globalSetup(appID: AppID): string {\n const app = id(appSchema(appID));\n\n return /*sql*/ `\n CREATE SCHEMA IF NOT EXISTS ${app};\n\n CREATE TABLE IF NOT EXISTS ${app}.permissions (\n \"permissions\" JSONB,\n \"hash\" TEXT,\n\n -- Ensure that there is only a single row in the table.\n -- Application code can be agnostic to this column, and\n -- simply invoke UPDATE statements on the 
version columns.\n \"lock\" BOOL PRIMARY KEY DEFAULT true CHECK (lock)\n );\n\n CREATE OR REPLACE FUNCTION ${app}.set_permissions_hash()\n RETURNS TRIGGER AS $$\n BEGIN\n NEW.hash = md5(NEW.permissions::text);\n RETURN NEW;\n END;\n $$ LANGUAGE plpgsql;\n\n CREATE OR REPLACE TRIGGER on_set_permissions \n BEFORE INSERT OR UPDATE ON ${app}.permissions\n FOR EACH ROW\n EXECUTE FUNCTION ${app}.set_permissions_hash();\n\n INSERT INTO ${app}.permissions (permissions) VALUES (NULL) ON CONFLICT DO NOTHING;\n`;\n}\n\nexport async function ensureGlobalTables(db: PostgresDB, appID: AppID) {\n await db.unsafe(globalSetup(appID));\n}\n\nexport function getClientsTableDefinition(schema: string) {\n return /*sql*/ `\n CREATE TABLE ${schema}.\"clients\" (\n \"clientGroupID\" TEXT NOT NULL,\n \"clientID\" TEXT NOT NULL,\n \"lastMutationID\" BIGINT NOT NULL,\n \"userID\" TEXT,\n PRIMARY KEY(\"clientGroupID\", \"clientID\")\n );`;\n}\n\n/**\n * Tracks the results of mutations.\n * 1. It is an error for the same mutation ID to be used twice.\n * 2. The result is JSONB to allow for arbitrary results.\n *\n * The tables must be cleaned up as the clients\n * receive the mutation responses and as clients are removed.\n */\nexport function getMutationsTableDefinition(schema: string) {\n return /*sql*/ `\n CREATE TABLE ${schema}.\"mutations\" (\n \"clientGroupID\" TEXT NOT NULL,\n \"clientID\" TEXT NOT NULL,\n \"mutationID\" BIGINT NOT NULL,\n \"result\" JSON NOT NULL,\n PRIMARY KEY(\"clientGroupID\", \"clientID\", \"mutationID\")\n );`;\n}\n\nexport const SHARD_CONFIG_TABLE = 'shardConfig';\n\nexport function shardSetup(\n shardConfig: ShardConfig,\n metadataPublication: string,\n): string {\n const app = id(appSchema(shardConfig));\n const shard = id(upstreamSchema(shardConfig));\n\n const pubs = shardConfig.publications.toSorted();\n assert(\n pubs.includes(metadataPublication),\n () => `Publications must include ${metadataPublication}`,\n );\n\n return /*sql*/ `\n CREATE SCHEMA IF NOT EXISTS ${shard};\n\n ${getClientsTableDefinition(shard)}\n ${getMutationsTableDefinition(shard)}\n\n DROP PUBLICATION IF EXISTS ${id(metadataPublication)};\n CREATE PUBLICATION ${id(metadataPublication)}\n FOR TABLE ${app}.\"permissions\", TABLE ${shard}.\"clients\", ${shard}.\"mutations\";\n\n CREATE TABLE ${shard}.\"${SHARD_CONFIG_TABLE}\" (\n \"publications\" TEXT[] NOT NULL,\n \"ddlDetection\" BOOL NOT NULL,\n\n -- Ensure that there is only a single row in the table.\n \"lock\" BOOL PRIMARY KEY DEFAULT true CHECK (lock)\n );\n\n INSERT INTO ${shard}.\"${SHARD_CONFIG_TABLE}\" (\n \"publications\",\n \"ddlDetection\" \n ) VALUES (\n ARRAY[${literal(pubs)}], \n false -- set in SAVEPOINT with triggerSetup() statements\n );\n\n CREATE TABLE ${shard}.replicas (\n \"slot\" TEXT PRIMARY KEY,\n \"version\" TEXT NOT NULL,\n \"initialSchema\" JSON NOT NULL,\n \"initialSyncContext\" JSON,\n \"subscriberContext\" JSON\n );\n `;\n}\n\nexport function dropShard(appID: string, shardID: string | number): string {\n const schema = `${appID}_${shardID}`;\n const metadataPublication = metadataPublicationName(appID, shardID);\n const defaultPublication = defaultPublicationName(appID, shardID);\n\n // DROP SCHEMA ... 
CASCADE does not drop dependent PUBLICATIONS,\n // so PUBLICATIONs must be dropped explicitly.\n return /*sql*/ `\n DROP PUBLICATION IF EXISTS ${id(defaultPublication)};\n DROP PUBLICATION IF EXISTS ${id(metadataPublication)};\n DROP SCHEMA IF EXISTS ${id(schema)} CASCADE;\n `;\n}\n\nconst internalShardConfigSchema = v.object({\n publications: v.array(v.string()),\n ddlDetection: v.boolean(),\n});\n\nexport type InternalShardConfig = v.Infer<typeof internalShardConfigSchema>;\n\nconst replicaSchema = internalShardConfigSchema.extend({\n slot: v.string(),\n version: v.string(),\n initialSchema: publishedSchema,\n initialSyncContext: jsonObjectSchema.nullable(),\n subscriberContext: jsonObjectSchema.nullable(),\n});\n\nexport type Replica = v.Infer<typeof replicaSchema>;\n\n// triggerSetup is run separately in a sub-transaction (i.e. SAVEPOINT) so\n// that a failure (e.g. due to lack of superuser permissions) can be handled\n// by continuing in a degraded mode (ddlDetection = false).\nfunction triggerSetup(shard: ShardConfig): string {\n const schema = id(upstreamSchema(shard));\n return (\n createEventTriggerStatements(shard) +\n /*sql*/ `UPDATE ${schema}.\"shardConfig\" SET \"ddlDetection\" = true;`\n );\n}\n\n// Called in initial-sync to store the exact schema that was initially synced.\nexport async function addReplica(\n sql: PostgresDB,\n shard: ShardID,\n slot: string,\n replicaVersion: string,\n {tables, indexes}: PublishedSchema,\n initialSyncContext: JSONObject,\n) {\n const schema = upstreamSchema(shard);\n const synced: PublishedSchema = {tables, indexes};\n await sql`\n INSERT INTO ${sql(schema)}.replicas\n (\"slot\", \"version\", \"initialSchema\", \"initialSyncContext\")\n VALUES (${slot}, ${replicaVersion}, ${synced}, ${initialSyncContext})`;\n}\n\nexport async function getReplicaAtVersion(\n lc: LogContext,\n sql: PostgresDB,\n shard: ShardID,\n replicaVersion: string,\n context?: JSONObject,\n): Promise<Replica | null> {\n const schema = sql(upstreamSchema(shard));\n const result = await sql`\n SELECT\n replicas.\"slot\",\n replicas.\"version\",\n replicas.\"initialSchema\",\n replicas.\"initialSyncContext\",\n replicas.\"subscriberContext\",\n \"shardConfig\".\"publications\",\n \"shardConfig\".\"ddlDetection\"\n FROM ${schema}.replicas JOIN ${schema}.\"shardConfig\" ON true\n WHERE version = ${replicaVersion};\n `;\n if (result.length === 0) {\n // log out all the replicas and the joined shardConfig\n const allReplicas = await sql`\n SELECT slot, version, \"initialSyncContext\", \"subscriberContext\" \n FROM ${schema}.replicas`;\n lc.info?.(\n `Replica ${replicaVersion} ` +\n (context ? 
`(context: ${stringify(context)}) ` : '') +\n `not found in: ${stringify(allReplicas)}`,\n );\n return null;\n }\n return v.parse(result[0], replicaSchema, 'passthrough');\n}\n\nexport async function getInternalShardConfig(\n sql: PostgresDB,\n shard: ShardID,\n): Promise<InternalShardConfig> {\n const result = await sql`\n SELECT \"publications\", \"ddlDetection\"\n FROM ${sql(upstreamSchema(shard))}.\"shardConfig\";\n `;\n assert(\n result.length === 1,\n () => `Expected exactly one shardConfig row, got ${result.length}`,\n );\n return v.parse(result[0], internalShardConfigSchema, 'passthrough');\n}\n\n/**\n * Sets up and returns all publications (including internal ones) for\n * the given shard.\n */\nexport async function setupTablesAndReplication(\n lc: LogContext,\n sql: PostgresTransaction,\n requested: ShardConfig,\n) {\n const {publications} = requested;\n // Validate requested publications.\n for (const pub of publications) {\n validatePublicationName(pub);\n if (pub.startsWith('_')) {\n throw new Error(\n `Publication names starting with \"_\" are reserved for internal use.\\n` +\n `Please use a different name for publication \"${pub}\".`,\n );\n }\n }\n const allPublications: string[] = [];\n\n // Setup application publications.\n if (publications.length) {\n const results = await sql<{pubname: string}[]>`\n SELECT pubname from pg_publication WHERE pubname IN ${sql(\n publications,\n )}`.values();\n\n if (results.length !== publications.length) {\n throw new Error(\n `Unknown or invalid publications. Specified: [${publications}]. Found: [${results.flat()}]`,\n );\n }\n allPublications.push(...publications);\n } else {\n const defaultPublication = defaultPublicationName(\n requested.appID,\n requested.shardNum,\n );\n await sql`\n DROP PUBLICATION IF EXISTS ${sql(defaultPublication)}`;\n await sql`\n CREATE PUBLICATION ${sql(defaultPublication)} \n FOR TABLES IN SCHEMA public\n WITH (publish_via_partition_root = true)`;\n allPublications.push(defaultPublication);\n }\n\n const metadataPublication = metadataPublicationName(\n requested.appID,\n requested.shardNum,\n );\n allPublications.push(metadataPublication);\n\n const shard = {...requested, publications: allPublications};\n\n // Setup the global tables and shard tables / publications.\n await sql.unsafe(globalSetup(shard) + shardSetup(shard, metadataPublication));\n\n const pubs = await getPublicationInfo(sql, allPublications);\n await replicaIdentitiesForTablesWithoutPrimaryKeys(pubs)?.apply(lc, sql);\n\n await setupTriggers(lc, sql, shard);\n}\n\nexport async function setupTriggers(\n lc: LogContext,\n tx: PostgresTransaction,\n shard: ShardConfig,\n) {\n const schema = upstreamSchema(shard);\n const [{ddlDetection}] = await tx<InternalShardConfig[]> /*sql*/ `\n SELECT \"ddlDetection\" FROM ${tx(schema)}.\"shardConfig\"`;\n try {\n await tx.savepoint(sub => sub.unsafe(triggerSetup(shard)));\n } catch (e) {\n if (ddlDetection) {\n // If ddlDetection has already been enabled, subsequent failures to\n // upgrade the trigger should be propagated rather than swallowed.\n throw e;\n }\n if (\n !(\n e instanceof postgres.PostgresError &&\n e.code === PG_INSUFFICIENT_PRIVILEGE\n )\n ) {\n throw e;\n }\n // If triggerSetup() fails, replication continues in ddlDetection=false mode.\n lc.warn?.(\n `Unable to create event triggers for schema change detection:\\n\\n` +\n `\"${e.hint ?? 
e.message}\"\\n\\n` +\n `Proceeding in degraded mode: schema changes will halt replication,\\n` +\n `requiring the replica to be reset (manually or with --auto-reset).`,\n );\n }\n}\n\nexport function validatePublications(\n lc: LogContext,\n published: PublicationInfo,\n) {\n // Verify that all publications export the proper events.\n published.publications.forEach(pub => {\n if (\n !pub.pubinsert ||\n !pub.pubupdate ||\n !pub.pubdelete ||\n !pub.pubtruncate\n ) {\n // TODO: Make APIError?\n throw new Error(\n `PUBLICATION ${pub.pubname} must publish insert, update, delete, and truncate`,\n );\n }\n });\n\n published.tables.forEach(table => validate(lc, table));\n}\n\ntype ReplicaIdentities = {\n apply(lc: LogContext, db: PostgresDB): Promise<void>;\n};\n\nexport function replicaIdentitiesForTablesWithoutPrimaryKeys(\n pubs: PublishedSchema,\n): ReplicaIdentities | undefined {\n const replicaIdentities: {\n schema: string;\n tableName: string;\n indexName: string;\n }[] = [];\n for (const table of pubs.tables) {\n if (!table.primaryKey?.length && table.replicaIdentity === Default) {\n // Look for an index that can serve as the REPLICA IDENTITY USING INDEX. It must be:\n // - UNIQUE\n // - NOT NULL columns\n // - not deferrable (i.e. isImmediate)\n // - not partial (are already filtered out)\n //\n // https://www.postgresql.org/docs/current/sql-altertable.html#SQL-ALTERTABLE-REPLICA-IDENTITY\n const {schema, name: tableName} = table;\n for (const {columns, name: indexName} of pubs.indexes.filter(\n idx =>\n idx.schema === schema &&\n idx.tableName === tableName &&\n idx.unique &&\n idx.isImmediate,\n )) {\n if (Object.keys(columns).some(col => !table.columns[col].notNull)) {\n continue; // Only indexes with all NOT NULL columns are suitable.\n }\n replicaIdentities.push({schema, tableName, indexName});\n break;\n }\n }\n }\n\n if (replicaIdentities.length === 0) {\n return undefined;\n }\n return {\n apply: async (lc: LogContext, sql: PostgresDB) => {\n for (const {schema, tableName, indexName} of replicaIdentities) {\n lc.info?.(\n `setting \"${indexName}\" as the REPLICA IDENTITY for \"${tableName}\"`,\n );\n await sql`\n ALTER TABLE ${sql(schema)}.${sql(tableName)} \n REPLICA IDENTITY USING INDEX ${sql(indexName)}`;\n }\n },\n 
};\n}\n"],"mappings":";;;;;;;;;;;;;;;;AA6BA,IAAM,yBAAyB;;;;;;AAO/B,SAAgB,wBAAwB,MAAoB;AAC1D,KAAI,CAAC,uBAAuB,KAAK,KAAK,CACpC,OAAM,IAAI,MACR,6BAA6B,KAAK,gHAEnC;AAEH,KAAI,KAAK,SAAS,GAChB,OAAM,IAAI,MACR,qBAAqB,KAAK,uDAC3B;;AAIL,SAAgB,0BAA0B,EAAC,SAAe;AACxD,QAAO,IAAI,MAAM;;AAGnB,SAAgB,sBAAsB,EAAC,OAAO,YAAoB;AAChE,QAAO,GAAG,MAAM,GAAG;;AAGrB,SAAgB,sBAAsB,OAAgB;CACpD,MAAM,EAAC,OAAO,aAAY,MAAM,MAAM;AACtC,QAAO,GAAG,MAAM,GAAG,SAAS;;;;;;AAO9B,SAAgB,0BAA0B,OAAgB;AAGxD,QAAO,GAAG,sBAAsB,MAAM,CAAC,GAAG,WAAW,KAAK,MAAM;;AAGlE,SAAgB,mBAAmB,OAAgB;AACjD,QAAO,sBAAsB,MAAM,GAAG,KAAK,KAAK;;AAGlD,SAAS,uBAAuB,OAAe,SAA0B;AACvE,QAAO,IAAI,MAAM,UAAU;;AAG7B,SAAgB,wBACd,OACA,SACA;AACA,QAAO,IAAI,MAAM,YAAY;;AAI/B,SAAS,YAAY,OAAsB;CACzC,MAAM,MAAM,GAAG,UAAU,MAAM,CAAC;AAEhC,QAAe;gCACe,IAAI;;+BAEL,IAAI;;;;;;;;;;+BAUJ,IAAI;;;;;;;;;iCASF,IAAI;;uBAEd,IAAI;;gBAEX,IAAI;;;AAIpB,eAAsB,mBAAmB,IAAgB,OAAc;AACrE,OAAM,GAAG,OAAO,YAAY,MAAM,CAAC;;AAGrC,SAAgB,0BAA0B,QAAgB;AACxD,QAAe;iBACA,OAAO;;;;;;;;;;;;;;;;AAiBxB,SAAgB,4BAA4B,QAAgB;AAC1D,QAAe;iBACA,OAAO;;;;;;;;AASxB,IAAa,qBAAqB;AAElC,SAAgB,WACd,aACA,qBACQ;CACR,MAAM,MAAM,GAAG,UAAU,YAAY,CAAC;CACtC,MAAM,QAAQ,GAAG,eAAe,YAAY,CAAC;CAE7C,MAAM,OAAO,YAAY,aAAa,UAAU;AAChD,QACE,KAAK,SAAS,oBAAoB,QAC5B,6BAA6B,sBACpC;AAED,QAAe;gCACe,MAAM;;IAElC,0BAA0B,MAAM,CAAC;IACjC,4BAA4B,MAAM,CAAC;;+BAER,GAAG,oBAAoB,CAAC;uBAChC,GAAG,oBAAoB,CAAC;gBAC/B,IAAI,wBAAwB,MAAM,cAAc,MAAM;;iBAErD,MAAM,IAAI,mBAAmB;;;;;;;;gBAQ9B,MAAM,IAAI,mBAAmB;;;;cAI/B,QAAQ,KAAK,CAAC;;;;iBAIX,MAAM;;;;;;;;;AAUvB,SAAgB,UAAU,OAAe,SAAkC;CACzE,MAAM,SAAS,GAAG,MAAM,GAAG;CAC3B,MAAM,sBAAsB,wBAAwB,OAAO,QAAQ;AAKnE,QAAe;iCACgB,GALJ,uBAAuB,OAAO,QAAQ,CAKZ,CAAC;iCACvB,GAAG,oBAAoB,CAAC;4BAC7B,GAAG,OAAO,CAAC;;;AAIvC,IAAM,4BAA4B,eAAE,OAAO;CACzC,cAAc,eAAE,MAAM,eAAE,QAAQ,CAAC;CACjC,cAAc,eAAE,SAAS;CAC1B,CAAC;AAIF,IAAM,gBAAgB,0BAA0B,OAAO;CACrD,MAAM,eAAE,QAAQ;CAChB,SAAS,eAAE,QAAQ;CACnB,eAAe;CACf,oBAAoB,iBAAiB,UAAU;CAC/C,mBAAmB,iBAAiB,UAAU;CAC/C,CAAC;AAOF,SAAS,aAAa,OAA4B;CAChD,MAAM,SAAS,GAAG,eAAe,MAAM,CAAC;AACxC,QACE,6BAA6B,MAAM,GAC3B,UAAU,OAAO;;AAK7B,eAAsB,WACpB,KACA,OACA,MACA,gBACA,EAAC,QAAQ,WACT,oBACA;CACA,MAAM,SAAS,eAAe,MAAM;CACpC,MAAM,SAA0B;EAAC;EAAQ;EAAQ;AACjD,OAAM,GAAG;kBACO,IAAI,OAAO,CAAC;;gBAEd,KAAK,IAAI,eAAe,IAAI,OAAO,IAAI,mBAAmB;;AAG1E,eAAsB,oBACpB,IACA,KACA,OACA,gBACA,SACyB;CACzB,MAAM,SAAS,IAAI,eAAe,MAAM,CAAC;CACzC,MAAM,SAAS,MAAM,GAAG;;;;;;;;;WASf,OAAO,iBAAiB,OAAO;wBAClB,eAAe;;AAErC,KAAI,OAAO,WAAW,GAAG;EAEvB,MAAM,cAAc,MAAM,GAAG;;eAElB,OAAO;AAClB,KAAG,OACD,WAAW,eAAe,MACvB,UAAU,aAAa,UAAU,QAAQ,CAAC,MAAM,MACjD,iBAAiB,UAAU,YAAY,GAC1C;AACD,SAAO;;AAET,QAAO,MAAQ,OAAO,IAAI,eAAe,cAAc;;AAGzD,eAAsB,uBACpB,KACA,OAC8B;CAC9B,MAAM,SAAS,MAAM,GAAG;;aAEb,IAAI,eAAe,MAAM,CAAC,CAAC;;AAEtC,QACE,OAAO,WAAW,SACZ,6CAA6C,OAAO,SAC3D;AACD,QAAO,MAAQ,OAAO,IAAI,2BAA2B,cAAc;;;;;;AAOrE,eAAsB,0BACpB,IACA,KACA,WACA;CACA,MAAM,EAAC,iBAAgB;AAEvB,MAAK,MAAM,OAAO,cAAc;AAC9B,0BAAwB,IAAI;AAC5B,MAAI,IAAI,WAAW,IAAI,CACrB,OAAM,IAAI,MACR,oHACkD,IAAI,IACvD;;CAGL,MAAM,kBAA4B,EAAE;AAGpC,KAAI,aAAa,QAAQ;EACvB,MAAM,UAAU,MAAM,GAAwB;0DACQ,IACpD,aACD,GAAG,QAAQ;AAEZ,MAAI,QAAQ,WAAW,aAAa,OAClC,OAAM,IAAI,MACR,gDAAgD,aAAa,aAAa,QAAQ,MAAM,CAAC,GAC1F;AAEH,kBAAgB,KAAK,GAAG,aAAa;QAChC;EACL,MAAM,qBAAqB,uBACzB,UAAU,OACV,UAAU,SACX;AACD,QAAM,GAAG;mCACsB,IAAI,mBAAmB;AACtD,QAAM,GAAG;2BACc,IAAI,mBAAmB,CAAC;;;AAG/C,kBAAgB,KAAK,mBAAmB;;CAG1C,MAAM,sBAAsB,wBAC1B,UAAU,OACV,UAAU,SACX;AACD,iBAAgB,KAAK,oBAAoB;CAEzC,MAAM,QAAQ;EAAC,GAAG;EAAW,cAAc;EAAgB;AAG3D,OAAM,IAAI,OAAO,YAAY,MAAM,GAAG,WAAW,OAAO,oBAAoB,CAAC;AAG7E,OAAM,6CADO,MAAM,mBAAmB,KAAK,gBAAgB,CACH,EAAE,MAAM,IAAI,IAAI;AAExE,OAAM,cAAc,IAAI,KAAK,MAAM;;AAGrC,eAAsB,cACpB,IACA,IACA,OACA;CAEA,M
AAM,CAAC,EAAC,kBAAiB,MAAM,EAAkC;iCAClC,GAFhB,eAAe,MAAM,CAEK,CAAC;AAC1C,KAAI;AACF,QAAM,GAAG,WAAU,QAAO,IAAI,OAAO,aAAa,MAAM,CAAC,CAAC;UACnD,GAAG;AACV,MAAI,aAGF,OAAM;AAER,MACE,EACE,aAAa,SAAS,iBACtB,EAAE,SAAS,2BAGb,OAAM;AAGR,KAAG,OACD,oEACM,EAAE,QAAQ,EAAE,QAAQ,6IAG3B;;;AAIL,SAAgB,qBACd,IACA,WACA;AAEA,WAAU,aAAa,SAAQ,QAAO;AACpC,MACE,CAAC,IAAI,aACL,CAAC,IAAI,aACL,CAAC,IAAI,aACL,CAAC,IAAI,YAGL,OAAM,IAAI,MACR,eAAe,IAAI,QAAQ,oDAC5B;GAEH;AAEF,WAAU,OAAO,SAAQ,UAAS,SAAS,IAAI,MAAM,CAAC;;AAOxD,SAAgB,6CACd,MAC+B;CAC/B,MAAM,oBAIA,EAAE;AACR,MAAK,MAAM,SAAS,KAAK,OACvB,KAAI,CAAC,MAAM,YAAY,UAAU,MAAM,oBAAA,KAA6B;EAQlE,MAAM,EAAC,QAAQ,MAAM,cAAa;AAClC,OAAK,MAAM,EAAC,SAAS,MAAM,eAAc,KAAK,QAAQ,QACpD,QACE,IAAI,WAAW,UACf,IAAI,cAAc,aAClB,IAAI,UACJ,IAAI,YACP,EAAE;AACD,OAAI,OAAO,KAAK,QAAQ,CAAC,MAAK,QAAO,CAAC,MAAM,QAAQ,KAAK,QAAQ,CAC/D;AAEF,qBAAkB,KAAK;IAAC;IAAQ;IAAW;IAAU,CAAC;AACtD;;;AAKN,KAAI,kBAAkB,WAAW,EAC/B;AAEF,QAAO,EACL,OAAO,OAAO,IAAgB,QAAoB;AAChD,OAAK,MAAM,EAAC,QAAQ,WAAW,eAAc,mBAAmB;AAC9D,MAAG,OACD,YAAY,UAAU,iCAAiC,UAAU,GAClE;AACD,SAAM,GAAG;sBACK,IAAI,OAAO,CAAC,GAAG,IAAI,UAAU,CAAC;yCACX,IAAI,UAAU;;IAGpD"}
@@ -1 +1 @@
1
- {"version":3,"file":"storer.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/change-streamer/storer.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAWjD,OAAO,EAAC,eAAe,EAAC,MAAM,8BAA8B,CAAC;AAC7D,OAAO,EAAC,KAAK,UAAU,EAA2B,MAAM,mBAAmB,CAAC;AAC5E,OAAO,EAAY,KAAK,OAAO,EAAC,MAAM,uBAAuB,CAAC;AAC9D,OAAO,EAKL,KAAK,eAAe,EAMrB,MAAM,sCAAsC,CAAC;AAC9C,OAAO,EAAC,KAAK,MAAM,EAAC,MAAM,iDAAiD,CAAC;AAC5E,OAAO,KAAK,EACV,uBAAuB,EACvB,qBAAqB,EACtB,MAAM,6CAA6C,CAAC;AACrD,OAAO,KAAK,EAAC,cAAc,EAAC,MAAM,6BAA6B,CAAC;AAChE,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,eAAe,CAAC;AAC3C,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,8BAA8B,CAAC;AAUpE,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,iBAAiB,CAAC;AA8BhD,MAAM,MAAM,aAAa,GAAG;IAC1B,+BAA+B,EAAE,MAAM,CAAC;IACxC,kBAAkB,EAAE,MAAM,CAAC;CAC5B,CAAC;AAEF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA6BG;AACH,qBAAa,MAAO,YAAW,OAAO;;IACpC,QAAQ,CAAC,EAAE,YAAY;gBAkBrB,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,OAAO,EACd,MAAM,EAAE,MAAM,EACd,gBAAgB,EAAE,MAAM,EACxB,iBAAiB,EAAE,MAAM,EACzB,EAAE,EAAE,UAAU,EACd,cAAc,EAAE,MAAM,EACtB,UAAU,EAAE,CAAC,CAAC,EAAE,MAAM,GAAG,qBAAqB,KAAK,IAAI,EACvD,OAAO,EAAE,CAAC,GAAG,EAAE,KAAK,KAAK,IAAI,EAC7B,EAAC,+BAA+B,EAAE,kBAAkB,EAAC,EAAE,aAAa;IA+BhE,eAAe,CAAC,SAAS,CAAC,EAAE,SAAS,GAAG,IAAI;IA2B5C,sCAAsC,IAAI,OAAO,CAAC;QACtD,aAAa,EAAE,MAAM,CAAC;QACtB,gBAAgB,EAAE,eAAe,EAAE,CAAC;KACrC,CAAC;IAkCI,yBAAyB,IAAI,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC;IAOzD,kBAAkB,CAAC,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC;IA+BtD;;OAEG;IACH,KAAK,CAAC,KAAK,EAAE,iBAAiB;IAsB9B,KAAK;IAIL,MAAM,CAAC,CAAC,EAAE,uBAAuB;IAIjC,OAAO,CAAC,UAAU,EAAE,UAAU,EAAE,IAAI,EAAE,cAAc;IAMpD,YAAY,IAAI,OAAO,CAAC,IAAI,CAAC,GAAG,SAAS;IA0CzC;;;OAGG;IACG,GAAG;IAocT;;;OAGG;IACG,YAAY;IAQlB,IAAI;CAOL;AAgBD,qBAAa,SAAS;;IAGpB,QAAQ,CAAC,cAAc,EAAE,MAAM,CAAC;IAChC,QAAQ,CAAC,YAAY,EAAE,MAAM,CAAC;gBAG5B,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,eAAe,EACnB,cAAc,EAAE,MAAM,EACtB,SAAS,EAAE,MAAM;IAUb,OAAO;CAWd;AAED,qBAAa,WAAW;;gBAKV,EAAE,EAAE,UAAU,EAAE,KAAK,EAAE,OAAO,EAAE,EAAE,EAAE,UAAU;IAWpD,OAAO;CA4Bd"}
1
+ {"version":3,"file":"storer.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/change-streamer/storer.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAWjD,OAAO,EAAC,eAAe,EAAC,MAAM,8BAA8B,CAAC;AAC7D,OAAO,EAAC,KAAK,UAAU,EAA2B,MAAM,mBAAmB,CAAC;AAC5E,OAAO,EAAY,KAAK,OAAO,EAAC,MAAM,uBAAuB,CAAC;AAC9D,OAAO,EAKL,KAAK,eAAe,EAMrB,MAAM,sCAAsC,CAAC;AAC9C,OAAO,EAAC,KAAK,MAAM,EAAC,MAAM,iDAAiD,CAAC;AAC5E,OAAO,KAAK,EACV,uBAAuB,EACvB,qBAAqB,EACtB,MAAM,6CAA6C,CAAC;AACrD,OAAO,KAAK,EAAC,cAAc,EAAC,MAAM,6BAA6B,CAAC;AAChE,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,eAAe,CAAC;AAC3C,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,8BAA8B,CAAC;AASpE,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,iBAAiB,CAAC;AAkChD,MAAM,MAAM,aAAa,GAAG;IAC1B,+BAA+B,EAAE,MAAM,CAAC;IACxC,kBAAkB,EAAE,MAAM,CAAC;CAC5B,CAAC;AAEF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA6BG;AACH,qBAAa,MAAO,YAAW,OAAO;;IACpC,QAAQ,CAAC,EAAE,YAAY;gBAkBrB,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,OAAO,EACd,MAAM,EAAE,MAAM,EACd,gBAAgB,EAAE,MAAM,EACxB,iBAAiB,EAAE,MAAM,EACzB,EAAE,EAAE,UAAU,EACd,cAAc,EAAE,MAAM,EACtB,UAAU,EAAE,CAAC,CAAC,EAAE,MAAM,GAAG,qBAAqB,KAAK,IAAI,EACvD,OAAO,EAAE,CAAC,GAAG,EAAE,KAAK,KAAK,IAAI,EAC7B,EAAC,+BAA+B,EAAE,kBAAkB,EAAC,EAAE,aAAa;IA+BhE,eAAe,CAAC,SAAS,CAAC,EAAE,SAAS,GAAG,IAAI;IA2B5C,sCAAsC,IAAI,OAAO,CAAC;QACtD,aAAa,EAAE,MAAM,CAAC;QACtB,gBAAgB,EAAE,eAAe,EAAE,CAAC;KACrC,CAAC;IAkCI,yBAAyB,IAAI,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC;IAOzD,kBAAkB,CAAC,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC;IA+BtD;;OAEG;IACH,KAAK,CAAC,KAAK,EAAE,iBAAiB;IAsB9B,KAAK;IAIL,MAAM,CAAC,CAAC,EAAE,uBAAuB;IAIjC,OAAO,CAAC,UAAU,EAAE,UAAU,EAAE,IAAI,EAAE,cAAc;IAMpD,YAAY,IAAI,OAAO,CAAC,IAAI,CAAC,GAAG,SAAS;IA0CzC;;;OAGG;IACG,GAAG;IAocT;;;OAGG;IACG,YAAY;IAQlB,IAAI;CAOL;AAgBD,qBAAa,SAAS;;IAGpB,QAAQ,CAAC,cAAc,EAAE,MAAM,CAAC;IAChC,QAAQ,CAAC,YAAY,EAAE,MAAM,CAAC;gBAG5B,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,eAAe,EACnB,cAAc,EAAE,MAAM,EACtB,SAAS,EAAE,MAAM;IAUb,OAAO;CAWd;AAED,qBAAa,WAAW;;gBAKV,EAAE,EAAE,UAAU,EAAE,KAAK,EAAE,OAAO,EAAE,EAAE,EAAE,UAAU;IAWpD,OAAO;CA4Bd"}
@@ -139,7 +139,7 @@ var Storer = class {
139
139
  RETURNING watermark, pos
140
140
  ) SELECT COUNT(*) as deleted FROM purged;`;
141
141
  const [{ owner }] = await sql`
142
- SELECT * FROM ${this.#cdc("replicationState")} FOR SHARE`;
142
+ SELECT "owner" FROM ${this.#cdc("replicationState")} FOR SHARE`;
143
143
  if (owner !== this.#taskID) throw new AbortError(`aborting changeLog purge to ${watermark} because ownership has been taken by ${owner}`);
144
144
  return Number(deleted);
145
145
  });
@@ -281,7 +281,7 @@ var Storer = class {
281
281
  tx.pool.run(this.#db);
282
282
  tx.pool.process((tx) => {
283
283
  tx`
284
- SELECT * FROM ${this.#cdc("replicationState")} FOR UPDATE`.then(([result]) => resolve(result), reject);
284
+ SELECT "owner" FROM ${this.#cdc("replicationState")} FOR UPDATE`.then(([result]) => resolve(result), reject);
285
285
  return [];
286
286
  });
287
287
  } else {
@@ -333,7 +333,7 @@ var Storer = class {
333
333
  let lastWatermark;
334
334
  try {
335
335
  [{lastWatermark}] = await reader.processReadTask((sql) => sql`
336
- SELECT * FROM ${this.#cdc("replicationState")}
336
+ SELECT "lastWatermark" FROM ${this.#cdc("replicationState")}
337
337
  `);
338
338
  } catch (e) {
339
339
  subs.map(({ subscriber }) => subscriber.fail(e));
@@ -1 +1 @@
1
- {"version":3,"file":"storer.js","names":["#lc","#shard","#taskID","#discoveryAddress","#discoveryProtocol","#db","#replicaVersion","#onConsumed","#onFatal","#queue","#backPressureThresholdBytes","#statementTimeoutMs","#cdc","#approximateQueuedBytes","#running","#readyForMore","#stopped","#processQueue","#cancelQueueEntries","#startCatchup","#trackBackfillMetadata","#maybeReleaseBackPressure","#catchup","#upsertTableMetadataStmt","#upsertColumnBackfillStmt","#tx","#released"],"sources":["../../../../../../zero-cache/src/services/change-streamer/storer.ts"],"sourcesContent":["import {getHeapStatistics} from 'node:v8';\nimport type {LogContext} from '@rocicorp/logger';\nimport {resolver, type Resolver} from '@rocicorp/resolver';\nimport {type PendingQuery, type Row} from 'postgres';\nimport {AbortError} from '../../../../shared/src/abort-error.ts';\nimport {assert} from '../../../../shared/src/asserts.ts';\nimport {BigIntJSON} from '../../../../shared/src/bigint-json.ts';\nimport {Queue} from '../../../../shared/src/queue.ts';\nimport {promiseVoid} from '../../../../shared/src/resolved-promises.ts';\nimport * as v from '../../../../shared/src/valita.ts';\nimport * as Mode from '../../db/mode-enum.ts';\nimport {runTx} from '../../db/run-transaction.ts';\nimport {TransactionPool} from '../../db/transaction-pool.ts';\nimport {type PostgresDB, type PostgresTransaction} from '../../types/pg.ts';\nimport {cdcSchema, type ShardID} from '../../types/shards.ts';\nimport {\n backfillRequestSchema,\n isDataChange,\n isSchemaChange,\n type BackfillID,\n type BackfillRequest,\n type Change,\n type DataChange,\n type Identifier,\n type SchemaChange,\n type TableMetadata,\n} from '../change-source/protocol/current.ts';\nimport {type Commit} from '../change-source/protocol/current/downstream.ts';\nimport type {\n DownstreamStatusMessage,\n UpstreamStatusMessage,\n} from '../change-source/protocol/current/status.ts';\nimport type {ReplicatorMode} from '../replicator/replicator.ts';\nimport type {Service} from '../service.ts';\nimport type {WatermarkedChange} from './change-streamer-service.ts';\nimport {type ChangeEntry} from './change-streamer.ts';\nimport * as ErrorType from './error-type-enum.ts';\nimport {\n AutoResetSignal,\n markResetRequired,\n type BackfillingColumn,\n type ReplicationState,\n type TableMetadataRow,\n} from './schema/tables.ts';\nimport type {Subscriber} from './subscriber.ts';\n\ntype SubscriberAndMode = {\n subscriber: Subscriber;\n mode: ReplicatorMode;\n};\n\ntype QueueEntry =\n | [\n 'change',\n watermark: string,\n json: string,\n orig: Exclude<Change, DataChange> | null, // null for DataChanges\n ]\n | ['ready', callback: () => void]\n | ['subscriber', SubscriberAndMode]\n | DownstreamStatusMessage\n | ['abort']\n | 'stop';\n\ntype PendingTransaction = {\n pool: TransactionPool;\n preCommitWatermark: string;\n pos: number;\n startingReplicationState: Promise<ReplicationState>;\n ack: boolean;\n};\n\nconst backfillRequestsSchema = v.array(backfillRequestSchema);\n\nexport type TuningOptions = {\n backPressureLimitHeapProportion: number;\n statementTimeoutMs: number;\n};\n\n/**\n * Handles the storage of changes and the catchup of subscribers\n * that are behind.\n *\n * In the context of catchup and cleanup, it is the responsibility of the\n * Storer to decide whether a client can be caught up, or whether the\n * changes needed to catch a client up have been purged.\n *\n * **Maintained invariant**: The Change DB is only empty for a\n * completely new replica (i.e. 
initial-sync with no changes from the\n * replication stream).\n * * In this case, all new subscribers are expected start from the\n * `replicaVersion`, which is the version at which initial sync\n * was performed, and any attempts to catchup from a different\n * point fail.\n *\n * Conversely, if non-initial changes have flowed through the system\n * (i.e. via the replication stream), the ChangeDB must *not* be empty,\n * and the earliest change in the `changeLog` represents the earliest\n * \"commit\" from (after) which a subscriber can be caught up.\n * * Any attempts to catchup from an earlier point must fail with\n * a `WatermarkTooOld` error.\n * * Failure to do so could result in streaming changes to the\n * subscriber such that there is a gap in its replication history.\n *\n * Note: Subscribers (i.e. `incremental-syncer`) consider an \"error\" signal\n * an unrecoverable error and shut down in response. This allows the\n * production system to replace it with a new task and fresh copy of the\n * replica backup.\n */\nexport class Storer implements Service {\n readonly id = 'storer';\n readonly #lc: LogContext;\n readonly #shard: ShardID;\n readonly #taskID: string;\n readonly #discoveryAddress: string;\n readonly #discoveryProtocol: string;\n readonly #db: PostgresDB;\n readonly #replicaVersion: string;\n readonly #onConsumed: (c: Commit | UpstreamStatusMessage) => void;\n readonly #onFatal: (err: Error) => void;\n readonly #queue = new Queue<QueueEntry>();\n readonly #backPressureThresholdBytes: number;\n readonly #statementTimeoutMs: number;\n\n #approximateQueuedBytes = 0;\n #running = false;\n\n constructor(\n lc: LogContext,\n shard: ShardID,\n taskID: string,\n discoveryAddress: string,\n discoveryProtocol: string,\n db: PostgresDB,\n replicaVersion: string,\n onConsumed: (c: Commit | UpstreamStatusMessage) => void,\n onFatal: (err: Error) => void,\n {backPressureLimitHeapProportion, statementTimeoutMs}: TuningOptions,\n ) {\n this.#lc = lc.withContext('component', 'change-log');\n this.#shard = shard;\n this.#taskID = taskID;\n this.#discoveryAddress = discoveryAddress;\n this.#discoveryProtocol = discoveryProtocol;\n this.#db = db;\n this.#replicaVersion = replicaVersion;\n this.#onConsumed = onConsumed;\n this.#onFatal = onFatal;\n this.#statementTimeoutMs = statementTimeoutMs;\n\n const heapStats = getHeapStatistics();\n this.#backPressureThresholdBytes =\n (heapStats.heap_size_limit - heapStats.used_heap_size) *\n backPressureLimitHeapProportion;\n\n this.#lc.info?.(\n `Using up to ${(this.#backPressureThresholdBytes / 1024 ** 2).toFixed(2)} MB of ` +\n `--max-old-space-size (~${(heapStats.heap_size_limit / 1024 ** 2).toFixed(2)} MB) ` +\n `to absorb upstream spikes`,\n {heapStats},\n );\n }\n\n // For readability in SQL statements.\n #cdc(table: string) {\n return this.#db(`${cdcSchema(this.#shard)}.${table}`);\n }\n\n async assumeOwnership(purgeLock?: PurgeLock | null) {\n const db = this.#db;\n const owner = this.#taskID;\n const ownerAddress = this.#discoveryAddress;\n const ownerProtocol = this.#discoveryProtocol;\n // we omit `ws://` so that old view syncer versions that are not expecting the protocol continue to not get it\n const addressWithProtocol =\n ownerProtocol === 'ws'\n ? 
ownerAddress\n : `${ownerProtocol}://${ownerAddress}`;\n this.#lc.info?.(`assuming ownership at ${addressWithProtocol}`);\n const start = performance.now();\n await db`UPDATE ${this.#cdc('replicationState')} SET ${db({owner, ownerAddress: addressWithProtocol})}`;\n const elapsed = (performance.now() - start).toFixed(2);\n this.#lc.info?.(\n `assumed ownership at ${addressWithProtocol} (${elapsed} ms)`,\n );\n\n if (purgeLock) {\n // Once ownership has been assumed, any initial purge-lock preventing the\n // purging of change-log records can be released, as a change-streamer\n // that was attempting to purge records will correspondingly abort on the\n // ownership check.\n void purgeLock.release();\n }\n }\n\n async getStartStreamInitializationParameters(): Promise<{\n lastWatermark: string;\n backfillRequests: BackfillRequest[];\n }> {\n const [[{lastWatermark}], result] = await runTx(\n this.#db,\n sql => [\n sql<{lastWatermark: string}[]>`\n SELECT \"lastWatermark\" FROM ${this.#cdc('replicationState')}`,\n\n // Formats a BackfillRequest using json_object_agg() to construct the\n // `columns` object. It is LEFT JOIN'ed with the `tableMetadata` table\n // to make it optional and possibly `null`.\n sql`\n SELECT \n json_build_object(\n 'schema', b.\"schema\",\n 'name', b.\"table\",\n 'metadata', t.\"metadata\"\n ) as \"table\",\n json_object_agg(b.\"column\", b.\"backfill\") \n as \"columns\"\n FROM ${this.#cdc('backfilling')} as b\n LEFT JOIN ${this.#cdc('tableMetadata')} as t\n ON (b.\"schema\" = t.\"schema\" AND b.\"table\" = t.\"table\")\n GROUP BY b.\"schema\", b.\"table\", t.\"metadata\"\n `,\n ],\n {mode: Mode.READONLY},\n );\n\n return {\n lastWatermark,\n backfillRequests: v.parse(result, backfillRequestsSchema),\n };\n }\n\n async getMinWatermarkForCatchup(): Promise<string | null> {\n const [{minWatermark}] = await this.#db<{minWatermark: string | null}[]>\n /*sql*/ `\n SELECT min(watermark) as \"minWatermark\" FROM ${this.#cdc('changeLog')}`;\n return minWatermark;\n }\n\n purgeRecordsBefore(watermark: string): Promise<number> {\n return runTx(this.#db, async sql => {\n // This NOWAIT pre-check is an optimization to abort the transaction\n // (and release associated resources) early.\n await sql<{watermark: string}[]>`\n SELECT watermark FROM ${this.#cdc('changeLog')}\n ORDER BY watermark, pos LIMIT 1\n FOR UPDATE NOWAIT\n `;\n // If the row is purge-locked by an incoming replication-manager, it\n // will assume ownership of the change-log before releasing the lock.\n // This DELETE blocks until the lock is released, allowing the change\n // in ownership to be reliably detected (and the transaction aborted)\n // in the subsequent check.\n const [{deleted}] = await sql<{deleted: bigint}[]>`\n WITH purged AS (\n DELETE FROM ${this.#cdc('changeLog')} WHERE watermark < ${watermark} \n RETURNING watermark, pos\n ) SELECT COUNT(*) as deleted FROM purged;`;\n\n const [{owner}] = await sql<ReplicationState[]>`\n SELECT * FROM ${this.#cdc('replicationState')} FOR SHARE`;\n if (owner !== this.#taskID) {\n throw new AbortError(\n `aborting changeLog purge to ${watermark} because ownership has been taken by ${owner}`,\n );\n }\n return Number(deleted);\n });\n }\n\n /**\n * @returns The size of the serialized entry, for memory / I/O estimations.\n */\n store(entry: WatermarkedChange) {\n const [watermark, [_tag, change]] = entry;\n // Eagerly stringify the JSON object so that the memory usage can be\n // more accurately measured (i.e. 
without an extra object traversal and\n // ad hoc memory counting heuristics).\n //\n // This essentially moves the stringify() computation out of the pg client,\n // which is instead configured to pass `string` objects directly as JSON\n // strings for JSON-valued columns (see TypeOptions.sendStringAsJson).\n const json = BigIntJSON.stringify(change);\n this.#approximateQueuedBytes += json.length;\n\n this.#queue.enqueue([\n 'change',\n watermark,\n json,\n isDataChange(change) ? null : change, // drop DataChanges to save memory\n ]);\n\n return json.length;\n }\n\n abort() {\n this.#queue.enqueue(['abort']);\n }\n\n status(s: DownstreamStatusMessage) {\n this.#queue.enqueue(s);\n }\n\n catchup(subscriber: Subscriber, mode: ReplicatorMode) {\n this.#queue.enqueue(['subscriber', {subscriber, mode}]);\n }\n\n #readyForMore: Resolver<void> | null = null;\n\n readyForMore(): Promise<void> | undefined {\n if (!this.#running) {\n return undefined;\n }\n if (\n this.#readyForMore === null &&\n this.#approximateQueuedBytes > this.#backPressureThresholdBytes\n ) {\n this.#lc.warn?.(\n `applying back pressure with ${this.#queue.size()} queued changes (~${(this.#approximateQueuedBytes / 1024 ** 2).toFixed(2)} MB)\\n` +\n `\\n` +\n `To inspect changeLog backlog in your change DB:\\n` +\n ` SELECT\\n` +\n ` (change->'relation'->>'schema') || '.' || (change->'relation'->>'name') AS table_name,\\n` +\n ` change->>'tag' AS operation,\\n` +\n ` COUNT(*) AS count\\n` +\n ` FROM \"<app_id>/cdc\".\"changeLog\"\\n` +\n ` GROUP BY 1, 2\\n` +\n ` ORDER BY 3 DESC\\n` +\n ` LIMIT 20;`,\n );\n this.#readyForMore = resolver();\n }\n return this.#readyForMore?.promise;\n }\n\n #maybeReleaseBackPressure() {\n if (\n this.#readyForMore !== null &&\n // Wait for at least 20% of the threshold to free up.\n this.#approximateQueuedBytes < this.#backPressureThresholdBytes * 0.8\n ) {\n this.#lc.info?.(\n `releasing back pressure with ${this.#queue.size()} queued changes (~${(this.#approximateQueuedBytes / 1024 ** 2).toFixed(2)} MB)`,\n );\n this.#readyForMore.resolve();\n this.#readyForMore = null;\n }\n }\n\n #stopped = promiseVoid;\n\n /**\n * Runs the storer loop until {@link stop()} is called, or an error is thrown.\n * Once {@link run()} completes, it can be called again.\n */\n async run() {\n assert(!this.#running, `storer is already running`);\n\n const {promise: stopped, resolve: signalStopped} = resolver();\n this.#running = true;\n this.#stopped = stopped;\n\n this.#lc.info?.('starting storer');\n let err: unknown;\n try {\n await this.#processQueue();\n } catch (e) {\n err = e; // used in finally\n throw e;\n } finally {\n // Release any pending backpressure so the upstream can proceed\n if (this.#readyForMore !== null) {\n this.#readyForMore.resolve();\n this.#readyForMore = null;\n }\n this.#cancelQueueEntries(\n this.#queue.drain().filter(entry => entry !== undefined),\n err,\n );\n this.#running = false;\n signalStopped();\n this.#lc.info?.('storer stopped');\n }\n }\n\n #cancelQueueEntries(queue: QueueEntry[], e: unknown) {\n if (queue.length === 0) {\n return;\n }\n this.#lc.info?.(\n `canceling ${queue.length} entries from the changeLog queue`,\n );\n const err = e instanceof Error ? 
e : new AbortError('server shutting down');\n for (const entry of queue) {\n if (entry === 'stop') {\n continue;\n }\n const type = entry[0];\n switch (type) {\n case 'subscriber': {\n // Disconnect subscribers waiting to be caught up so that they can\n // reconnect and try again.\n const {subscriber} = entry[1];\n this.#lc.info?.(`disconnecting ${subscriber.id}`);\n subscriber.fail(err);\n break;\n }\n }\n }\n }\n\n async #processQueue() {\n let tx: PendingTransaction | null = null;\n let msg: QueueEntry | false;\n\n const catchupQueue: SubscriberAndMode[] = [];\n try {\n while ((msg = await this.#queue.dequeue()) !== 'stop') {\n const [msgType] = msg;\n switch (msgType) {\n case 'ready': {\n const signalReady = msg[1];\n signalReady();\n continue;\n }\n case 'subscriber': {\n const subscriber = msg[1];\n if (tx) {\n catchupQueue.push(subscriber); // Wait for the current tx to complete.\n } else {\n await this.#startCatchup([subscriber]); // Catch up immediately.\n }\n continue;\n }\n case 'status':\n this.#onConsumed(msg);\n continue;\n case 'abort': {\n if (tx) {\n tx.pool.abort();\n await tx.pool.done();\n tx = null;\n }\n continue;\n }\n }\n // msgType === 'change'\n const [_, watermark, json, change] = msg;\n const tag = change?.tag;\n this.#approximateQueuedBytes -= json.length;\n\n if (tag === 'begin') {\n assert(!tx, 'received BEGIN in the middle of a transaction');\n const {promise, resolve, reject} = resolver<ReplicationState>();\n void promise.catch(() => {}); // handle rejections before the await\n tx = {\n pool: new TransactionPool(\n this.#lc.withContext('watermark', watermark),\n {\n mode: Mode.READ_COMMITTED,\n statementResponseTimeout: this.#statementTimeoutMs,\n },\n ),\n preCommitWatermark: watermark,\n pos: 0,\n startingReplicationState: promise,\n ack: !change.skipAck,\n };\n tx.pool.run(this.#db);\n // Acquire a lock on the replicationState row to detect and/or prevent\n // a concurrent ownership change.\n void tx.pool.process(tx => {\n tx<ReplicationState[]> /*sql*/ `\n SELECT * FROM ${this.#cdc('replicationState')} FOR UPDATE`.then(\n ([result]) => resolve(result),\n reject,\n );\n return [];\n });\n } else {\n assert(tx, () => `received change outside of transaction: ${json}`);\n tx.pos++;\n }\n\n const entry = {\n watermark: tag === 'commit' ? watermark : tx.preCommitWatermark,\n precommit: tag === 'commit' ? tx.preCommitWatermark : null,\n pos: tx.pos,\n change: json,\n };\n\n const processed = tx.pool.process(sql => [\n sql`INSERT INTO ${this.#cdc('changeLog')} ${sql(entry)}`,\n ...(change !== null && isSchemaChange(change)\n ? 
this.#trackBackfillMetadata(sql, change)\n : []),\n ]);\n\n if (tx.pos % 100 === 0) {\n // Backpressure is exerted on commit when awaiting tx.pool.done().\n // However, backpressure checks need to be regularly done for\n // very large transactions in order to avoid memory blowup.\n await processed;\n }\n this.#maybeReleaseBackPressure();\n\n if (tag === 'commit') {\n const {owner} = await tx.startingReplicationState;\n if (owner !== this.#taskID) {\n // Ownership change reflected in the replicationState read in 'begin'.\n tx.pool.fail(\n new AbortError(\n `changeLog ownership has been assumed by ${owner}`,\n ),\n );\n } else {\n // Update the replication state.\n const lastWatermark = watermark;\n void tx.pool.process(tx => [\n tx`\n UPDATE ${this.#cdc('replicationState')} SET ${tx({lastWatermark})}`,\n ]);\n tx.pool.setDone();\n }\n\n await tx.pool.done();\n\n // ACK the LSN to the upstream Postgres.\n if (tx.ack) {\n this.#onConsumed(['commit', change, {watermark}]);\n }\n tx = null;\n\n // Before beginning the next transaction, open a READONLY snapshot to\n // concurrently catchup any queued subscribers.\n await this.#startCatchup(catchupQueue.splice(0));\n } else if (tag === 'rollback') {\n // Aborted transactions are not stored in the changeLog. Abort the current tx\n // and process catchup of subscribers that were waiting for it to end.\n tx.pool.abort();\n await tx.pool.done();\n tx = null;\n\n await this.#startCatchup(catchupQueue.splice(0));\n }\n }\n } catch (e) {\n catchupQueue.forEach(({subscriber}) => subscriber.fail(e));\n throw e;\n }\n }\n\n async #startCatchup(subs: SubscriberAndMode[]) {\n if (subs.length === 0) {\n return;\n }\n\n const reader = new TransactionPool(\n this.#lc.withContext('pool', 'catchup'),\n {mode: Mode.READONLY},\n );\n reader.run(this.#db);\n\n let lastWatermark: string | undefined;\n try {\n // Ensure that the transaction has started (and is thus holding a snapshot\n // of the database) before continuing on to commit more changes. This is\n // done by performing a single read on the db, which determines the\n // snapshot for the REPEATABLE_READ transaction.\n [{lastWatermark}] = await reader.processReadTask(\n sql => sql<ReplicationState[]>`\n SELECT * FROM ${this.#cdc('replicationState')}\n `,\n );\n } catch (e) {\n subs.map(({subscriber}) => subscriber.fail(e));\n throw e;\n }\n\n // Run the actual catchup queries in the background. Errors are handled in\n // #catchup() by disconnecting the associated subscriber.\n void Promise.all(\n subs.map(sub => this.#catchup(sub, lastWatermark, reader)),\n ).finally(() => reader.setDone());\n }\n\n async #catchup(\n {subscriber: sub, mode}: SubscriberAndMode,\n lastWatermark: string,\n reader: TransactionPool,\n ) {\n try {\n await reader.processReadTask(async tx => {\n const start = Date.now();\n\n // When starting from initial-sync, there won't be a change with a watermark\n // equal to the replica version. This is the empty changeLog scenario.\n let watermarkFound = sub.watermark === this.#replicaVersion;\n let count = 0;\n let lastBatchConsumed: Promise<unknown> | undefined;\n\n for await (const entries of tx<ChangeEntry[]> /*sql*/ `\n SELECT watermark, change FROM ${this.#cdc('changeLog')}\n WHERE watermark >= ${sub.watermark}\n AND watermark <= ${lastWatermark}\n ORDER BY watermark, pos`.cursor(2000)) {\n // Wait for the last batch of entries to be consumed by the\n // subscriber before sending down the current batch. 
This pipelining\n // allows one batch of changes to be received from the change-db\n // while the previous batch of changes are sent to the subscriber,\n // resulting in flow control that caps the number of changes\n // referenced in memory to 2 * batch-size.\n const start = performance.now();\n await lastBatchConsumed;\n const elapsed = performance.now() - start;\n if (lastBatchConsumed) {\n (elapsed > 100 ? this.#lc.info : this.#lc.debug)?.(\n `waited ${elapsed.toFixed(3)} ms for ${sub.id} to consume last batch of catchup entries`,\n );\n }\n\n for (const entry of entries) {\n if (entry.watermark === sub.watermark) {\n // This should be the first entry.\n // Catchup starts from *after* the watermark.\n watermarkFound = true;\n } else if (watermarkFound) {\n lastBatchConsumed = sub.catchup(toDownstream(entry));\n count++;\n } else if (mode === 'backup') {\n throw new AutoResetSignal(\n `backup replica at watermark ${sub.watermark} is behind change db: ${entry.watermark})`,\n );\n } else {\n this.#lc.warn?.(\n `rejecting subscriber at watermark ${sub.watermark} (earliest watermark: ${entry.watermark})`,\n );\n sub.close(\n ErrorType.WatermarkTooOld,\n `earliest supported watermark is ${entry.watermark} (requested ${sub.watermark})`,\n );\n return;\n }\n }\n }\n if (watermarkFound) {\n await lastBatchConsumed;\n this.#lc.info?.(\n `caught up ${sub.id} with ${count} changes (${\n Date.now() - start\n } ms)`,\n );\n } else {\n this.#lc.warn?.(\n `subscriber at watermark ${sub.watermark} is ahead of latest watermark`,\n );\n }\n // Flushes the backlog of messages buffered during catchup and\n // allows the subscription to forward subsequent messages immediately.\n sub.setCaughtUp();\n });\n } catch (err) {\n this.#lc.error?.(`error while catching up subscriber ${sub.id}`, err);\n if (err instanceof AutoResetSignal) {\n await markResetRequired(this.#db, this.#shard);\n this.#onFatal(err);\n }\n sub.fail(err);\n }\n }\n\n /**\n * Returns the db statements necessary to track backfill and table metadata\n * presented in the `change`, if any.\n */\n #trackBackfillMetadata(sql: PostgresTransaction, change: SchemaChange) {\n const stmts: PendingQuery<Row[]>[] = [];\n\n switch (change.tag) {\n case 'update-table-metadata': {\n const {table, new: metadata} = change;\n stmts.push(this.#upsertTableMetadataStmt(sql, table, metadata));\n break;\n }\n\n case 'create-table': {\n const {spec, metadata, backfill} = change;\n if (metadata) {\n stmts.push(this.#upsertTableMetadataStmt(sql, spec, metadata));\n }\n if (backfill) {\n Object.entries(backfill).forEach(([col, backfill]) => {\n stmts.push(\n this.#upsertColumnBackfillStmt(sql, spec, col, backfill),\n );\n });\n }\n break;\n }\n\n case 'rename-table': {\n const {old} = change;\n const row = {schema: change.new.schema, table: change.new.name};\n stmts.push(\n sql`UPDATE ${this.#cdc('tableMetadata')} SET ${sql(row)}\n WHERE \"schema\" = ${old.schema} AND \"table\" = ${old.name}`,\n sql`UPDATE ${this.#cdc('backfilling')} SET ${sql(row)}\n WHERE \"schema\" = ${old.schema} AND \"table\" = ${old.name}`,\n );\n break;\n }\n\n case 'drop-table': {\n const {\n id: {schema, name},\n } = change;\n stmts.push(\n sql`DELETE FROM ${this.#cdc('tableMetadata')}\n WHERE \"schema\" = ${schema} AND \"table\" = ${name}`,\n sql`DELETE FROM ${this.#cdc('backfilling')}\n WHERE \"schema\" = ${schema} AND \"table\" = ${name}`,\n );\n break;\n }\n\n case 'add-column': {\n const {table, tableMetadata, column, backfill} = change;\n if (tableMetadata) {\n 
stmts.push(this.#upsertTableMetadataStmt(sql, table, tableMetadata));\n }\n if (backfill) {\n stmts.push(\n this.#upsertColumnBackfillStmt(sql, table, column.name, backfill),\n );\n }\n break;\n }\n\n case 'update-column': {\n const {\n table: {schema, name: table},\n old: {name: oldName},\n new: {name: newName},\n } = change;\n if (oldName !== newName) {\n stmts.push(\n sql`UPDATE ${this.#cdc('backfilling')} SET \"column\" = ${newName}\n WHERE \"schema\" = ${schema} AND \"table\" = ${table} AND \"column\" = ${oldName}`,\n );\n }\n break;\n }\n\n case 'drop-column': {\n const {\n table: {schema, name},\n column,\n } = change;\n stmts.push(\n sql`DELETE FROM ${this.#cdc('backfilling')}\n WHERE \"schema\" = ${schema} AND \"table\" = ${name} AND \"column\" = ${column}`,\n );\n break;\n }\n\n case 'backfill-completed': {\n const {\n relation: {schema, name: table, rowKey},\n columns,\n } = change;\n const cols = [...rowKey.columns, ...columns];\n stmts.push(\n sql`DELETE FROM ${this.#cdc('backfilling')}\n WHERE \"schema\" = ${schema} AND \"table\" = ${table} AND \"column\" IN ${sql(cols)}`,\n );\n }\n }\n return stmts;\n }\n\n #upsertTableMetadataStmt(\n sql: PostgresTransaction,\n {schema, name: table}: Identifier,\n metadata: TableMetadata,\n ) {\n const row: TableMetadataRow = {schema, table, metadata};\n return sql`\n INSERT INTO ${this.#cdc('tableMetadata')} ${sql(row)}\n ON CONFLICT (\"schema\", \"table\") \n DO UPDATE SET ${sql(row)};\n `;\n }\n\n #upsertColumnBackfillStmt(\n sql: PostgresTransaction,\n {schema, name: table}: Identifier,\n column: string,\n backfill: BackfillID,\n ) {\n const row: BackfillingColumn = {schema, table, column, backfill};\n return sql`\n INSERT INTO ${this.#cdc('backfilling')} ${sql(row)}\n ON CONFLICT (\"schema\", \"table\", \"column\") \n DO UPDATE SET ${sql(row)};\n `;\n }\n\n /**\n * Waits until all currently queued entries have been processed.\n * This is only used in tests.\n */\n async allProcessed() {\n if (this.#running) {\n const {promise, resolve} = resolver();\n this.#queue.enqueue(['ready', resolve]);\n await promise;\n }\n }\n\n stop() {\n if (this.#running) {\n this.#lc.info?.(`draining ${this.#queue.size()} changeLog entries`);\n this.#queue.enqueue('stop');\n }\n return this.#stopped;\n }\n}\n\nfunction toDownstream(entry: ChangeEntry): WatermarkedChange {\n const {watermark, change} = entry;\n switch (change.tag) {\n case 'begin':\n return [watermark, ['begin', change, {commitWatermark: watermark}]];\n case 'commit':\n return [watermark, ['commit', change, {watermark}]];\n case 'rollback':\n return [watermark, ['rollback', change]];\n default:\n return [watermark, ['data', change]];\n }\n}\n\nexport class PurgeLock {\n readonly #lc: LogContext;\n readonly #tx: TransactionPool;\n readonly replicaVersion: string;\n readonly minWatermark: string;\n\n constructor(\n lc: LogContext,\n tx: TransactionPool,\n replicaVersion: string,\n watermark: string,\n ) {\n this.#lc = lc;\n this.#tx = tx;\n this.replicaVersion = replicaVersion;\n this.minWatermark = watermark;\n }\n\n #released = false;\n\n async release() {\n if (this.#released) {\n return;\n }\n this.#released = true;\n this.#tx.setDone();\n await this.#tx\n .done()\n .catch(e => this.#lc.warn?.(`error from purge-lock release`, e));\n this.#lc.info?.(`released purge lock on ${this.minWatermark}`);\n }\n}\n\nexport class PurgeLocker {\n readonly #lc: LogContext;\n readonly #shard: ShardID;\n readonly #db: PostgresDB;\n\n constructor(lc: LogContext, shard: ShardID, db: PostgresDB) {\n 
this.#lc = lc.withContext('component', 'purge-locker');\n this.#shard = shard;\n this.#db = db;\n }\n\n // For readability in SQL statements.\n #cdc(table: string) {\n return this.#db(`${cdcSchema(this.#shard)}.${table}`);\n }\n\n async acquire() {\n const tx = new TransactionPool(this.#lc, {mode: Mode.READ_COMMITTED}).run(\n this.#db,\n );\n const row = await tx.processReadTask(\n sql => sql<{watermark: string}[]>`\n SELECT watermark FROM ${this.#cdc('changeLog')}\n ORDER BY watermark, pos LIMIT 1\n FOR SHARE \n `,\n );\n if (row.length === 0) {\n this.#lc.info?.(`changeLog is empty. No rows to purge-lock.`);\n tx.setDone();\n await tx.done();\n return null;\n }\n const [{watermark}] = row;\n const [{replicaVersion}] = await tx.processReadTask(\n sql => sql<{replicaVersion: string}[]>`\n SELECT \"replicaVersion\" FROM ${this.#cdc('replicationConfig')}\n `,\n );\n this.#lc.info?.(\n `locked watermark ${watermark} from being purged from replica@${replicaVersion}`,\n );\n return new PurgeLock(this.#lc, tx, replicaVersion, watermark);\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;AAwEA,IAAM,yBAAyB,eAAE,MAAM,sBAAsB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAqC7D,IAAa,SAAb,MAAuC;CACrC,KAAc;CACd;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA,SAAkB,IAAI,OAAmB;CACzC;CACA;CAEA,0BAA0B;CAC1B,WAAW;CAEX,YACE,IACA,OACA,QACA,kBACA,mBACA,IACA,gBACA,YACA,SACA,EAAC,iCAAiC,sBAClC;AACA,QAAA,KAAW,GAAG,YAAY,aAAa,aAAa;AACpD,QAAA,QAAc;AACd,QAAA,SAAe;AACf,QAAA,mBAAyB;AACzB,QAAA,oBAA0B;AAC1B,QAAA,KAAW;AACX,QAAA,iBAAuB;AACvB,QAAA,aAAmB;AACnB,QAAA,UAAgB;AAChB,QAAA,qBAA2B;EAE3B,MAAM,YAAY,mBAAmB;AACrC,QAAA,8BACG,UAAU,kBAAkB,UAAU,kBACvC;AAEF,QAAA,GAAS,OACP,gBAAgB,MAAA,6BAAmC,QAAQ,GAAG,QAAQ,EAAE,CAAC,iCAC5C,UAAU,kBAAkB,QAAQ,GAAG,QAAQ,EAAE,CAAC,iCAE/E,EAAC,WAAU,CACZ;;CAIH,KAAK,OAAe;AAClB,SAAO,MAAA,GAAS,GAAG,UAAU,MAAA,MAAY,CAAC,GAAG,QAAQ;;CAGvD,MAAM,gBAAgB,WAA8B;EAClD,MAAM,KAAK,MAAA;EACX,MAAM,QAAQ,MAAA;EACd,MAAM,eAAe,MAAA;EACrB,MAAM,gBAAgB,MAAA;EAEtB,MAAM,sBACJ,kBAAkB,OACd,eACA,GAAG,cAAc,KAAK;AAC5B,QAAA,GAAS,OAAO,yBAAyB,sBAAsB;EAC/D,MAAM,QAAQ,YAAY,KAAK;AAC/B,QAAM,EAAE,UAAU,MAAA,IAAU,mBAAmB,CAAC,OAAO,GAAG;GAAC;GAAO,cAAc;GAAoB,CAAC;EACrG,MAAM,WAAW,YAAY,KAAK,GAAG,OAAO,QAAQ,EAAE;AACtD,QAAA,GAAS,OACP,wBAAwB,oBAAoB,IAAI,QAAQ,MACzD;AAED,MAAI,UAKG,WAAU,SAAS;;CAI5B,MAAM,yCAGH;EACD,MAAM,CAAC,CAAC,EAAC,kBAAiB,UAAU,MAAM,MACxC,MAAA,KACA,QAAO,CACL,GAA8B;sCACA,MAAA,IAAU,mBAAmB,IAK3D,GAAG;;;;;;;;;iBASM,MAAA,IAAU,cAAc,CAAC;sBACpB,MAAA,IAAU,gBAAgB,CAAC;;;UAI1C,EACD,EAAC,MAAM,UAAc,CACtB;AAED,SAAO;GACL;GACA,kBAAkB,MAAQ,QAAQ,uBAAuB;GAC1D;;CAGH,MAAM,4BAAoD;EACxD,MAAM,CAAC,EAAC,kBAAiB,MAAM,MAAA,EACvB;qDACyC,MAAA,IAAU,YAAY;AACvE,SAAO;;CAGT,mBAAmB,WAAoC;AACrD,SAAO,MAAM,MAAA,IAAU,OAAM,QAAO;AAGlC,SAAM,GAA0B;kCACJ,MAAA,IAAU,YAAY,CAAC;;;;GASnD,MAAM,CAAC,EAAC,aAAY,MAAM,GAAwB;;wBAEhC,MAAA,IAAU,YAAY,CAAC,qBAAqB,UAAU;;;GAIxE,MAAM,CAAC,EAAC,WAAU,MAAM,GAAuB;wBAC7B,MAAA,IAAU,mBAAmB,CAAC;AAChD,OAAI,UAAU,MAAA,OACZ,OAAM,IAAI,WACR,+BAA+B,UAAU,uCAAuC,QACjF;AAEH,UAAO,OAAO,QAAQ;IACtB;;;;;CAMJ,MAAM,OAA0B;EAC9B,MAAM,CAAC,WAAW,CAAC,MAAM,WAAW;EAQpC,MAAM,OAAO,WAAW,UAAU,OAAO;AACzC,QAAA,0BAAgC,KAAK;AAErC,QAAA,MAAY,QAAQ;GAClB;GACA;GACA;GACA,aAAa,OAAO,GAAG,OAAO;GAC/B,CAAC;AAEF,SAAO,KAAK;;CAGd,QAAQ;AACN,QAAA,MAAY,QAAQ,CAAC,QAAQ,CAAC;;CAGhC,OAAO,GAA4B;AACjC,QAAA,MAAY,QAAQ,EAAE;;CAGxB,QAAQ,YAAwB,MAAsB;AACpD,QAAA,MAAY,QAAQ,CAAC,cAAc;GAAC;GAAY;GAAK,CAAC,CAAC;;CAGzD,gBAAuC;CAEvC,eAA0C;AACxC,MAAI,CAAC,MAAA,QACH;AAEF,MACE,MAAA,iBAAuB,QACvB,MAAA,yBAA+B,MAAA,4BAC/B;AACA,SAAA,GAAS,OACP,+BAA+B,MAAA,MAAY,MAAM,CAAC,qBAAqB,MAAA,yBAA+B,QAAQ,GAAG,QAAQ,EAAE,CAAC,4SAW7H;AACD,SAAA,eAAqB,UAAU;;AAEjC,SAAO,MAAA,
cAAoB;;CAG7B,4BAA4B;AAC1B,MACE,MAAA,iBAAuB,QAEvB,MAAA,yBAA+B,MAAA,6BAAmC,IAClE;AACA,SAAA,GAAS,OACP,gCAAgC,MAAA,MAAY,MAAM,CAAC,qBAAqB,MAAA,yBAA+B,QAAQ,GAAG,QAAQ,EAAE,CAAC,MAC9H;AACD,SAAA,aAAmB,SAAS;AAC5B,SAAA,eAAqB;;;CAIzB,WAAW;;;;;CAMX,MAAM,MAAM;AACV,SAAO,CAAC,MAAA,SAAe,4BAA4B;EAEnD,MAAM,EAAC,SAAS,SAAS,SAAS,kBAAiB,UAAU;AAC7D,QAAA,UAAgB;AAChB,QAAA,UAAgB;AAEhB,QAAA,GAAS,OAAO,kBAAkB;EAClC,IAAI;AACJ,MAAI;AACF,SAAM,MAAA,cAAoB;WACnB,GAAG;AACV,SAAM;AACN,SAAM;YACE;AAER,OAAI,MAAA,iBAAuB,MAAM;AAC/B,UAAA,aAAmB,SAAS;AAC5B,UAAA,eAAqB;;AAEvB,SAAA,mBACE,MAAA,MAAY,OAAO,CAAC,QAAO,UAAS,UAAU,KAAA,EAAU,EACxD,IACD;AACD,SAAA,UAAgB;AAChB,kBAAe;AACf,SAAA,GAAS,OAAO,iBAAiB;;;CAIrC,oBAAoB,OAAqB,GAAY;AACnD,MAAI,MAAM,WAAW,EACnB;AAEF,QAAA,GAAS,OACP,aAAa,MAAM,OAAO,mCAC3B;EACD,MAAM,MAAM,aAAa,QAAQ,IAAI,IAAI,WAAW,uBAAuB;AAC3E,OAAK,MAAM,SAAS,OAAO;AACzB,OAAI,UAAU,OACZ;AAGF,WADa,MAAM,IACnB;IACE,KAAK,cAAc;KAGjB,MAAM,EAAC,eAAc,MAAM;AAC3B,WAAA,GAAS,OAAO,iBAAiB,WAAW,KAAK;AACjD,gBAAW,KAAK,IAAI;AACpB;;;;;CAMR,OAAA,eAAsB;EACpB,IAAI,KAAgC;EACpC,IAAI;EAEJ,MAAM,eAAoC,EAAE;AAC5C,MAAI;AACF,WAAQ,MAAM,MAAM,MAAA,MAAY,SAAS,MAAM,QAAQ;IACrD,MAAM,CAAC,WAAW;AAClB,YAAQ,SAAR;KACE,KAAK,SAAS;MACZ,MAAM,cAAc,IAAI;AACxB,mBAAa;AACb;;KAEF,KAAK,cAAc;MACjB,MAAM,aAAa,IAAI;AACvB,UAAI,GACF,cAAa,KAAK,WAAW;UAE7B,OAAM,MAAA,aAAmB,CAAC,WAAW,CAAC;AAExC;;KAEF,KAAK;AACH,YAAA,WAAiB,IAAI;AACrB;KACF,KAAK;AACH,UAAI,IAAI;AACN,UAAG,KAAK,OAAO;AACf,aAAM,GAAG,KAAK,MAAM;AACpB,YAAK;;AAEP;;IAIJ,MAAM,CAAC,GAAG,WAAW,MAAM,UAAU;IACrC,MAAM,MAAM,QAAQ;AACpB,UAAA,0BAAgC,KAAK;AAErC,QAAI,QAAQ,SAAS;AACnB,YAAO,CAAC,IAAI,gDAAgD;KAC5D,MAAM,EAAC,SAAS,SAAS,WAAU,UAA4B;AAC1D,aAAQ,YAAY,GAAG;AAC5B,UAAK;MACH,MAAM,IAAI,gBACR,MAAA,GAAS,YAAY,aAAa,UAAU,EAC5C;OACE,MAAM;OACN,0BAA0B,MAAA;OAC3B,CACF;MACD,oBAAoB;MACpB,KAAK;MACL,0BAA0B;MAC1B,KAAK,CAAC,OAAO;MACd;AACD,QAAG,KAAK,IAAI,MAAA,GAAS;AAGhB,QAAG,KAAK,SAAQ,OAAM;AACzB,QAA+B;0BACjB,MAAA,IAAU,mBAAmB,CAAC,aAAa,MACtD,CAAC,YAAY,QAAQ,OAAO,EAC7B,OACD;AACD,aAAO,EAAE;OACT;WACG;AACL,YAAO,UAAU,2CAA2C,OAAO;AACnE,QAAG;;IAGL,MAAM,QAAQ;KACZ,WAAW,QAAQ,WAAW,YAAY,GAAG;KAC7C,WAAW,QAAQ,WAAW,GAAG,qBAAqB;KACtD,KAAK,GAAG;KACR,QAAQ;KACT;IAED,MAAM,YAAY,GAAG,KAAK,SAAQ,QAAO,CACvC,GAAG,eAAe,MAAA,IAAU,YAAY,CAAC,GAAG,IAAI,MAAM,IACtD,GAAI,WAAW,QAAQ,eAAe,OAAO,GACzC,MAAA,sBAA4B,KAAK,OAAO,GACxC,EAAE,CACP,CAAC;AAEF,QAAI,GAAG,MAAM,QAAQ,EAInB,OAAM;AAER,UAAA,0BAAgC;AAEhC,QAAI,QAAQ,UAAU;KACpB,MAAM,EAAC,UAAS,MAAM,GAAG;AACzB,SAAI,UAAU,MAAA,OAEZ,IAAG,KAAK,KACN,IAAI,WACF,2CAA2C,QAC5C,CACF;UACI;MAEL,MAAM,gBAAgB;AACjB,SAAG,KAAK,SAAQ,OAAM,CACzB,EAAE;qBACK,MAAA,IAAU,mBAAmB,CAAC,OAAO,GAAG,EAAC,eAAc,CAAC,GAChE,CAAC;AACF,SAAG,KAAK,SAAS;;AAGnB,WAAM,GAAG,KAAK,MAAM;AAGpB,SAAI,GAAG,IACL,OAAA,WAAiB;MAAC;MAAU;MAAQ,EAAC,WAAU;MAAC,CAAC;AAEnD,UAAK;AAIL,WAAM,MAAA,aAAmB,aAAa,OAAO,EAAE,CAAC;eACvC,QAAQ,YAAY;AAG7B,QAAG,KAAK,OAAO;AACf,WAAM,GAAG,KAAK,MAAM;AACpB,UAAK;AAEL,WAAM,MAAA,aAAmB,aAAa,OAAO,EAAE,CAAC;;;WAG7C,GAAG;AACV,gBAAa,SAAS,EAAC,iBAAgB,WAAW,KAAK,EAAE,CAAC;AAC1D,SAAM;;;CAIV,OAAA,aAAoB,MAA2B;AAC7C,MAAI,KAAK,WAAW,EAClB;EAGF,MAAM,SAAS,IAAI,gBACjB,MAAA,GAAS,YAAY,QAAQ,UAAU,EACvC,EAAC,MAAM,UAAc,CACtB;AACD,SAAO,IAAI,MAAA,GAAS;EAEpB,IAAI;AACJ,MAAI;AAKF,IAAC,CAAC,kBAAkB,MAAM,OAAO,iBAC/B,QAAO,GAAuB;wBACd,MAAA,IAAU,mBAAmB,CAAC;QAE/C;WACM,GAAG;AACV,QAAK,KAAK,EAAC,iBAAgB,WAAW,KAAK,EAAE,CAAC;AAC9C,SAAM;;AAKH,UAAQ,IACX,KAAK,KAAI,QAAO,MAAA,QAAc,KAAK,eAAe,OAAO,CAAC,CAC3D,CAAC,cAAc,OAAO,SAAS,CAAC;;CAGnC,OAAA,QACE,EAAC,YAAY,KAAK,QAClB,eACA,QACA;AACA,MAAI;AACF,SAAM,OAAO,gBAAgB,OAAM,OAAM;IACvC,MAAM,QAAQ,KAAK,KAAK;IAIxB,IAAI,iBAAiB,IAAI,cAAc,MAAA;IACvC,IAAI,QAAQ;IACZ,IAAI;AAEJ,eAAW,MAAM,WAAW,EAA0B;0CACpB,MAAA,IAAU,YAAY,CAAC;gCACjC,I
AAI,UAAU;gCACd,cAAc;oCACV,OAAO,IAAK,EAAE;KAOxC,MAAM,QAAQ,YAAY,KAAK;AAC/B,WAAM;KACN,MAAM,UAAU,YAAY,KAAK,GAAG;AACpC,SAAI,kBACF,EAAC,UAAU,MAAM,MAAA,GAAS,OAAO,MAAA,GAAS,SACxC,UAAU,QAAQ,QAAQ,EAAE,CAAC,UAAU,IAAI,GAAG,2CAC/C;AAGH,UAAK,MAAM,SAAS,QAClB,KAAI,MAAM,cAAc,IAAI,UAG1B,kBAAiB;cACR,gBAAgB;AACzB,0BAAoB,IAAI,QAAQ,aAAa,MAAM,CAAC;AACpD;gBACS,SAAS,SAClB,OAAM,IAAI,gBACR,+BAA+B,IAAI,UAAU,wBAAwB,MAAM,UAAU,GACtF;UACI;AACL,YAAA,GAAS,OACP,qCAAqC,IAAI,UAAU,wBAAwB,MAAM,UAAU,GAC5F;AACD,UAAI,MACF,GACA,mCAAmC,MAAM,UAAU,cAAc,IAAI,UAAU,GAChF;AACD;;;AAIN,QAAI,gBAAgB;AAClB,WAAM;AACN,WAAA,GAAS,OACP,aAAa,IAAI,GAAG,QAAQ,MAAM,YAChC,KAAK,KAAK,GAAG,MACd,MACF;UAED,OAAA,GAAS,OACP,2BAA2B,IAAI,UAAU,+BAC1C;AAIH,QAAI,aAAa;KACjB;WACK,KAAK;AACZ,SAAA,GAAS,QAAQ,sCAAsC,IAAI,MAAM,IAAI;AACrE,OAAI,eAAe,iBAAiB;AAClC,UAAM,kBAAkB,MAAA,IAAU,MAAA,MAAY;AAC9C,UAAA,QAAc,IAAI;;AAEpB,OAAI,KAAK,IAAI;;;;;;;CAQjB,uBAAuB,KAA0B,QAAsB;EACrE,MAAM,QAA+B,EAAE;AAEvC,UAAQ,OAAO,KAAf;GACE,KAAK,yBAAyB;IAC5B,MAAM,EAAC,OAAO,KAAK,aAAY;AAC/B,UAAM,KAAK,MAAA,wBAA8B,KAAK,OAAO,SAAS,CAAC;AAC/D;;GAGF,KAAK,gBAAgB;IACnB,MAAM,EAAC,MAAM,UAAU,aAAY;AACnC,QAAI,SACF,OAAM,KAAK,MAAA,wBAA8B,KAAK,MAAM,SAAS,CAAC;AAEhE,QAAI,SACF,QAAO,QAAQ,SAAS,CAAC,SAAS,CAAC,KAAK,cAAc;AACpD,WAAM,KACJ,MAAA,yBAA+B,KAAK,MAAM,KAAK,SAAS,CACzD;MACD;AAEJ;;GAGF,KAAK,gBAAgB;IACnB,MAAM,EAAC,QAAO;IACd,MAAM,MAAM;KAAC,QAAQ,OAAO,IAAI;KAAQ,OAAO,OAAO,IAAI;KAAK;AAC/D,UAAM,KACJ,GAAG,UAAU,MAAA,IAAU,gBAAgB,CAAC,OAAO,IAAI,IAAI,CAAC;mCAC/B,IAAI,OAAO,iBAAiB,IAAI,QACzD,GAAG,UAAU,MAAA,IAAU,cAAc,CAAC,OAAO,IAAI,IAAI,CAAC;mCAC7B,IAAI,OAAO,iBAAiB,IAAI,OAC1D;AACD;;GAGF,KAAK,cAAc;IACjB,MAAM,EACJ,IAAI,EAAC,QAAQ,WACX;AACJ,UAAM,KACJ,GAAG,eAAe,MAAA,IAAU,gBAAgB,CAAC;mCACpB,OAAO,iBAAiB,QACjD,GAAG,eAAe,MAAA,IAAU,cAAc,CAAC;mCAClB,OAAO,iBAAiB,OAClD;AACD;;GAGF,KAAK,cAAc;IACjB,MAAM,EAAC,OAAO,eAAe,QAAQ,aAAY;AACjD,QAAI,cACF,OAAM,KAAK,MAAA,wBAA8B,KAAK,OAAO,cAAc,CAAC;AAEtE,QAAI,SACF,OAAM,KACJ,MAAA,yBAA+B,KAAK,OAAO,OAAO,MAAM,SAAS,CAClE;AAEH;;GAGF,KAAK,iBAAiB;IACpB,MAAM,EACJ,OAAO,EAAC,QAAQ,MAAM,SACtB,KAAK,EAAC,MAAM,WACZ,KAAK,EAAC,MAAM,cACV;AACJ,QAAI,YAAY,QACd,OAAM,KACJ,GAAG,UAAU,MAAA,IAAU,cAAc,CAAC,kBAAkB,QAAQ;mCACzC,OAAO,iBAAiB,MAAM,kBAAkB,UACxE;AAEH;;GAGF,KAAK,eAAe;IAClB,MAAM,EACJ,OAAO,EAAC,QAAQ,QAChB,WACE;AACJ,UAAM,KACJ,GAAG,eAAe,MAAA,IAAU,cAAc,CAAC;mCAClB,OAAO,iBAAiB,KAAK,kBAAkB,SACzE;AACD;;GAGF,KAAK,sBAAsB;IACzB,MAAM,EACJ,UAAU,EAAC,QAAQ,MAAM,OAAO,UAChC,YACE;IACJ,MAAM,OAAO,CAAC,GAAG,OAAO,SAAS,GAAG,QAAQ;AAC5C,UAAM,KACJ,GAAG,eAAe,MAAA,IAAU,cAAc,CAAC;mCAClB,OAAO,iBAAiB,MAAM,mBAAmB,IAAI,KAAK,GACpF;;;AAGL,SAAO;;CAGT,yBACE,KACA,EAAC,QAAQ,MAAM,SACf,UACA;EACA,MAAM,MAAwB;GAAC;GAAQ;GAAO;GAAS;AACvD,SAAO,GAAG;sBACQ,MAAA,IAAU,gBAAgB,CAAC,GAAG,IAAI,IAAI,CAAC;;0BAEnC,IAAI,IAAI,CAAC;;;CAIjC,0BACE,KACA,EAAC,QAAQ,MAAM,SACf,QACA,UACA;EACA,MAAM,MAAyB;GAAC;GAAQ;GAAO;GAAQ;GAAS;AAChE,SAAO,GAAG;sBACQ,MAAA,IAAU,cAAc,CAAC,GAAG,IAAI,IAAI,CAAC;;0BAEjC,IAAI,IAAI,CAAC;;;;;;;CAQjC,MAAM,eAAe;AACnB,MAAI,MAAA,SAAe;GACjB,MAAM,EAAC,SAAS,YAAW,UAAU;AACrC,SAAA,MAAY,QAAQ,CAAC,SAAS,QAAQ,CAAC;AACvC,SAAM;;;CAIV,OAAO;AACL,MAAI,MAAA,SAAe;AACjB,SAAA,GAAS,OAAO,YAAY,MAAA,MAAY,MAAM,CAAC,oBAAoB;AACnE,SAAA,MAAY,QAAQ,OAAO;;AAE7B,SAAO,MAAA;;;AAIX,SAAS,aAAa,OAAuC;CAC3D,MAAM,EAAC,WAAW,WAAU;AAC5B,SAAQ,OAAO,KAAf;EACE,KAAK,QACH,QAAO,CAAC,WAAW;GAAC;GAAS;GAAQ,EAAC,iBAAiB,WAAU;GAAC,CAAC;EACrE,KAAK,SACH,QAAO,CAAC,WAAW;GAAC;GAAU;GAAQ,EAAC,WAAU;GAAC,CAAC;EACrD,KAAK,WACH,QAAO,CAAC,WAAW,CAAC,YAAY,OAAO,CAAC;EAC1C,QACE,QAAO,CAAC,WAAW,CAAC,QAAQ,OAAO,CAAC;;;AAI1C,IAAa,YAAb,MAAuB;CACrB;CACA;CACA;CACA;CAEA,YACE,IACA,IACA,gBACA,WACA;AACA,QAAA,KAAW;AACX,QAAA,KAAW;AACX,OAAK,iBAAiB;AACtB,OAAK,eAAe;;CAGtB,YAAY;CA
EZ,MAAM,UAAU;AACd,MAAI,MAAA,SACF;AAEF,QAAA,WAAiB;AACjB,QAAA,GAAS,SAAS;AAClB,QAAM,MAAA,GACH,MAAM,CACN,OAAM,MAAK,MAAA,GAAS,OAAO,iCAAiC,EAAE,CAAC;AAClE,QAAA,GAAS,OAAO,0BAA0B,KAAK,eAAe;;;AAIlE,IAAa,cAAb,MAAyB;CACvB;CACA;CACA;CAEA,YAAY,IAAgB,OAAgB,IAAgB;AAC1D,QAAA,KAAW,GAAG,YAAY,aAAa,eAAe;AACtD,QAAA,QAAc;AACd,QAAA,KAAW;;CAIb,KAAK,OAAe;AAClB,SAAO,MAAA,GAAS,GAAG,UAAU,MAAA,MAAY,CAAC,GAAG,QAAQ;;CAGvD,MAAM,UAAU;EACd,MAAM,KAAK,IAAI,gBAAgB,MAAA,IAAU,EAAC,MAAM,gBAAoB,CAAC,CAAC,IACpE,MAAA,GACD;EACD,MAAM,MAAM,MAAM,GAAG,iBACnB,QAAO,GAA0B;8BACT,MAAA,IAAU,YAAY,CAAC;;;MAIhD;AACD,MAAI,IAAI,WAAW,GAAG;AACpB,SAAA,GAAS,OAAO,6CAA6C;AAC7D,MAAG,SAAS;AACZ,SAAM,GAAG,MAAM;AACf,UAAO;;EAET,MAAM,CAAC,EAAC,eAAc;EACtB,MAAM,CAAC,EAAC,oBAAmB,MAAM,GAAG,iBAClC,QAAO,GAA+B;uCACL,MAAA,IAAU,oBAAoB,CAAC;QAEjE;AACD,QAAA,GAAS,OACP,oBAAoB,UAAU,kCAAkC,iBACjE;AACD,SAAO,IAAI,UAAU,MAAA,IAAU,IAAI,gBAAgB,UAAU"}
1
+ {"version":3,"file":"storer.js","names":["#lc","#shard","#taskID","#discoveryAddress","#discoveryProtocol","#db","#replicaVersion","#onConsumed","#onFatal","#queue","#backPressureThresholdBytes","#statementTimeoutMs","#cdc","#approximateQueuedBytes","#running","#readyForMore","#stopped","#processQueue","#cancelQueueEntries","#startCatchup","#trackBackfillMetadata","#maybeReleaseBackPressure","#catchup","#upsertTableMetadataStmt","#upsertColumnBackfillStmt","#tx","#released"],"sources":["../../../../../../zero-cache/src/services/change-streamer/storer.ts"],"sourcesContent":["import {getHeapStatistics} from 'node:v8';\nimport type {LogContext} from '@rocicorp/logger';\nimport {resolver, type Resolver} from '@rocicorp/resolver';\nimport {type PendingQuery, type Row} from 'postgres';\nimport {AbortError} from '../../../../shared/src/abort-error.ts';\nimport {assert} from '../../../../shared/src/asserts.ts';\nimport {BigIntJSON} from '../../../../shared/src/bigint-json.ts';\nimport {Queue} from '../../../../shared/src/queue.ts';\nimport {promiseVoid} from '../../../../shared/src/resolved-promises.ts';\nimport * as v from '../../../../shared/src/valita.ts';\nimport * as Mode from '../../db/mode-enum.ts';\nimport {runTx} from '../../db/run-transaction.ts';\nimport {TransactionPool} from '../../db/transaction-pool.ts';\nimport {type PostgresDB, type PostgresTransaction} from '../../types/pg.ts';\nimport {cdcSchema, type ShardID} from '../../types/shards.ts';\nimport {\n backfillRequestSchema,\n isDataChange,\n isSchemaChange,\n type BackfillID,\n type BackfillRequest,\n type Change,\n type DataChange,\n type Identifier,\n type SchemaChange,\n type TableMetadata,\n} from '../change-source/protocol/current.ts';\nimport {type Commit} from '../change-source/protocol/current/downstream.ts';\nimport type {\n DownstreamStatusMessage,\n UpstreamStatusMessage,\n} from '../change-source/protocol/current/status.ts';\nimport type {ReplicatorMode} from '../replicator/replicator.ts';\nimport type {Service} from '../service.ts';\nimport type {WatermarkedChange} from './change-streamer-service.ts';\nimport {type ChangeEntry} from './change-streamer.ts';\nimport * as ErrorType from './error-type-enum.ts';\nimport {\n AutoResetSignal,\n markResetRequired,\n type BackfillingColumn,\n type TableMetadataRow,\n} from './schema/tables.ts';\nimport type {Subscriber} from './subscriber.ts';\n\ntype SubscriberAndMode = {\n subscriber: Subscriber;\n mode: ReplicatorMode;\n};\n\ntype QueueEntry =\n | [\n 'change',\n watermark: string,\n json: string,\n orig: Exclude<Change, DataChange> | null, // null for DataChanges\n ]\n | ['ready', callback: () => void]\n | ['subscriber', SubscriberAndMode]\n | DownstreamStatusMessage\n | ['abort']\n | 'stop';\n\ntype PendingTransaction = {\n pool: TransactionPool;\n preCommitWatermark: string;\n pos: number;\n startingReplicationState: Promise<ReplicationOwner>;\n ack: boolean;\n};\n\ntype ReplicationOwner = {\n owner: string | null;\n};\n\nconst backfillRequestsSchema = v.array(backfillRequestSchema);\n\nexport type TuningOptions = {\n backPressureLimitHeapProportion: number;\n statementTimeoutMs: number;\n};\n\n/**\n * Handles the storage of changes and the catchup of subscribers\n * that are behind.\n *\n * In the context of catchup and cleanup, it is the responsibility of the\n * Storer to decide whether a client can be caught up, or whether the\n * changes needed to catch a client up have been purged.\n *\n * **Maintained invariant**: The Change DB is only empty for a\n * 
completely new replica (i.e. initial-sync with no changes from the\n * replication stream).\n * * In this case, all new subscribers are expected start from the\n * `replicaVersion`, which is the version at which initial sync\n * was performed, and any attempts to catchup from a different\n * point fail.\n *\n * Conversely, if non-initial changes have flowed through the system\n * (i.e. via the replication stream), the ChangeDB must *not* be empty,\n * and the earliest change in the `changeLog` represents the earliest\n * \"commit\" from (after) which a subscriber can be caught up.\n * * Any attempts to catchup from an earlier point must fail with\n * a `WatermarkTooOld` error.\n * * Failure to do so could result in streaming changes to the\n * subscriber such that there is a gap in its replication history.\n *\n * Note: Subscribers (i.e. `incremental-syncer`) consider an \"error\" signal\n * an unrecoverable error and shut down in response. This allows the\n * production system to replace it with a new task and fresh copy of the\n * replica backup.\n */\nexport class Storer implements Service {\n readonly id = 'storer';\n readonly #lc: LogContext;\n readonly #shard: ShardID;\n readonly #taskID: string;\n readonly #discoveryAddress: string;\n readonly #discoveryProtocol: string;\n readonly #db: PostgresDB;\n readonly #replicaVersion: string;\n readonly #onConsumed: (c: Commit | UpstreamStatusMessage) => void;\n readonly #onFatal: (err: Error) => void;\n readonly #queue = new Queue<QueueEntry>();\n readonly #backPressureThresholdBytes: number;\n readonly #statementTimeoutMs: number;\n\n #approximateQueuedBytes = 0;\n #running = false;\n\n constructor(\n lc: LogContext,\n shard: ShardID,\n taskID: string,\n discoveryAddress: string,\n discoveryProtocol: string,\n db: PostgresDB,\n replicaVersion: string,\n onConsumed: (c: Commit | UpstreamStatusMessage) => void,\n onFatal: (err: Error) => void,\n {backPressureLimitHeapProportion, statementTimeoutMs}: TuningOptions,\n ) {\n this.#lc = lc.withContext('component', 'change-log');\n this.#shard = shard;\n this.#taskID = taskID;\n this.#discoveryAddress = discoveryAddress;\n this.#discoveryProtocol = discoveryProtocol;\n this.#db = db;\n this.#replicaVersion = replicaVersion;\n this.#onConsumed = onConsumed;\n this.#onFatal = onFatal;\n this.#statementTimeoutMs = statementTimeoutMs;\n\n const heapStats = getHeapStatistics();\n this.#backPressureThresholdBytes =\n (heapStats.heap_size_limit - heapStats.used_heap_size) *\n backPressureLimitHeapProportion;\n\n this.#lc.info?.(\n `Using up to ${(this.#backPressureThresholdBytes / 1024 ** 2).toFixed(2)} MB of ` +\n `--max-old-space-size (~${(heapStats.heap_size_limit / 1024 ** 2).toFixed(2)} MB) ` +\n `to absorb upstream spikes`,\n {heapStats},\n );\n }\n\n // For readability in SQL statements.\n #cdc(table: string) {\n return this.#db(`${cdcSchema(this.#shard)}.${table}`);\n }\n\n async assumeOwnership(purgeLock?: PurgeLock | null) {\n const db = this.#db;\n const owner = this.#taskID;\n const ownerAddress = this.#discoveryAddress;\n const ownerProtocol = this.#discoveryProtocol;\n // we omit `ws://` so that old view syncer versions that are not expecting the protocol continue to not get it\n const addressWithProtocol =\n ownerProtocol === 'ws'\n ? 
ownerAddress\n : `${ownerProtocol}://${ownerAddress}`;\n this.#lc.info?.(`assuming ownership at ${addressWithProtocol}`);\n const start = performance.now();\n await db`UPDATE ${this.#cdc('replicationState')} SET ${db({owner, ownerAddress: addressWithProtocol})}`;\n const elapsed = (performance.now() - start).toFixed(2);\n this.#lc.info?.(\n `assumed ownership at ${addressWithProtocol} (${elapsed} ms)`,\n );\n\n if (purgeLock) {\n // Once ownership has been assumed, any initial purge-lock preventing the\n // purging of change-log records can be released, as a change-streamer\n // that was attempting to purge records will correspondingly abort on the\n // ownership check.\n void purgeLock.release();\n }\n }\n\n async getStartStreamInitializationParameters(): Promise<{\n lastWatermark: string;\n backfillRequests: BackfillRequest[];\n }> {\n const [[{lastWatermark}], result] = await runTx(\n this.#db,\n sql => [\n sql<{lastWatermark: string}[]>`\n SELECT \"lastWatermark\" FROM ${this.#cdc('replicationState')}`,\n\n // Formats a BackfillRequest using json_object_agg() to construct the\n // `columns` object. It is LEFT JOIN'ed with the `tableMetadata` table\n // to make it optional and possibly `null`.\n sql`\n SELECT \n json_build_object(\n 'schema', b.\"schema\",\n 'name', b.\"table\",\n 'metadata', t.\"metadata\"\n ) as \"table\",\n json_object_agg(b.\"column\", b.\"backfill\") \n as \"columns\"\n FROM ${this.#cdc('backfilling')} as b\n LEFT JOIN ${this.#cdc('tableMetadata')} as t\n ON (b.\"schema\" = t.\"schema\" AND b.\"table\" = t.\"table\")\n GROUP BY b.\"schema\", b.\"table\", t.\"metadata\"\n `,\n ],\n {mode: Mode.READONLY},\n );\n\n return {\n lastWatermark,\n backfillRequests: v.parse(result, backfillRequestsSchema),\n };\n }\n\n async getMinWatermarkForCatchup(): Promise<string | null> {\n const [{minWatermark}] = await this.#db<{minWatermark: string | null}[]>\n /*sql*/ `\n SELECT min(watermark) as \"minWatermark\" FROM ${this.#cdc('changeLog')}`;\n return minWatermark;\n }\n\n purgeRecordsBefore(watermark: string): Promise<number> {\n return runTx(this.#db, async sql => {\n // This NOWAIT pre-check is an optimization to abort the transaction\n // (and release associated resources) early.\n await sql<{watermark: string}[]>`\n SELECT watermark FROM ${this.#cdc('changeLog')}\n ORDER BY watermark, pos LIMIT 1\n FOR UPDATE NOWAIT\n `;\n // If the row is purge-locked by an incoming replication-manager, it\n // will assume ownership of the change-log before releasing the lock.\n // This DELETE blocks until the lock is released, allowing the change\n // in ownership to be reliably detected (and the transaction aborted)\n // in the subsequent check.\n const [{deleted}] = await sql<{deleted: bigint}[]>`\n WITH purged AS (\n DELETE FROM ${this.#cdc('changeLog')} WHERE watermark < ${watermark} \n RETURNING watermark, pos\n ) SELECT COUNT(*) as deleted FROM purged;`;\n\n const [{owner}] = await sql<ReplicationOwner[]>`\n SELECT \"owner\" FROM ${this.#cdc('replicationState')} FOR SHARE`;\n if (owner !== this.#taskID) {\n throw new AbortError(\n `aborting changeLog purge to ${watermark} because ownership has been taken by ${owner}`,\n );\n }\n return Number(deleted);\n });\n }\n\n /**\n * @returns The size of the serialized entry, for memory / I/O estimations.\n */\n store(entry: WatermarkedChange) {\n const [watermark, [_tag, change]] = entry;\n // Eagerly stringify the JSON object so that the memory usage can be\n // more accurately measured (i.e. 
without an extra object traversal and\n // ad hoc memory counting heuristics).\n //\n // This essentially moves the stringify() computation out of the pg client,\n // which is instead configured to pass `string` objects directly as JSON\n // strings for JSON-valued columns (see TypeOptions.sendStringAsJson).\n const json = BigIntJSON.stringify(change);\n this.#approximateQueuedBytes += json.length;\n\n this.#queue.enqueue([\n 'change',\n watermark,\n json,\n isDataChange(change) ? null : change, // drop DataChanges to save memory\n ]);\n\n return json.length;\n }\n\n abort() {\n this.#queue.enqueue(['abort']);\n }\n\n status(s: DownstreamStatusMessage) {\n this.#queue.enqueue(s);\n }\n\n catchup(subscriber: Subscriber, mode: ReplicatorMode) {\n this.#queue.enqueue(['subscriber', {subscriber, mode}]);\n }\n\n #readyForMore: Resolver<void> | null = null;\n\n readyForMore(): Promise<void> | undefined {\n if (!this.#running) {\n return undefined;\n }\n if (\n this.#readyForMore === null &&\n this.#approximateQueuedBytes > this.#backPressureThresholdBytes\n ) {\n this.#lc.warn?.(\n `applying back pressure with ${this.#queue.size()} queued changes (~${(this.#approximateQueuedBytes / 1024 ** 2).toFixed(2)} MB)\\n` +\n `\\n` +\n `To inspect changeLog backlog in your change DB:\\n` +\n ` SELECT\\n` +\n ` (change->'relation'->>'schema') || '.' || (change->'relation'->>'name') AS table_name,\\n` +\n ` change->>'tag' AS operation,\\n` +\n ` COUNT(*) AS count\\n` +\n ` FROM \"<app_id>/cdc\".\"changeLog\"\\n` +\n ` GROUP BY 1, 2\\n` +\n ` ORDER BY 3 DESC\\n` +\n ` LIMIT 20;`,\n );\n this.#readyForMore = resolver();\n }\n return this.#readyForMore?.promise;\n }\n\n #maybeReleaseBackPressure() {\n if (\n this.#readyForMore !== null &&\n // Wait for at least 20% of the threshold to free up.\n this.#approximateQueuedBytes < this.#backPressureThresholdBytes * 0.8\n ) {\n this.#lc.info?.(\n `releasing back pressure with ${this.#queue.size()} queued changes (~${(this.#approximateQueuedBytes / 1024 ** 2).toFixed(2)} MB)`,\n );\n this.#readyForMore.resolve();\n this.#readyForMore = null;\n }\n }\n\n #stopped = promiseVoid;\n\n /**\n * Runs the storer loop until {@link stop()} is called, or an error is thrown.\n * Once {@link run()} completes, it can be called again.\n */\n async run() {\n assert(!this.#running, `storer is already running`);\n\n const {promise: stopped, resolve: signalStopped} = resolver();\n this.#running = true;\n this.#stopped = stopped;\n\n this.#lc.info?.('starting storer');\n let err: unknown;\n try {\n await this.#processQueue();\n } catch (e) {\n err = e; // used in finally\n throw e;\n } finally {\n // Release any pending backpressure so the upstream can proceed\n if (this.#readyForMore !== null) {\n this.#readyForMore.resolve();\n this.#readyForMore = null;\n }\n this.#cancelQueueEntries(\n this.#queue.drain().filter(entry => entry !== undefined),\n err,\n );\n this.#running = false;\n signalStopped();\n this.#lc.info?.('storer stopped');\n }\n }\n\n #cancelQueueEntries(queue: QueueEntry[], e: unknown) {\n if (queue.length === 0) {\n return;\n }\n this.#lc.info?.(\n `canceling ${queue.length} entries from the changeLog queue`,\n );\n const err = e instanceof Error ? 
e : new AbortError('server shutting down');\n for (const entry of queue) {\n if (entry === 'stop') {\n continue;\n }\n const type = entry[0];\n switch (type) {\n case 'subscriber': {\n // Disconnect subscribers waiting to be caught up so that they can\n // reconnect and try again.\n const {subscriber} = entry[1];\n this.#lc.info?.(`disconnecting ${subscriber.id}`);\n subscriber.fail(err);\n break;\n }\n }\n }\n }\n\n async #processQueue() {\n let tx: PendingTransaction | null = null;\n let msg: QueueEntry | false;\n\n const catchupQueue: SubscriberAndMode[] = [];\n try {\n while ((msg = await this.#queue.dequeue()) !== 'stop') {\n const [msgType] = msg;\n switch (msgType) {\n case 'ready': {\n const signalReady = msg[1];\n signalReady();\n continue;\n }\n case 'subscriber': {\n const subscriber = msg[1];\n if (tx) {\n catchupQueue.push(subscriber); // Wait for the current tx to complete.\n } else {\n await this.#startCatchup([subscriber]); // Catch up immediately.\n }\n continue;\n }\n case 'status':\n this.#onConsumed(msg);\n continue;\n case 'abort': {\n if (tx) {\n tx.pool.abort();\n await tx.pool.done();\n tx = null;\n }\n continue;\n }\n }\n // msgType === 'change'\n const [_, watermark, json, change] = msg;\n const tag = change?.tag;\n this.#approximateQueuedBytes -= json.length;\n\n if (tag === 'begin') {\n assert(!tx, 'received BEGIN in the middle of a transaction');\n const {promise, resolve, reject} = resolver<ReplicationOwner>();\n void promise.catch(() => {}); // handle rejections before the await\n tx = {\n pool: new TransactionPool(\n this.#lc.withContext('watermark', watermark),\n {\n mode: Mode.READ_COMMITTED,\n statementResponseTimeout: this.#statementTimeoutMs,\n },\n ),\n preCommitWatermark: watermark,\n pos: 0,\n startingReplicationState: promise,\n ack: !change.skipAck,\n };\n tx.pool.run(this.#db);\n // Acquire a lock on the replicationState row to detect and/or prevent\n // a concurrent ownership change.\n void tx.pool.process(tx => {\n tx<ReplicationOwner[]> /*sql*/ `\n SELECT \"owner\" FROM ${this.#cdc('replicationState')} FOR UPDATE`.then(\n ([result]) => resolve(result),\n reject,\n );\n return [];\n });\n } else {\n assert(tx, () => `received change outside of transaction: ${json}`);\n tx.pos++;\n }\n\n const entry = {\n watermark: tag === 'commit' ? watermark : tx.preCommitWatermark,\n precommit: tag === 'commit' ? tx.preCommitWatermark : null,\n pos: tx.pos,\n change: json,\n };\n\n const processed = tx.pool.process(sql => [\n sql`INSERT INTO ${this.#cdc('changeLog')} ${sql(entry)}`,\n ...(change !== null && isSchemaChange(change)\n ? 
this.#trackBackfillMetadata(sql, change)\n : []),\n ]);\n\n if (tx.pos % 100 === 0) {\n // Backpressure is exerted on commit when awaiting tx.pool.done().\n // However, backpressure checks need to be regularly done for\n // very large transactions in order to avoid memory blowup.\n await processed;\n }\n this.#maybeReleaseBackPressure();\n\n if (tag === 'commit') {\n const {owner} = await tx.startingReplicationState;\n if (owner !== this.#taskID) {\n // Ownership change reflected in the replicationState read in 'begin'.\n tx.pool.fail(\n new AbortError(\n `changeLog ownership has been assumed by ${owner}`,\n ),\n );\n } else {\n // Update the replication state.\n const lastWatermark = watermark;\n void tx.pool.process(tx => [\n tx`\n UPDATE ${this.#cdc('replicationState')} SET ${tx({lastWatermark})}`,\n ]);\n tx.pool.setDone();\n }\n\n await tx.pool.done();\n\n // ACK the LSN to the upstream Postgres.\n if (tx.ack) {\n this.#onConsumed(['commit', change, {watermark}]);\n }\n tx = null;\n\n // Before beginning the next transaction, open a READONLY snapshot to\n // concurrently catchup any queued subscribers.\n await this.#startCatchup(catchupQueue.splice(0));\n } else if (tag === 'rollback') {\n // Aborted transactions are not stored in the changeLog. Abort the current tx\n // and process catchup of subscribers that were waiting for it to end.\n tx.pool.abort();\n await tx.pool.done();\n tx = null;\n\n await this.#startCatchup(catchupQueue.splice(0));\n }\n }\n } catch (e) {\n catchupQueue.forEach(({subscriber}) => subscriber.fail(e));\n throw e;\n }\n }\n\n async #startCatchup(subs: SubscriberAndMode[]) {\n if (subs.length === 0) {\n return;\n }\n\n const reader = new TransactionPool(\n this.#lc.withContext('pool', 'catchup'),\n {mode: Mode.READONLY},\n );\n reader.run(this.#db);\n\n let lastWatermark: string | undefined;\n try {\n // Ensure that the transaction has started (and is thus holding a snapshot\n // of the database) before continuing on to commit more changes. This is\n // done by performing a single read on the db, which determines the\n // snapshot for the REPEATABLE_READ transaction.\n [{lastWatermark}] = await reader.processReadTask(\n sql => sql<{lastWatermark: string}[]>`\n SELECT \"lastWatermark\" FROM ${this.#cdc('replicationState')}\n `,\n );\n } catch (e) {\n subs.map(({subscriber}) => subscriber.fail(e));\n throw e;\n }\n\n // Run the actual catchup queries in the background. Errors are handled in\n // #catchup() by disconnecting the associated subscriber.\n void Promise.all(\n subs.map(sub => this.#catchup(sub, lastWatermark, reader)),\n ).finally(() => reader.setDone());\n }\n\n async #catchup(\n {subscriber: sub, mode}: SubscriberAndMode,\n lastWatermark: string,\n reader: TransactionPool,\n ) {\n try {\n await reader.processReadTask(async tx => {\n const start = Date.now();\n\n // When starting from initial-sync, there won't be a change with a watermark\n // equal to the replica version. This is the empty changeLog scenario.\n let watermarkFound = sub.watermark === this.#replicaVersion;\n let count = 0;\n let lastBatchConsumed: Promise<unknown> | undefined;\n\n for await (const entries of tx<ChangeEntry[]> /*sql*/ `\n SELECT watermark, change FROM ${this.#cdc('changeLog')}\n WHERE watermark >= ${sub.watermark}\n AND watermark <= ${lastWatermark}\n ORDER BY watermark, pos`.cursor(2000)) {\n // Wait for the last batch of entries to be consumed by the\n // subscriber before sending down the current batch. 
This pipelining\n // allows one batch of changes to be received from the change-db\n // while the previous batch of changes are sent to the subscriber,\n // resulting in flow control that caps the number of changes\n // referenced in memory to 2 * batch-size.\n const start = performance.now();\n await lastBatchConsumed;\n const elapsed = performance.now() - start;\n if (lastBatchConsumed) {\n (elapsed > 100 ? this.#lc.info : this.#lc.debug)?.(\n `waited ${elapsed.toFixed(3)} ms for ${sub.id} to consume last batch of catchup entries`,\n );\n }\n\n for (const entry of entries) {\n if (entry.watermark === sub.watermark) {\n // This should be the first entry.\n // Catchup starts from *after* the watermark.\n watermarkFound = true;\n } else if (watermarkFound) {\n lastBatchConsumed = sub.catchup(toDownstream(entry));\n count++;\n } else if (mode === 'backup') {\n throw new AutoResetSignal(\n `backup replica at watermark ${sub.watermark} is behind change db: ${entry.watermark})`,\n );\n } else {\n this.#lc.warn?.(\n `rejecting subscriber at watermark ${sub.watermark} (earliest watermark: ${entry.watermark})`,\n );\n sub.close(\n ErrorType.WatermarkTooOld,\n `earliest supported watermark is ${entry.watermark} (requested ${sub.watermark})`,\n );\n return;\n }\n }\n }\n if (watermarkFound) {\n await lastBatchConsumed;\n this.#lc.info?.(\n `caught up ${sub.id} with ${count} changes (${\n Date.now() - start\n } ms)`,\n );\n } else {\n this.#lc.warn?.(\n `subscriber at watermark ${sub.watermark} is ahead of latest watermark`,\n );\n }\n // Flushes the backlog of messages buffered during catchup and\n // allows the subscription to forward subsequent messages immediately.\n sub.setCaughtUp();\n });\n } catch (err) {\n this.#lc.error?.(`error while catching up subscriber ${sub.id}`, err);\n if (err instanceof AutoResetSignal) {\n await markResetRequired(this.#db, this.#shard);\n this.#onFatal(err);\n }\n sub.fail(err);\n }\n }\n\n /**\n * Returns the db statements necessary to track backfill and table metadata\n * presented in the `change`, if any.\n */\n #trackBackfillMetadata(sql: PostgresTransaction, change: SchemaChange) {\n const stmts: PendingQuery<Row[]>[] = [];\n\n switch (change.tag) {\n case 'update-table-metadata': {\n const {table, new: metadata} = change;\n stmts.push(this.#upsertTableMetadataStmt(sql, table, metadata));\n break;\n }\n\n case 'create-table': {\n const {spec, metadata, backfill} = change;\n if (metadata) {\n stmts.push(this.#upsertTableMetadataStmt(sql, spec, metadata));\n }\n if (backfill) {\n Object.entries(backfill).forEach(([col, backfill]) => {\n stmts.push(\n this.#upsertColumnBackfillStmt(sql, spec, col, backfill),\n );\n });\n }\n break;\n }\n\n case 'rename-table': {\n const {old} = change;\n const row = {schema: change.new.schema, table: change.new.name};\n stmts.push(\n sql`UPDATE ${this.#cdc('tableMetadata')} SET ${sql(row)}\n WHERE \"schema\" = ${old.schema} AND \"table\" = ${old.name}`,\n sql`UPDATE ${this.#cdc('backfilling')} SET ${sql(row)}\n WHERE \"schema\" = ${old.schema} AND \"table\" = ${old.name}`,\n );\n break;\n }\n\n case 'drop-table': {\n const {\n id: {schema, name},\n } = change;\n stmts.push(\n sql`DELETE FROM ${this.#cdc('tableMetadata')}\n WHERE \"schema\" = ${schema} AND \"table\" = ${name}`,\n sql`DELETE FROM ${this.#cdc('backfilling')}\n WHERE \"schema\" = ${schema} AND \"table\" = ${name}`,\n );\n break;\n }\n\n case 'add-column': {\n const {table, tableMetadata, column, backfill} = change;\n if (tableMetadata) {\n 
stmts.push(this.#upsertTableMetadataStmt(sql, table, tableMetadata));\n }\n if (backfill) {\n stmts.push(\n this.#upsertColumnBackfillStmt(sql, table, column.name, backfill),\n );\n }\n break;\n }\n\n case 'update-column': {\n const {\n table: {schema, name: table},\n old: {name: oldName},\n new: {name: newName},\n } = change;\n if (oldName !== newName) {\n stmts.push(\n sql`UPDATE ${this.#cdc('backfilling')} SET \"column\" = ${newName}\n WHERE \"schema\" = ${schema} AND \"table\" = ${table} AND \"column\" = ${oldName}`,\n );\n }\n break;\n }\n\n case 'drop-column': {\n const {\n table: {schema, name},\n column,\n } = change;\n stmts.push(\n sql`DELETE FROM ${this.#cdc('backfilling')}\n WHERE \"schema\" = ${schema} AND \"table\" = ${name} AND \"column\" = ${column}`,\n );\n break;\n }\n\n case 'backfill-completed': {\n const {\n relation: {schema, name: table, rowKey},\n columns,\n } = change;\n const cols = [...rowKey.columns, ...columns];\n stmts.push(\n sql`DELETE FROM ${this.#cdc('backfilling')}\n WHERE \"schema\" = ${schema} AND \"table\" = ${table} AND \"column\" IN ${sql(cols)}`,\n );\n }\n }\n return stmts;\n }\n\n #upsertTableMetadataStmt(\n sql: PostgresTransaction,\n {schema, name: table}: Identifier,\n metadata: TableMetadata,\n ) {\n const row: TableMetadataRow = {schema, table, metadata};\n return sql`\n INSERT INTO ${this.#cdc('tableMetadata')} ${sql(row)}\n ON CONFLICT (\"schema\", \"table\") \n DO UPDATE SET ${sql(row)};\n `;\n }\n\n #upsertColumnBackfillStmt(\n sql: PostgresTransaction,\n {schema, name: table}: Identifier,\n column: string,\n backfill: BackfillID,\n ) {\n const row: BackfillingColumn = {schema, table, column, backfill};\n return sql`\n INSERT INTO ${this.#cdc('backfilling')} ${sql(row)}\n ON CONFLICT (\"schema\", \"table\", \"column\") \n DO UPDATE SET ${sql(row)};\n `;\n }\n\n /**\n * Waits until all currently queued entries have been processed.\n * This is only used in tests.\n */\n async allProcessed() {\n if (this.#running) {\n const {promise, resolve} = resolver();\n this.#queue.enqueue(['ready', resolve]);\n await promise;\n }\n }\n\n stop() {\n if (this.#running) {\n this.#lc.info?.(`draining ${this.#queue.size()} changeLog entries`);\n this.#queue.enqueue('stop');\n }\n return this.#stopped;\n }\n}\n\nfunction toDownstream(entry: ChangeEntry): WatermarkedChange {\n const {watermark, change} = entry;\n switch (change.tag) {\n case 'begin':\n return [watermark, ['begin', change, {commitWatermark: watermark}]];\n case 'commit':\n return [watermark, ['commit', change, {watermark}]];\n case 'rollback':\n return [watermark, ['rollback', change]];\n default:\n return [watermark, ['data', change]];\n }\n}\n\nexport class PurgeLock {\n readonly #lc: LogContext;\n readonly #tx: TransactionPool;\n readonly replicaVersion: string;\n readonly minWatermark: string;\n\n constructor(\n lc: LogContext,\n tx: TransactionPool,\n replicaVersion: string,\n watermark: string,\n ) {\n this.#lc = lc;\n this.#tx = tx;\n this.replicaVersion = replicaVersion;\n this.minWatermark = watermark;\n }\n\n #released = false;\n\n async release() {\n if (this.#released) {\n return;\n }\n this.#released = true;\n this.#tx.setDone();\n await this.#tx\n .done()\n .catch(e => this.#lc.warn?.(`error from purge-lock release`, e));\n this.#lc.info?.(`released purge lock on ${this.minWatermark}`);\n }\n}\n\nexport class PurgeLocker {\n readonly #lc: LogContext;\n readonly #shard: ShardID;\n readonly #db: PostgresDB;\n\n constructor(lc: LogContext, shard: ShardID, db: PostgresDB) {\n 
this.#lc = lc.withContext('component', 'purge-locker');\n this.#shard = shard;\n this.#db = db;\n }\n\n // For readability in SQL statements.\n #cdc(table: string) {\n return this.#db(`${cdcSchema(this.#shard)}.${table}`);\n }\n\n async acquire() {\n const tx = new TransactionPool(this.#lc, {mode: Mode.READ_COMMITTED}).run(\n this.#db,\n );\n const row = await tx.processReadTask(\n sql => sql<{watermark: string}[]>`\n SELECT watermark FROM ${this.#cdc('changeLog')}\n ORDER BY watermark, pos LIMIT 1\n FOR SHARE \n `,\n );\n if (row.length === 0) {\n this.#lc.info?.(`changeLog is empty. No rows to purge-lock.`);\n tx.setDone();\n await tx.done();\n return null;\n }\n const [{watermark}] = row;\n const [{replicaVersion}] = await tx.processReadTask(\n sql => sql<{replicaVersion: string}[]>`\n SELECT \"replicaVersion\" FROM ${this.#cdc('replicationConfig')}\n `,\n );\n this.#lc.info?.(\n `locked watermark ${watermark} from being purged from replica@${replicaVersion}`,\n );\n return new PurgeLock(this.#lc, tx, replicaVersion, watermark);\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;AA2EA,IAAM,yBAAyB,eAAE,MAAM,sBAAsB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAqC7D,IAAa,SAAb,MAAuC;CACrC,KAAc;CACd;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA,SAAkB,IAAI,OAAmB;CACzC;CACA;CAEA,0BAA0B;CAC1B,WAAW;CAEX,YACE,IACA,OACA,QACA,kBACA,mBACA,IACA,gBACA,YACA,SACA,EAAC,iCAAiC,sBAClC;AACA,QAAA,KAAW,GAAG,YAAY,aAAa,aAAa;AACpD,QAAA,QAAc;AACd,QAAA,SAAe;AACf,QAAA,mBAAyB;AACzB,QAAA,oBAA0B;AAC1B,QAAA,KAAW;AACX,QAAA,iBAAuB;AACvB,QAAA,aAAmB;AACnB,QAAA,UAAgB;AAChB,QAAA,qBAA2B;EAE3B,MAAM,YAAY,mBAAmB;AACrC,QAAA,8BACG,UAAU,kBAAkB,UAAU,kBACvC;AAEF,QAAA,GAAS,OACP,gBAAgB,MAAA,6BAAmC,QAAQ,GAAG,QAAQ,EAAE,CAAC,iCAC5C,UAAU,kBAAkB,QAAQ,GAAG,QAAQ,EAAE,CAAC,iCAE/E,EAAC,WAAU,CACZ;;CAIH,KAAK,OAAe;AAClB,SAAO,MAAA,GAAS,GAAG,UAAU,MAAA,MAAY,CAAC,GAAG,QAAQ;;CAGvD,MAAM,gBAAgB,WAA8B;EAClD,MAAM,KAAK,MAAA;EACX,MAAM,QAAQ,MAAA;EACd,MAAM,eAAe,MAAA;EACrB,MAAM,gBAAgB,MAAA;EAEtB,MAAM,sBACJ,kBAAkB,OACd,eACA,GAAG,cAAc,KAAK;AAC5B,QAAA,GAAS,OAAO,yBAAyB,sBAAsB;EAC/D,MAAM,QAAQ,YAAY,KAAK;AAC/B,QAAM,EAAE,UAAU,MAAA,IAAU,mBAAmB,CAAC,OAAO,GAAG;GAAC;GAAO,cAAc;GAAoB,CAAC;EACrG,MAAM,WAAW,YAAY,KAAK,GAAG,OAAO,QAAQ,EAAE;AACtD,QAAA,GAAS,OACP,wBAAwB,oBAAoB,IAAI,QAAQ,MACzD;AAED,MAAI,UAKG,WAAU,SAAS;;CAI5B,MAAM,yCAGH;EACD,MAAM,CAAC,CAAC,EAAC,kBAAiB,UAAU,MAAM,MACxC,MAAA,KACA,QAAO,CACL,GAA8B;sCACA,MAAA,IAAU,mBAAmB,IAK3D,GAAG;;;;;;;;;iBASM,MAAA,IAAU,cAAc,CAAC;sBACpB,MAAA,IAAU,gBAAgB,CAAC;;;UAI1C,EACD,EAAC,MAAM,UAAc,CACtB;AAED,SAAO;GACL;GACA,kBAAkB,MAAQ,QAAQ,uBAAuB;GAC1D;;CAGH,MAAM,4BAAoD;EACxD,MAAM,CAAC,EAAC,kBAAiB,MAAM,MAAA,EACvB;qDACyC,MAAA,IAAU,YAAY;AACvE,SAAO;;CAGT,mBAAmB,WAAoC;AACrD,SAAO,MAAM,MAAA,IAAU,OAAM,QAAO;AAGlC,SAAM,GAA0B;kCACJ,MAAA,IAAU,YAAY,CAAC;;;;GASnD,MAAM,CAAC,EAAC,aAAY,MAAM,GAAwB;;wBAEhC,MAAA,IAAU,YAAY,CAAC,qBAAqB,UAAU;;;GAIxE,MAAM,CAAC,EAAC,WAAU,MAAM,GAAuB;8BACvB,MAAA,IAAU,mBAAmB,CAAC;AACtD,OAAI,UAAU,MAAA,OACZ,OAAM,IAAI,WACR,+BAA+B,UAAU,uCAAuC,QACjF;AAEH,UAAO,OAAO,QAAQ;IACtB;;;;;CAMJ,MAAM,OAA0B;EAC9B,MAAM,CAAC,WAAW,CAAC,MAAM,WAAW;EAQpC,MAAM,OAAO,WAAW,UAAU,OAAO;AACzC,QAAA,0BAAgC,KAAK;AAErC,QAAA,MAAY,QAAQ;GAClB;GACA;GACA;GACA,aAAa,OAAO,GAAG,OAAO;GAC/B,CAAC;AAEF,SAAO,KAAK;;CAGd,QAAQ;AACN,QAAA,MAAY,QAAQ,CAAC,QAAQ,CAAC;;CAGhC,OAAO,GAA4B;AACjC,QAAA,MAAY,QAAQ,EAAE;;CAGxB,QAAQ,YAAwB,MAAsB;AACpD,QAAA,MAAY,QAAQ,CAAC,cAAc;GAAC;GAAY;GAAK,CAAC,CAAC;;CAGzD,gBAAuC;CAEvC,eAA0C;AACxC,MAAI,CAAC,MAAA,QACH;AAEF,MACE,MAAA,iBAAuB,QACvB,MAAA,yBAA+B,MAAA,4BAC/B;AACA,SAAA,GAAS,OACP,+BAA+B,MAAA,MAAY,MAAM,CAAC,qBAAqB,MAAA,yBAA+B,QAAQ,GAAG,QAAQ,EAAE,CAAC,4SAW7H;AACD,SAAA,eAAqB,UAAU;;AAEjC,SAAO,MAAA,
cAAoB;;CAG7B,4BAA4B;AAC1B,MACE,MAAA,iBAAuB,QAEvB,MAAA,yBAA+B,MAAA,6BAAmC,IAClE;AACA,SAAA,GAAS,OACP,gCAAgC,MAAA,MAAY,MAAM,CAAC,qBAAqB,MAAA,yBAA+B,QAAQ,GAAG,QAAQ,EAAE,CAAC,MAC9H;AACD,SAAA,aAAmB,SAAS;AAC5B,SAAA,eAAqB;;;CAIzB,WAAW;;;;;CAMX,MAAM,MAAM;AACV,SAAO,CAAC,MAAA,SAAe,4BAA4B;EAEnD,MAAM,EAAC,SAAS,SAAS,SAAS,kBAAiB,UAAU;AAC7D,QAAA,UAAgB;AAChB,QAAA,UAAgB;AAEhB,QAAA,GAAS,OAAO,kBAAkB;EAClC,IAAI;AACJ,MAAI;AACF,SAAM,MAAA,cAAoB;WACnB,GAAG;AACV,SAAM;AACN,SAAM;YACE;AAER,OAAI,MAAA,iBAAuB,MAAM;AAC/B,UAAA,aAAmB,SAAS;AAC5B,UAAA,eAAqB;;AAEvB,SAAA,mBACE,MAAA,MAAY,OAAO,CAAC,QAAO,UAAS,UAAU,KAAA,EAAU,EACxD,IACD;AACD,SAAA,UAAgB;AAChB,kBAAe;AACf,SAAA,GAAS,OAAO,iBAAiB;;;CAIrC,oBAAoB,OAAqB,GAAY;AACnD,MAAI,MAAM,WAAW,EACnB;AAEF,QAAA,GAAS,OACP,aAAa,MAAM,OAAO,mCAC3B;EACD,MAAM,MAAM,aAAa,QAAQ,IAAI,IAAI,WAAW,uBAAuB;AAC3E,OAAK,MAAM,SAAS,OAAO;AACzB,OAAI,UAAU,OACZ;AAGF,WADa,MAAM,IACnB;IACE,KAAK,cAAc;KAGjB,MAAM,EAAC,eAAc,MAAM;AAC3B,WAAA,GAAS,OAAO,iBAAiB,WAAW,KAAK;AACjD,gBAAW,KAAK,IAAI;AACpB;;;;;CAMR,OAAA,eAAsB;EACpB,IAAI,KAAgC;EACpC,IAAI;EAEJ,MAAM,eAAoC,EAAE;AAC5C,MAAI;AACF,WAAQ,MAAM,MAAM,MAAA,MAAY,SAAS,MAAM,QAAQ;IACrD,MAAM,CAAC,WAAW;AAClB,YAAQ,SAAR;KACE,KAAK,SAAS;MACZ,MAAM,cAAc,IAAI;AACxB,mBAAa;AACb;;KAEF,KAAK,cAAc;MACjB,MAAM,aAAa,IAAI;AACvB,UAAI,GACF,cAAa,KAAK,WAAW;UAE7B,OAAM,MAAA,aAAmB,CAAC,WAAW,CAAC;AAExC;;KAEF,KAAK;AACH,YAAA,WAAiB,IAAI;AACrB;KACF,KAAK;AACH,UAAI,IAAI;AACN,UAAG,KAAK,OAAO;AACf,aAAM,GAAG,KAAK,MAAM;AACpB,YAAK;;AAEP;;IAIJ,MAAM,CAAC,GAAG,WAAW,MAAM,UAAU;IACrC,MAAM,MAAM,QAAQ;AACpB,UAAA,0BAAgC,KAAK;AAErC,QAAI,QAAQ,SAAS;AACnB,YAAO,CAAC,IAAI,gDAAgD;KAC5D,MAAM,EAAC,SAAS,SAAS,WAAU,UAA4B;AAC1D,aAAQ,YAAY,GAAG;AAC5B,UAAK;MACH,MAAM,IAAI,gBACR,MAAA,GAAS,YAAY,aAAa,UAAU,EAC5C;OACE,MAAM;OACN,0BAA0B,MAAA;OAC3B,CACF;MACD,oBAAoB;MACpB,KAAK;MACL,0BAA0B;MAC1B,KAAK,CAAC,OAAO;MACd;AACD,QAAG,KAAK,IAAI,MAAA,GAAS;AAGhB,QAAG,KAAK,SAAQ,OAAM;AACzB,QAA+B;gCACX,MAAA,IAAU,mBAAmB,CAAC,aAAa,MAC5D,CAAC,YAAY,QAAQ,OAAO,EAC7B,OACD;AACD,aAAO,EAAE;OACT;WACG;AACL,YAAO,UAAU,2CAA2C,OAAO;AACnE,QAAG;;IAGL,MAAM,QAAQ;KACZ,WAAW,QAAQ,WAAW,YAAY,GAAG;KAC7C,WAAW,QAAQ,WAAW,GAAG,qBAAqB;KACtD,KAAK,GAAG;KACR,QAAQ;KACT;IAED,MAAM,YAAY,GAAG,KAAK,SAAQ,QAAO,CACvC,GAAG,eAAe,MAAA,IAAU,YAAY,CAAC,GAAG,IAAI,MAAM,IACtD,GAAI,WAAW,QAAQ,eAAe,OAAO,GACzC,MAAA,sBAA4B,KAAK,OAAO,GACxC,EAAE,CACP,CAAC;AAEF,QAAI,GAAG,MAAM,QAAQ,EAInB,OAAM;AAER,UAAA,0BAAgC;AAEhC,QAAI,QAAQ,UAAU;KACpB,MAAM,EAAC,UAAS,MAAM,GAAG;AACzB,SAAI,UAAU,MAAA,OAEZ,IAAG,KAAK,KACN,IAAI,WACF,2CAA2C,QAC5C,CACF;UACI;MAEL,MAAM,gBAAgB;AACjB,SAAG,KAAK,SAAQ,OAAM,CACzB,EAAE;qBACK,MAAA,IAAU,mBAAmB,CAAC,OAAO,GAAG,EAAC,eAAc,CAAC,GAChE,CAAC;AACF,SAAG,KAAK,SAAS;;AAGnB,WAAM,GAAG,KAAK,MAAM;AAGpB,SAAI,GAAG,IACL,OAAA,WAAiB;MAAC;MAAU;MAAQ,EAAC,WAAU;MAAC,CAAC;AAEnD,UAAK;AAIL,WAAM,MAAA,aAAmB,aAAa,OAAO,EAAE,CAAC;eACvC,QAAQ,YAAY;AAG7B,QAAG,KAAK,OAAO;AACf,WAAM,GAAG,KAAK,MAAM;AACpB,UAAK;AAEL,WAAM,MAAA,aAAmB,aAAa,OAAO,EAAE,CAAC;;;WAG7C,GAAG;AACV,gBAAa,SAAS,EAAC,iBAAgB,WAAW,KAAK,EAAE,CAAC;AAC1D,SAAM;;;CAIV,OAAA,aAAoB,MAA2B;AAC7C,MAAI,KAAK,WAAW,EAClB;EAGF,MAAM,SAAS,IAAI,gBACjB,MAAA,GAAS,YAAY,QAAQ,UAAU,EACvC,EAAC,MAAM,UAAc,CACtB;AACD,SAAO,IAAI,MAAA,GAAS;EAEpB,IAAI;AACJ,MAAI;AAKF,IAAC,CAAC,kBAAkB,MAAM,OAAO,iBAC/B,QAAO,GAA8B;sCACP,MAAA,IAAU,mBAAmB,CAAC;QAE7D;WACM,GAAG;AACV,QAAK,KAAK,EAAC,iBAAgB,WAAW,KAAK,EAAE,CAAC;AAC9C,SAAM;;AAKH,UAAQ,IACX,KAAK,KAAI,QAAO,MAAA,QAAc,KAAK,eAAe,OAAO,CAAC,CAC3D,CAAC,cAAc,OAAO,SAAS,CAAC;;CAGnC,OAAA,QACE,EAAC,YAAY,KAAK,QAClB,eACA,QACA;AACA,MAAI;AACF,SAAM,OAAO,gBAAgB,OAAM,OAAM;IACvC,MAAM,QAAQ,KAAK,KAAK;IAIxB,IAAI,iBAAiB,IAAI,cAAc,MAAA;IACvC,IAAI,QAAQ;IACZ,IAAI;AAEJ,eAAW,MAAM,WAAW,EAA0B;0CACpB,MAAA,IAAU,YAAY,CAAC;gCACjC,IA
AI,UAAU;gCACd,cAAc;oCACV,OAAO,IAAK,EAAE;KAOxC,MAAM,QAAQ,YAAY,KAAK;AAC/B,WAAM;KACN,MAAM,UAAU,YAAY,KAAK,GAAG;AACpC,SAAI,kBACF,EAAC,UAAU,MAAM,MAAA,GAAS,OAAO,MAAA,GAAS,SACxC,UAAU,QAAQ,QAAQ,EAAE,CAAC,UAAU,IAAI,GAAG,2CAC/C;AAGH,UAAK,MAAM,SAAS,QAClB,KAAI,MAAM,cAAc,IAAI,UAG1B,kBAAiB;cACR,gBAAgB;AACzB,0BAAoB,IAAI,QAAQ,aAAa,MAAM,CAAC;AACpD;gBACS,SAAS,SAClB,OAAM,IAAI,gBACR,+BAA+B,IAAI,UAAU,wBAAwB,MAAM,UAAU,GACtF;UACI;AACL,YAAA,GAAS,OACP,qCAAqC,IAAI,UAAU,wBAAwB,MAAM,UAAU,GAC5F;AACD,UAAI,MACF,GACA,mCAAmC,MAAM,UAAU,cAAc,IAAI,UAAU,GAChF;AACD;;;AAIN,QAAI,gBAAgB;AAClB,WAAM;AACN,WAAA,GAAS,OACP,aAAa,IAAI,GAAG,QAAQ,MAAM,YAChC,KAAK,KAAK,GAAG,MACd,MACF;UAED,OAAA,GAAS,OACP,2BAA2B,IAAI,UAAU,+BAC1C;AAIH,QAAI,aAAa;KACjB;WACK,KAAK;AACZ,SAAA,GAAS,QAAQ,sCAAsC,IAAI,MAAM,IAAI;AACrE,OAAI,eAAe,iBAAiB;AAClC,UAAM,kBAAkB,MAAA,IAAU,MAAA,MAAY;AAC9C,UAAA,QAAc,IAAI;;AAEpB,OAAI,KAAK,IAAI;;;;;;;CAQjB,uBAAuB,KAA0B,QAAsB;EACrE,MAAM,QAA+B,EAAE;AAEvC,UAAQ,OAAO,KAAf;GACE,KAAK,yBAAyB;IAC5B,MAAM,EAAC,OAAO,KAAK,aAAY;AAC/B,UAAM,KAAK,MAAA,wBAA8B,KAAK,OAAO,SAAS,CAAC;AAC/D;;GAGF,KAAK,gBAAgB;IACnB,MAAM,EAAC,MAAM,UAAU,aAAY;AACnC,QAAI,SACF,OAAM,KAAK,MAAA,wBAA8B,KAAK,MAAM,SAAS,CAAC;AAEhE,QAAI,SACF,QAAO,QAAQ,SAAS,CAAC,SAAS,CAAC,KAAK,cAAc;AACpD,WAAM,KACJ,MAAA,yBAA+B,KAAK,MAAM,KAAK,SAAS,CACzD;MACD;AAEJ;;GAGF,KAAK,gBAAgB;IACnB,MAAM,EAAC,QAAO;IACd,MAAM,MAAM;KAAC,QAAQ,OAAO,IAAI;KAAQ,OAAO,OAAO,IAAI;KAAK;AAC/D,UAAM,KACJ,GAAG,UAAU,MAAA,IAAU,gBAAgB,CAAC,OAAO,IAAI,IAAI,CAAC;mCAC/B,IAAI,OAAO,iBAAiB,IAAI,QACzD,GAAG,UAAU,MAAA,IAAU,cAAc,CAAC,OAAO,IAAI,IAAI,CAAC;mCAC7B,IAAI,OAAO,iBAAiB,IAAI,OAC1D;AACD;;GAGF,KAAK,cAAc;IACjB,MAAM,EACJ,IAAI,EAAC,QAAQ,WACX;AACJ,UAAM,KACJ,GAAG,eAAe,MAAA,IAAU,gBAAgB,CAAC;mCACpB,OAAO,iBAAiB,QACjD,GAAG,eAAe,MAAA,IAAU,cAAc,CAAC;mCAClB,OAAO,iBAAiB,OAClD;AACD;;GAGF,KAAK,cAAc;IACjB,MAAM,EAAC,OAAO,eAAe,QAAQ,aAAY;AACjD,QAAI,cACF,OAAM,KAAK,MAAA,wBAA8B,KAAK,OAAO,cAAc,CAAC;AAEtE,QAAI,SACF,OAAM,KACJ,MAAA,yBAA+B,KAAK,OAAO,OAAO,MAAM,SAAS,CAClE;AAEH;;GAGF,KAAK,iBAAiB;IACpB,MAAM,EACJ,OAAO,EAAC,QAAQ,MAAM,SACtB,KAAK,EAAC,MAAM,WACZ,KAAK,EAAC,MAAM,cACV;AACJ,QAAI,YAAY,QACd,OAAM,KACJ,GAAG,UAAU,MAAA,IAAU,cAAc,CAAC,kBAAkB,QAAQ;mCACzC,OAAO,iBAAiB,MAAM,kBAAkB,UACxE;AAEH;;GAGF,KAAK,eAAe;IAClB,MAAM,EACJ,OAAO,EAAC,QAAQ,QAChB,WACE;AACJ,UAAM,KACJ,GAAG,eAAe,MAAA,IAAU,cAAc,CAAC;mCAClB,OAAO,iBAAiB,KAAK,kBAAkB,SACzE;AACD;;GAGF,KAAK,sBAAsB;IACzB,MAAM,EACJ,UAAU,EAAC,QAAQ,MAAM,OAAO,UAChC,YACE;IACJ,MAAM,OAAO,CAAC,GAAG,OAAO,SAAS,GAAG,QAAQ;AAC5C,UAAM,KACJ,GAAG,eAAe,MAAA,IAAU,cAAc,CAAC;mCAClB,OAAO,iBAAiB,MAAM,mBAAmB,IAAI,KAAK,GACpF;;;AAGL,SAAO;;CAGT,yBACE,KACA,EAAC,QAAQ,MAAM,SACf,UACA;EACA,MAAM,MAAwB;GAAC;GAAQ;GAAO;GAAS;AACvD,SAAO,GAAG;sBACQ,MAAA,IAAU,gBAAgB,CAAC,GAAG,IAAI,IAAI,CAAC;;0BAEnC,IAAI,IAAI,CAAC;;;CAIjC,0BACE,KACA,EAAC,QAAQ,MAAM,SACf,QACA,UACA;EACA,MAAM,MAAyB;GAAC;GAAQ;GAAO;GAAQ;GAAS;AAChE,SAAO,GAAG;sBACQ,MAAA,IAAU,cAAc,CAAC,GAAG,IAAI,IAAI,CAAC;;0BAEjC,IAAI,IAAI,CAAC;;;;;;;CAQjC,MAAM,eAAe;AACnB,MAAI,MAAA,SAAe;GACjB,MAAM,EAAC,SAAS,YAAW,UAAU;AACrC,SAAA,MAAY,QAAQ,CAAC,SAAS,QAAQ,CAAC;AACvC,SAAM;;;CAIV,OAAO;AACL,MAAI,MAAA,SAAe;AACjB,SAAA,GAAS,OAAO,YAAY,MAAA,MAAY,MAAM,CAAC,oBAAoB;AACnE,SAAA,MAAY,QAAQ,OAAO;;AAE7B,SAAO,MAAA;;;AAIX,SAAS,aAAa,OAAuC;CAC3D,MAAM,EAAC,WAAW,WAAU;AAC5B,SAAQ,OAAO,KAAf;EACE,KAAK,QACH,QAAO,CAAC,WAAW;GAAC;GAAS;GAAQ,EAAC,iBAAiB,WAAU;GAAC,CAAC;EACrE,KAAK,SACH,QAAO,CAAC,WAAW;GAAC;GAAU;GAAQ,EAAC,WAAU;GAAC,CAAC;EACrD,KAAK,WACH,QAAO,CAAC,WAAW,CAAC,YAAY,OAAO,CAAC;EAC1C,QACE,QAAO,CAAC,WAAW,CAAC,QAAQ,OAAO,CAAC;;;AAI1C,IAAa,YAAb,MAAuB;CACrB;CACA;CACA;CACA;CAEA,YACE,IACA,IACA,gBACA,WACA;AACA,QAAA,KAAW;AACX,QAAA,KAAW;AACX,OAAK,iBAAiB;AACtB,OAAK,eAAe;;CAGtB,YAAY;CAE
Z,MAAM,UAAU;AACd,MAAI,MAAA,SACF;AAEF,QAAA,WAAiB;AACjB,QAAA,GAAS,SAAS;AAClB,QAAM,MAAA,GACH,MAAM,CACN,OAAM,MAAK,MAAA,GAAS,OAAO,iCAAiC,EAAE,CAAC;AAClE,QAAA,GAAS,OAAO,0BAA0B,KAAK,eAAe;;;AAIlE,IAAa,cAAb,MAAyB;CACvB;CACA;CACA;CAEA,YAAY,IAAgB,OAAgB,IAAgB;AAC1D,QAAA,KAAW,GAAG,YAAY,aAAa,eAAe;AACtD,QAAA,QAAc;AACd,QAAA,KAAW;;CAIb,KAAK,OAAe;AAClB,SAAO,MAAA,GAAS,GAAG,UAAU,MAAA,MAAY,CAAC,GAAG,QAAQ;;CAGvD,MAAM,UAAU;EACd,MAAM,KAAK,IAAI,gBAAgB,MAAA,IAAU,EAAC,MAAM,gBAAoB,CAAC,CAAC,IACpE,MAAA,GACD;EACD,MAAM,MAAM,MAAM,GAAG,iBACnB,QAAO,GAA0B;8BACT,MAAA,IAAU,YAAY,CAAC;;;MAIhD;AACD,MAAI,IAAI,WAAW,GAAG;AACpB,SAAA,GAAS,OAAO,6CAA6C;AAC7D,MAAG,SAAS;AACZ,SAAM,GAAG,MAAM;AACf,UAAO;;EAET,MAAM,CAAC,EAAC,eAAc;EACtB,MAAM,CAAC,EAAC,oBAAmB,MAAM,GAAG,iBAClC,QAAO,GAA+B;uCACL,MAAA,IAAU,oBAAoB,CAAC;QAEjE;AACD,QAAA,GAAS,OACP,oBAAoB,UAAU,kCAAkC,iBACjE;AACD,SAAO,IAAI,UAAU,MAAA,IAAU,IAAI,gBAAgB,UAAU"}
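The storer source embedded in the source map above throttles its producer with back pressure: once the running estimate of queued change-log bytes exceeds a configured threshold, `readyForMore()` hands back an unresolved promise, and the gate is only released after the estimate falls below roughly 80% of that threshold. The standalone sketch below illustrates just that gate; the names (`BackPressureGate`, `enqueued`, `flushed`) are illustrative and are not part of the package's API.

```ts
// Sketch of the back-pressure gate seen in the storer source above
// (illustrative names, not the shipped implementation). Producers await the
// promise returned by enqueued() once the byte estimate crosses the
// threshold; the gate opens again when usage falls below ~80% of it.
function resolver() {
  let resolve!: () => void;
  const promise = new Promise<void>(r => (resolve = r));
  return {promise, resolve};
}

class BackPressureGate {
  #queuedBytes = 0;
  #pending: ReturnType<typeof resolver> | null = null;

  constructor(readonly thresholdBytes: number) {}

  // Record `size` queued bytes; returns a promise to await when over threshold.
  enqueued(size: number): Promise<void> | undefined {
    this.#queuedBytes += size;
    if (this.#pending === null && this.#queuedBytes > this.thresholdBytes) {
      this.#pending = resolver();
    }
    return this.#pending?.promise;
  }

  // Record `size` bytes flushed downstream; release the gate below 80% usage.
  flushed(size: number) {
    this.#queuedBytes -= size;
    if (this.#pending !== null && this.#queuedBytes < this.thresholdBytes * 0.8) {
      this.#pending.resolve();
      this.#pending = null;
    }
  }
}
```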
@@ -1 +1 @@
1
- {"version":3,"file":"change-log.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/replicator/schema/change-log.ts"],"names":[],"mappings":"AAKA,OAAO,KAAK,CAAC,MAAM,qCAAqC,CAAC;AACzD,OAAO,KAAK,EAAC,QAAQ,EAAY,MAAM,iCAAiC,CAAC;AACzE,OAAO,KAAK,EAAC,WAAW,EAAC,MAAM,gCAAgC,CAAC;AAChE,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,wBAAwB,CAAC;AAGvD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA6BG;AAEH,eAAO,MAAM,MAAM,MAAM,CAAC;AAC1B,eAAO,MAAM,MAAM,MAAM,CAAC;AAC1B,eAAO,MAAM,WAAW,MAAM,CAAC;AAC/B,eAAO,MAAM,QAAQ,MAAM,CAAC;AAG5B,eAAO,MAAM,uBAAuB,sbAmCjC,CAAC;AAEJ;;;;;GAKG;AACH,eAAO,MAAM,oBAAoB;;;;;EAc5B,CAAC;AAEN,MAAM,MAAM,cAAc,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,oBAAoB,CAAC,CAAC;AAElE,QAAA,MAAM,uBAAuB;;;;;;aAQ3B,CAAC;AAEH,MAAM,MAAM,iBAAiB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,uBAAuB,CAAC,CAAC;AAExE,qBAAa,SAAS;;gBAMR,EAAE,EAAE,QAAQ;IAsCxB;;;;;;;;;;OAUG;IACH,QAAQ,CACN,OAAO,EAAE,WAAW,EACpB,GAAG,EAAE,MAAM,EACX,KAAK,EAAE,MAAM,EACb,GAAG,EAAE,UAAU,EACf,UAAU,EAAE,MAAM,EAAE,GAAG,SAAS,GAC/B,MAAM;IAIT,WAAW,CACT,OAAO,EAAE,WAAW,EACpB,GAAG,EAAE,MAAM,EACX,KAAK,EAAE,MAAM,EACb,GAAG,EAAE,UAAU,GACd,MAAM;IAOT,cAAc,CAAC,KAAK,EAAE,MAAM,EAAE,GAAG,EAAE,UAAU;;;;;;;IAoC7C,aAAa,CAAC,OAAO,EAAE,WAAW,EAAE,KAAK,EAAE,MAAM;IAIjD,UAAU,CAAC,OAAO,EAAE,WAAW,EAAE,KAAK,EAAE,MAAM;CAG/C"}
1
+ {"version":3,"file":"change-log.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/replicator/schema/change-log.ts"],"names":[],"mappings":"AAKA,OAAO,KAAK,CAAC,MAAM,qCAAqC,CAAC;AACzD,OAAO,KAAK,EAAC,QAAQ,EAAY,MAAM,iCAAiC,CAAC;AACzE,OAAO,KAAK,EAAC,WAAW,EAAC,MAAM,gCAAgC,CAAC;AAChE,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,wBAAwB,CAAC;AAGvD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA6BG;AAEH,eAAO,MAAM,MAAM,MAAM,CAAC;AAC1B,eAAO,MAAM,MAAM,MAAM,CAAC;AAC1B,eAAO,MAAM,WAAW,MAAM,CAAC;AAC/B,eAAO,MAAM,QAAQ,MAAM,CAAC;AAG5B,eAAO,MAAM,uBAAuB,sbAmCjC,CAAC;AAEJ;;;;;GAKG;AACH,eAAO,MAAM,oBAAoB;;;;;EAc5B,CAAC;AAEN,MAAM,MAAM,cAAc,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,oBAAoB,CAAC,CAAC;AAElE,QAAA,MAAM,uBAAuB;;;;;;aAQ3B,CAAC;AAEH,MAAM,MAAM,iBAAiB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,uBAAuB,CAAC,CAAC;AAExE,qBAAa,SAAS;;gBAMR,EAAE,EAAE,QAAQ;IAuCxB;;;;;;;;;;OAUG;IACH,QAAQ,CACN,OAAO,EAAE,WAAW,EACpB,GAAG,EAAE,MAAM,EACX,KAAK,EAAE,MAAM,EACb,GAAG,EAAE,UAAU,EACf,UAAU,EAAE,MAAM,EAAE,GAAG,SAAS,GAC/B,MAAM;IAIT,WAAW,CACT,OAAO,EAAE,WAAW,EACpB,GAAG,EAAE,MAAM,EACX,KAAK,EAAE,MAAM,EACb,GAAG,EAAE,UAAU,GACd,MAAM;IAOT,cAAc,CAAC,KAAK,EAAE,MAAM,EAAE,GAAG,EAAE,UAAU;;;;;;;IAoC7C,aAAa,CAAC,OAAO,EAAE,WAAW,EAAE,KAAK,EAAE,MAAM;IAIjD,UAAU,CAAC,OAAO,EAAE,WAAW,EAAE,KAAK,EAAE,MAAM;CAG/C"}
@@ -1 +1 @@
1
- {"version":3,"file":"change-log.js","names":["#logRowOpStmt","#logRowOpWithBackfillStmt","#logTableWideOpStmt","#getRowOpStmt","#logRowOp"],"sources":["../../../../../../../zero-cache/src/services/replicator/schema/change-log.ts"],"sourcesContent":["import {\n jsonObjectSchema,\n parse,\n stringify,\n} from '../../../../../shared/src/bigint-json.ts';\nimport * as v from '../../../../../shared/src/valita.ts';\nimport type {Database, Statement} from '../../../../../zqlite/src/db.ts';\nimport type {LexiVersion} from '../../../types/lexi-version.ts';\nimport type {LiteRowKey} from '../../../types/lite.ts';\nimport {normalizedKeyOrder} from '../../../types/row-key.ts';\n\n/**\n * The Change Log tracks the last operation (set or delete) for each row in the\n * data base, ordered by state version; in other words, a cross-table\n * index of row changes ordered by version. This facilitates a minimal \"diff\"\n * of row changes needed to advance a pipeline from one state version to another.\n *\n * The Change Log stores identifiers only, i.e. it does not store contents.\n * A database snapshot at the previous version can be used to query a row's\n * old contents, if any, and the current snapshot can be used to query a row's\n * new contents. (In the common case, the new contents will have just been applied\n * and thus has a high likelihood of being in the SQLite cache.)\n *\n * There are two table-wide operations:\n * - `t` corresponds to the postgres `TRUNCATE` operation\n * - `r` represents any schema (i.e. column) change\n *\n * For both operations, the corresponding row changes are not explicitly included\n * in the change log. The consumer has the option of simulating them be reading\n * from pre- and post- snapshots, or resetting their state entirely with the current\n * snapshot.\n *\n * To achieve the desired ordering semantics when processing tables that have been\n * truncated, reset, and modified, the \"rowKey\" is set to `null` for resets and\n * the empty string `\"\"` for truncates. This means that resets will be encountered\n * before truncates, which will be processed before any subsequent row changes.\n *\n * This ordering is chosen because resets are currently the more \"destructive\" op\n * and result in aborting the processing (and starting from scratch); doing this\n * earlier reduces wasted work.\n */\n\nexport const SET_OP = 's';\nexport const DEL_OP = 'd';\nexport const TRUNCATE_OP = 't';\nexport const RESET_OP = 'r';\n\n// Exported for testing (and migrations)\nexport const CREATE_CHANGELOG_SCHEMA =\n // stateVersion : a.k.a. row version\n // pos : order in which to process the change (within the version)\n // table : The table associated with the change\n // rowKey : JSON row key for a row change. 
For table-wide changes RESET\n // and TRUNCATE, there is no associated row; instead, `pos` is\n // set to -1 and the rowKey is set to the stateVersion,\n // guaranteeing when attempting to process the transaction,\n // the pipeline is reset (and the change log traversal\n // aborted).\n // op : 's' for set (insert/update)\n // : 'd' for delete\n // : 'r' for table reset (schema change)\n // : 't' for table truncation (which also resets the pipeline)\n // backfillingColumnVersions\n // : A JSON mapping from column name to stateVersion tracked\n // for replicated writes of columns that are being backfilled.\n // This is used to prevent backfill data, which is at a\n // fixed snapshot/version outside of the replication stream,\n // from overwriting newer column values.\n //\n // Naming note: To maintain compatibility between a new replication-manager\n // and old view-syncers, the previous _zero.changeLog table is preserved\n // and its replacement given a new name \"changeLog2\".\n `\n CREATE TABLE \"_zero.changeLog2\" (\n \"stateVersion\" TEXT NOT NULL,\n \"pos\" INT NOT NULL,\n \"table\" TEXT NOT NULL,\n \"rowKey\" TEXT NOT NULL,\n \"op\" TEXT NOT NULL,\n \"backfillingColumnVersions\" TEXT DEFAULT '{}',\n PRIMARY KEY(\"stateVersion\", \"pos\"),\n UNIQUE(\"table\", \"rowKey\")\n );\n `;\n\n/**\n * Contains the changeLog fields relevant for computing the diff between\n * two snapshots of a replica. The `pos` and `backfillingColumnVersions`\n * fields are excluded, though the query should be ordered by\n * `<stateVersion, pos>`.\n */\nexport const changeLogEntrySchema = v\n .object({\n stateVersion: v.string(),\n table: v.string(),\n rowKey: v.string(),\n op: v.literalUnion(SET_OP, DEL_OP, TRUNCATE_OP, RESET_OP),\n })\n .map(val => ({\n ...val,\n // Note: sets the rowKey to `null` for table-wide ops / resets\n rowKey:\n val.op === 't' || val.op === 'r'\n ? 
null\n : v.parse(parse(val.rowKey), jsonObjectSchema),\n }));\n\nexport type ChangeLogEntry = v.Infer<typeof changeLogEntrySchema>;\n\nconst rawChangeLogEntrySchema = v.object({\n stateVersion: v.string(),\n table: v.string(),\n rowKey: v.string(),\n op: v.literalUnion(SET_OP, DEL_OP, TRUNCATE_OP, RESET_OP),\n backfillingColumnVersions: v\n .string()\n .map(val => v.record(v.string()).parse(JSON.parse(val))),\n});\n\nexport type RawChangeLogEntry = v.Infer<typeof rawChangeLogEntrySchema>;\n\nexport class ChangeLog {\n readonly #logRowOpStmt: Statement;\n readonly #logRowOpWithBackfillStmt: Statement;\n readonly #logTableWideOpStmt;\n readonly #getRowOpStmt: Statement;\n\n constructor(db: Database) {\n this.#logRowOpStmt = db.prepare(/*sql*/ `\n INSERT OR REPLACE INTO \"_zero.changeLog2\" \n (stateVersion, pos, \"table\", rowKey, op)\n VALUES (@version, @pos, @table, JSON(@rowKey), @op)\n `);\n\n this.#logRowOpWithBackfillStmt = db.prepare(/*sql*/ `\n INSERT INTO \"_zero.changeLog2\" \n (stateVersion, pos, \"table\", rowKey, op, backfillingColumnVersions)\n VALUES (@version, @pos, @table, JSON(@rowKey), @op, \n JSON(@backfillingColumnVersions))\n ON CONFLICT (\"table\", rowKey) DO UPDATE \n SET stateVersion = excluded.stateVersion,\n pos = excluded.pos,\n op = excluded.op,\n backfillingColumnVersions = json_patch(\n backfillingColumnVersions, excluded.backfillingColumnVersions)\n `);\n\n // Because table-wide ops result in aborting an incremental update\n // and rehydrating all queries at \"head\", they are assigned pos = -1\n // as an optimization to abort as early as possible to skip unnecessary\n // updates.\n //\n // However, changeLog entries that are destined to be \"skipped\" are\n // nonetheless kept for the purpose of tracking backfillingColumnVersions.\n this.#logTableWideOpStmt = db.prepare(/*sql*/ `\n INSERT OR REPLACE INTO \"_zero.changeLog2\" \n (stateVersion, pos, \"table\", rowKey, op) \n VALUES (@version, -1, @table, @version, @op)\n `);\n\n this.#getRowOpStmt = db.prepare(/*sql*/ `\n SELECT * FROM \"_zero.changeLog2\" WHERE \"table\" = ? AND \"rowKey\" = JSON(?)\n `);\n }\n\n /**\n *\n * @param backfilled The backfilling columns for which values were set. Note\n * that an empty list and the `undefined` value mean different things;\n * * An empty list indicates that a backfill is in progress but no\n * backfilling values were set. In this case, existing\n * backfillingColumnVersions are preserved.\n * * `undefined` indicates that there are no columns being backfilled.\n * In this case, any vestigial `backfillingColumnVersions` value\n * is cleared.\n */\n logSetOp(\n version: LexiVersion,\n pos: number,\n table: string,\n row: LiteRowKey,\n backfilled: string[] | undefined,\n ): string {\n return this.#logRowOp(version, pos, table, row, SET_OP, backfilled);\n }\n\n logDeleteOp(\n version: LexiVersion,\n pos: number,\n table: string,\n row: LiteRowKey,\n ): string {\n // Note: For delete ops, it is always safe to clear the\n // backfillingColumnVersions because the backfill algorithm\n // understands that deletes apply to the whole row.\n return this.#logRowOp(version, pos, table, row, DEL_OP, undefined);\n }\n\n getLatestRowOp(table: string, row: LiteRowKey) {\n const rowKey = stringify(normalizedKeyOrder(row));\n const result = this.#getRowOpStmt.get(table, rowKey);\n return result === undefined\n ? 
undefined\n : v.parse(result, rawChangeLogEntrySchema, 'passthrough');\n }\n\n #logRowOp(\n version: LexiVersion,\n pos: number,\n table: string,\n row: LiteRowKey,\n op: string,\n backfilled: string[] | undefined,\n ): string {\n const rowKey = stringify(normalizedKeyOrder(row));\n if (backfilled === undefined) {\n this.#logRowOpStmt.run({version, pos, table, rowKey, op});\n } else {\n const versions: Record<string, string> = {};\n for (const col of backfilled) {\n versions[col] = version;\n }\n this.#logRowOpWithBackfillStmt.run({\n version,\n pos,\n table,\n rowKey,\n op,\n backfillingColumnVersions: JSON.stringify(versions),\n });\n }\n return rowKey;\n }\n\n logTruncateOp(version: LexiVersion, table: string) {\n this.#logTableWideOpStmt.run({version, table, op: TRUNCATE_OP});\n }\n\n logResetOp(version: LexiVersion, table: string) {\n this.#logTableWideOpStmt.run({version, table, op: RESET_OP});\n }\n}\n"],"mappings":";;;AAgDA,IAAa,0BAwBX;;;;;;;;;;;;;;;;;;AAmBF,IAAa,uBAAuB,eACjC,OAAO;CACN,cAAc,eAAE,QAAQ;CACxB,OAAO,eAAE,QAAQ;CACjB,QAAQ,eAAE,QAAQ;CAClB,IAAI,aAAA,KAAA,KAAA,KAAA,IAAqD;CAC1D,CAAC,CACD,KAAI,SAAQ;CACX,GAAG;CAEH,QACE,IAAI,OAAO,OAAO,IAAI,OAAO,MACzB,OACA,MAAQ,QAAM,IAAI,OAAO,EAAE,iBAAiB;CACnD,EAAE;AAIL,IAAM,0BAA0B,eAAE,OAAO;CACvC,cAAc,eAAE,QAAQ;CACxB,OAAO,eAAE,QAAQ;CACjB,QAAQ,eAAE,QAAQ;CAClB,IAAI,aAAA,KAAA,KAAA,KAAA,IAAqD;CACzD,2BAA2B,eACxB,QAAQ,CACR,KAAI,QAAO,eAAE,OAAO,eAAE,QAAQ,CAAC,CAAC,MAAM,KAAK,MAAM,IAAI,CAAC,CAAC;CAC3D,CAAC;AAIF,IAAa,YAAb,MAAuB;CACrB;CACA;CACA;CACA;CAEA,YAAY,IAAc;AACxB,QAAA,eAAqB,GAAG,QAAgB;;;;MAItC;AAEF,QAAA,2BAAiC,GAAG,QAAgB;;;;;;;;;;;MAWlD;AASF,QAAA,qBAA2B,GAAG,QAAgB;;;;MAI5C;AAEF,QAAA,eAAqB,GAAG,QAAgB;;MAEtC;;;;;;;;;;;;;CAcJ,SACE,SACA,KACA,OACA,KACA,YACQ;AACR,SAAO,MAAA,SAAe,SAAS,KAAK,OAAO,KAAA,KAAa,WAAW;;CAGrE,YACE,SACA,KACA,OACA,KACQ;AAIR,SAAO,MAAA,SAAe,SAAS,KAAK,OAAO,KAAA,KAAa,KAAA,EAAU;;CAGpE,eAAe,OAAe,KAAiB;EAC7C,MAAM,SAAS,UAAU,mBAAmB,IAAI,CAAC;EACjD,MAAM,SAAS,MAAA,aAAmB,IAAI,OAAO,OAAO;AACpD,SAAO,WAAW,KAAA,IACd,KAAA,IACA,MAAQ,QAAQ,yBAAyB,cAAc;;CAG7D,UACE,SACA,KACA,OACA,KACA,IACA,YACQ;EACR,MAAM,SAAS,UAAU,mBAAmB,IAAI,CAAC;AACjD,MAAI,eAAe,KAAA,EACjB,OAAA,aAAmB,IAAI;GAAC;GAAS;GAAK;GAAO;GAAQ;GAAG,CAAC;OACpD;GACL,MAAM,WAAmC,EAAE;AAC3C,QAAK,MAAM,OAAO,WAChB,UAAS,OAAO;AAElB,SAAA,yBAA+B,IAAI;IACjC;IACA;IACA;IACA;IACA;IACA,2BAA2B,KAAK,UAAU,SAAS;IACpD,CAAC;;AAEJ,SAAO;;CAGT,cAAc,SAAsB,OAAe;AACjD,QAAA,mBAAyB,IAAI;GAAC;GAAS;GAAO,IAAA;GAAgB,CAAC;;CAGjE,WAAW,SAAsB,OAAe;AAC9C,QAAA,mBAAyB,IAAI;GAAC;GAAS;GAAO,IAAA;GAAa,CAAC"}
1
+ {"version":3,"file":"change-log.js","names":["#logRowOpStmt","#logRowOpWithBackfillStmt","#logTableWideOpStmt","#getRowOpStmt","#logRowOp"],"sources":["../../../../../../../zero-cache/src/services/replicator/schema/change-log.ts"],"sourcesContent":["import {\n jsonObjectSchema,\n parse,\n stringify,\n} from '../../../../../shared/src/bigint-json.ts';\nimport * as v from '../../../../../shared/src/valita.ts';\nimport type {Database, Statement} from '../../../../../zqlite/src/db.ts';\nimport type {LexiVersion} from '../../../types/lexi-version.ts';\nimport type {LiteRowKey} from '../../../types/lite.ts';\nimport {normalizedKeyOrder} from '../../../types/row-key.ts';\n\n/**\n * The Change Log tracks the last operation (set or delete) for each row in the\n * data base, ordered by state version; in other words, a cross-table\n * index of row changes ordered by version. This facilitates a minimal \"diff\"\n * of row changes needed to advance a pipeline from one state version to another.\n *\n * The Change Log stores identifiers only, i.e. it does not store contents.\n * A database snapshot at the previous version can be used to query a row's\n * old contents, if any, and the current snapshot can be used to query a row's\n * new contents. (In the common case, the new contents will have just been applied\n * and thus has a high likelihood of being in the SQLite cache.)\n *\n * There are two table-wide operations:\n * - `t` corresponds to the postgres `TRUNCATE` operation\n * - `r` represents any schema (i.e. column) change\n *\n * For both operations, the corresponding row changes are not explicitly included\n * in the change log. The consumer has the option of simulating them be reading\n * from pre- and post- snapshots, or resetting their state entirely with the current\n * snapshot.\n *\n * To achieve the desired ordering semantics when processing tables that have been\n * truncated, reset, and modified, the \"rowKey\" is set to `null` for resets and\n * the empty string `\"\"` for truncates. This means that resets will be encountered\n * before truncates, which will be processed before any subsequent row changes.\n *\n * This ordering is chosen because resets are currently the more \"destructive\" op\n * and result in aborting the processing (and starting from scratch); doing this\n * earlier reduces wasted work.\n */\n\nexport const SET_OP = 's';\nexport const DEL_OP = 'd';\nexport const TRUNCATE_OP = 't';\nexport const RESET_OP = 'r';\n\n// Exported for testing (and migrations)\nexport const CREATE_CHANGELOG_SCHEMA =\n // stateVersion : a.k.a. row version\n // pos : order in which to process the change (within the version)\n // table : The table associated with the change\n // rowKey : JSON row key for a row change. 
For table-wide changes RESET\n // and TRUNCATE, there is no associated row; instead, `pos` is\n // set to -1 and the rowKey is set to the stateVersion,\n // guaranteeing when attempting to process the transaction,\n // the pipeline is reset (and the change log traversal\n // aborted).\n // op : 's' for set (insert/update)\n // : 'd' for delete\n // : 'r' for table reset (schema change)\n // : 't' for table truncation (which also resets the pipeline)\n // backfillingColumnVersions\n // : A JSON mapping from column name to stateVersion tracked\n // for replicated writes of columns that are being backfilled.\n // This is used to prevent backfill data, which is at a\n // fixed snapshot/version outside of the replication stream,\n // from overwriting newer column values.\n //\n // Naming note: To maintain compatibility between a new replication-manager\n // and old view-syncers, the previous _zero.changeLog table is preserved\n // and its replacement given a new name \"changeLog2\".\n `\n CREATE TABLE \"_zero.changeLog2\" (\n \"stateVersion\" TEXT NOT NULL,\n \"pos\" INT NOT NULL,\n \"table\" TEXT NOT NULL,\n \"rowKey\" TEXT NOT NULL,\n \"op\" TEXT NOT NULL,\n \"backfillingColumnVersions\" TEXT DEFAULT '{}',\n PRIMARY KEY(\"stateVersion\", \"pos\"),\n UNIQUE(\"table\", \"rowKey\")\n );\n `;\n\n/**\n * Contains the changeLog fields relevant for computing the diff between\n * two snapshots of a replica. The `pos` and `backfillingColumnVersions`\n * fields are excluded, though the query should be ordered by\n * `<stateVersion, pos>`.\n */\nexport const changeLogEntrySchema = v\n .object({\n stateVersion: v.string(),\n table: v.string(),\n rowKey: v.string(),\n op: v.literalUnion(SET_OP, DEL_OP, TRUNCATE_OP, RESET_OP),\n })\n .map(val => ({\n ...val,\n // Note: sets the rowKey to `null` for table-wide ops / resets\n rowKey:\n val.op === 't' || val.op === 'r'\n ? 
null\n : v.parse(parse(val.rowKey), jsonObjectSchema),\n }));\n\nexport type ChangeLogEntry = v.Infer<typeof changeLogEntrySchema>;\n\nconst rawChangeLogEntrySchema = v.object({\n stateVersion: v.string(),\n table: v.string(),\n rowKey: v.string(),\n op: v.literalUnion(SET_OP, DEL_OP, TRUNCATE_OP, RESET_OP),\n backfillingColumnVersions: v\n .string()\n .map(val => v.record(v.string()).parse(JSON.parse(val))),\n});\n\nexport type RawChangeLogEntry = v.Infer<typeof rawChangeLogEntrySchema>;\n\nexport class ChangeLog {\n readonly #logRowOpStmt: Statement;\n readonly #logRowOpWithBackfillStmt: Statement;\n readonly #logTableWideOpStmt;\n readonly #getRowOpStmt: Statement;\n\n constructor(db: Database) {\n this.#logRowOpStmt = db.prepare(/*sql*/ `\n INSERT OR REPLACE INTO \"_zero.changeLog2\" \n (stateVersion, pos, \"table\", rowKey, op)\n VALUES (@version, @pos, @table, JSON(@rowKey), @op)\n `);\n\n this.#logRowOpWithBackfillStmt = db.prepare(/*sql*/ `\n INSERT INTO \"_zero.changeLog2\" \n (stateVersion, pos, \"table\", rowKey, op, backfillingColumnVersions)\n VALUES (@version, @pos, @table, JSON(@rowKey), @op, \n JSON(@backfillingColumnVersions))\n ON CONFLICT (\"table\", rowKey) DO UPDATE \n SET stateVersion = excluded.stateVersion,\n pos = excluded.pos,\n op = excluded.op,\n backfillingColumnVersions = json_patch(\n backfillingColumnVersions, excluded.backfillingColumnVersions)\n `);\n\n // Because table-wide ops result in aborting an incremental update\n // and rehydrating all queries at \"head\", they are assigned pos = -1\n // as an optimization to abort as early as possible to skip unnecessary\n // updates.\n //\n // However, changeLog entries that are destined to be \"skipped\" are\n // nonetheless kept for the purpose of tracking backfillingColumnVersions.\n this.#logTableWideOpStmt = db.prepare(/*sql*/ `\n INSERT OR REPLACE INTO \"_zero.changeLog2\" \n (stateVersion, pos, \"table\", rowKey, op) \n VALUES (@version, -1, @table, @version, @op)\n `);\n\n // oxlint-disable-next-line zero/no-select-star -- Local SQLite replica query; not run through pg prepared statements.\n this.#getRowOpStmt = db.prepare(/*sql*/ `\n SELECT * FROM \"_zero.changeLog2\" WHERE \"table\" = ? AND \"rowKey\" = JSON(?)\n `);\n }\n\n /**\n *\n * @param backfilled The backfilling columns for which values were set. Note\n * that an empty list and the `undefined` value mean different things;\n * * An empty list indicates that a backfill is in progress but no\n * backfilling values were set. In this case, existing\n * backfillingColumnVersions are preserved.\n * * `undefined` indicates that there are no columns being backfilled.\n * In this case, any vestigial `backfillingColumnVersions` value\n * is cleared.\n */\n logSetOp(\n version: LexiVersion,\n pos: number,\n table: string,\n row: LiteRowKey,\n backfilled: string[] | undefined,\n ): string {\n return this.#logRowOp(version, pos, table, row, SET_OP, backfilled);\n }\n\n logDeleteOp(\n version: LexiVersion,\n pos: number,\n table: string,\n row: LiteRowKey,\n ): string {\n // Note: For delete ops, it is always safe to clear the\n // backfillingColumnVersions because the backfill algorithm\n // understands that deletes apply to the whole row.\n return this.#logRowOp(version, pos, table, row, DEL_OP, undefined);\n }\n\n getLatestRowOp(table: string, row: LiteRowKey) {\n const rowKey = stringify(normalizedKeyOrder(row));\n const result = this.#getRowOpStmt.get(table, rowKey);\n return result === undefined\n ? 
undefined\n : v.parse(result, rawChangeLogEntrySchema, 'passthrough');\n }\n\n #logRowOp(\n version: LexiVersion,\n pos: number,\n table: string,\n row: LiteRowKey,\n op: string,\n backfilled: string[] | undefined,\n ): string {\n const rowKey = stringify(normalizedKeyOrder(row));\n if (backfilled === undefined) {\n this.#logRowOpStmt.run({version, pos, table, rowKey, op});\n } else {\n const versions: Record<string, string> = {};\n for (const col of backfilled) {\n versions[col] = version;\n }\n this.#logRowOpWithBackfillStmt.run({\n version,\n pos,\n table,\n rowKey,\n op,\n backfillingColumnVersions: JSON.stringify(versions),\n });\n }\n return rowKey;\n }\n\n logTruncateOp(version: LexiVersion, table: string) {\n this.#logTableWideOpStmt.run({version, table, op: TRUNCATE_OP});\n }\n\n logResetOp(version: LexiVersion, table: string) {\n this.#logTableWideOpStmt.run({version, table, op: RESET_OP});\n }\n}\n"],"mappings":";;;AAgDA,IAAa,0BAwBX;;;;;;;;;;;;;;;;;;AAmBF,IAAa,uBAAuB,eACjC,OAAO;CACN,cAAc,eAAE,QAAQ;CACxB,OAAO,eAAE,QAAQ;CACjB,QAAQ,eAAE,QAAQ;CAClB,IAAI,aAAA,KAAA,KAAA,KAAA,IAAqD;CAC1D,CAAC,CACD,KAAI,SAAQ;CACX,GAAG;CAEH,QACE,IAAI,OAAO,OAAO,IAAI,OAAO,MACzB,OACA,MAAQ,QAAM,IAAI,OAAO,EAAE,iBAAiB;CACnD,EAAE;AAIL,IAAM,0BAA0B,eAAE,OAAO;CACvC,cAAc,eAAE,QAAQ;CACxB,OAAO,eAAE,QAAQ;CACjB,QAAQ,eAAE,QAAQ;CAClB,IAAI,aAAA,KAAA,KAAA,KAAA,IAAqD;CACzD,2BAA2B,eACxB,QAAQ,CACR,KAAI,QAAO,eAAE,OAAO,eAAE,QAAQ,CAAC,CAAC,MAAM,KAAK,MAAM,IAAI,CAAC,CAAC;CAC3D,CAAC;AAIF,IAAa,YAAb,MAAuB;CACrB;CACA;CACA;CACA;CAEA,YAAY,IAAc;AACxB,QAAA,eAAqB,GAAG,QAAgB;;;;MAItC;AAEF,QAAA,2BAAiC,GAAG,QAAgB;;;;;;;;;;;MAWlD;AASF,QAAA,qBAA2B,GAAG,QAAgB;;;;MAI5C;AAGF,QAAA,eAAqB,GAAG,QAAgB;;MAEtC;;;;;;;;;;;;;CAcJ,SACE,SACA,KACA,OACA,KACA,YACQ;AACR,SAAO,MAAA,SAAe,SAAS,KAAK,OAAO,KAAA,KAAa,WAAW;;CAGrE,YACE,SACA,KACA,OACA,KACQ;AAIR,SAAO,MAAA,SAAe,SAAS,KAAK,OAAO,KAAA,KAAa,KAAA,EAAU;;CAGpE,eAAe,OAAe,KAAiB;EAC7C,MAAM,SAAS,UAAU,mBAAmB,IAAI,CAAC;EACjD,MAAM,SAAS,MAAA,aAAmB,IAAI,OAAO,OAAO;AACpD,SAAO,WAAW,KAAA,IACd,KAAA,IACA,MAAQ,QAAQ,yBAAyB,cAAc;;CAG7D,UACE,SACA,KACA,OACA,KACA,IACA,YACQ;EACR,MAAM,SAAS,UAAU,mBAAmB,IAAI,CAAC;AACjD,MAAI,eAAe,KAAA,EACjB,OAAA,aAAmB,IAAI;GAAC;GAAS;GAAK;GAAO;GAAQ;GAAG,CAAC;OACpD;GACL,MAAM,WAAmC,EAAE;AAC3C,QAAK,MAAM,OAAO,WAChB,UAAS,OAAO;AAElB,SAAA,yBAA+B,IAAI;IACjC;IACA;IACA;IACA;IACA;IACA,2BAA2B,KAAK,UAAU,SAAS;IACpD,CAAC;;AAEJ,SAAO;;CAGT,cAAc,SAAsB,OAAe;AACjD,QAAA,mBAAyB,IAAI;GAAC;GAAS;GAAO,IAAA;GAAgB,CAAC;;CAGjE,WAAW,SAAsB,OAAe;AAC9C,QAAA,mBAAyB,IAAI;GAAC;GAAS;GAAO,IAAA;GAAa,CAAC"}
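The `ChangeLog` source embedded above tracks `backfillingColumnVersions` as a per-column map from column name to the state version of replicated writes that land while that column is being backfilled; on conflict the stored JSON is merged with the new entries via `json_patch()`. The pure-TypeScript sketch below mirrors that merge to make the bookkeeping concrete; the function name and types are illustrative, not part of the package.

```ts
// Illustrative stand-in for the json_patch() merge of backfillingColumnVersions
// performed by the upsert in the ChangeLog source above (names are not the
// package's API). A replicated write at `version` that set values for the
// currently-backfilling `backfilled` columns records that version per column.
type ColumnVersions = Record<string, string>; // column name -> stateVersion

function mergeBackfillVersions(
  existing: ColumnVersions,
  backfilled: readonly string[],
  version: string,
): ColumnVersions {
  const merged: ColumnVersions = {...existing};
  for (const col of backfilled) {
    merged[col] = version; // later writes win, like json_patch() overwriting keys
  }
  return merged;
}

// Example: a row written at version '0a' while `email` was backfilling, then
// written again at '0b' while `email` and `phone` were backfilling.
const versions = mergeBackfillVersions({email: '0a'}, ['email', 'phone'], '0b');
// versions => {email: '0b', phone: '0b'}
```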
@@ -1 +1 @@
1
- {"version":3,"file":"cvr-store.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/view-syncer/cvr-store.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAiBjD,OAAO,KAAK,EAAC,eAAe,EAAC,MAAM,+CAA+C,CAAC;AAMnF,OAAO,EAAC,sBAAsB,EAAC,MAAM,iCAAiC,CAAC;AACvE,OAAO,KAAK,EAAC,UAAU,EAAE,mBAAmB,EAAC,MAAM,mBAAmB,CAAC;AAEvE,OAAO,EAAY,KAAK,OAAO,EAAC,MAAM,uBAAuB,CAAC;AAC9D,OAAO,KAAK,EAAQ,cAAc,EAAC,MAAM,qBAAqB,CAAC;AAC/D,OAAO,KAAK,EAAC,GAAG,EAAE,WAAW,EAAC,MAAM,UAAU,CAAC;AAE/C,OAAO,EAKL,KAAK,OAAO,EACb,MAAM,iBAAiB,CAAC;AACzB,OAAO,EAEL,KAAK,YAAY,EAGjB,KAAK,UAAU,EAGf,KAAK,kBAAkB,EACvB,KAAK,UAAU,EACf,KAAK,WAAW,EAEhB,KAAK,KAAK,EACV,KAAK,SAAS,EAGf,MAAM,mBAAmB,CAAC;AAC3B,OAAO,EACL,KAAK,QAAQ,EAGd,MAAM,gBAAgB,CAAC;AAExB,MAAM,MAAM,aAAa,GAAG;IAC1B,SAAS,EAAE,MAAM,CAAC;IAClB,OAAO,EAAE,MAAM,CAAC;IAChB,OAAO,EAAE,MAAM,CAAC;IAChB,OAAO,EAAE,MAAM,CAAC;IAChB,IAAI,EAAE,MAAM,CAAC;IACb,YAAY,EAAE,MAAM,CAAC;IACrB,UAAU,EAAE,MAAM,CAAC;CACpB,CAAC;AAqGF,qBAAa,QAAQ;;gBAmCjB,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,UAAU,EACjB,KAAK,EAAE,OAAO,EACd,MAAM,EAAE,MAAM,EACd,KAAK,EAAE,MAAM,EACb,WAAW,EAAE,CAAC,CAAC,EAAE,OAAO,KAAK,IAAI,EACjC,qBAAqB,SAA2B,EAChD,eAAe,SAAoB,EACnC,yBAAyB,SAAM,EAAE,qBAAqB;IACtD,YAAY,oBAAa;IAmC3B,IAAI,CAAC,EAAE,EAAE,UAAU,EAAE,eAAe,EAAE,MAAM,GAAG,OAAO,CAAC,GAAG,CAAC;IAyN3D,aAAa,IAAI,OAAO,CAAC,WAAW,CAAC,KAAK,EAAE,SAAS,CAAC,CAAC;IAIvD,YAAY,CAAC,GAAG,EAAE,SAAS,GAAG,IAAI;IAIlC;;;;;;;OAOG;IACH,YAAY,CAAC,EAAE,EAAE,KAAK,GAAG,IAAI;IAI7B;;;;OAIG;IACH,YAAY,CAAC,GAAG,GAAG,EAAE,KAAK,EAAE;IAM5B;;;;OAIG;IACG,cAAc,CAAC,QAAQ,EAAE,QAAQ,EAAE,UAAU,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;IAO3E;;;;;OAKG;IACG,WAAW,IAAI,OAAO,CAAC,QAAQ,GAAG,SAAS,CAAC;IAelD,WAAW,CAAC,EACV,OAAO,EACP,cAAc,EACd,UAAU,EACV,YAAY,EACZ,SAAS,EACT,QAAQ,GACT,EAAE,IAAI,CACL,WAAW,EACT,SAAS,GACT,gBAAgB,GAChB,YAAY,GACZ,cAAc,GACd,WAAW,GACX,UAAU,CACb,GAAG,IAAI;IAqBR,kBAAkB,CAAC,OAAO,EAAE,UAAU,EAAE,UAAU,EAAE,UAAU,GAAG,IAAI;IASrE,QAAQ,CAAC,KAAK,EAAE,WAAW,GAAG,IAAI;IAelC,WAAW,CAAC,KAAK,EAAE,WAAW;IAc9B,qBAAqB,CAAC,SAAS,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,GAAG,IAAI;IAIjE,YAAY,CAAC,MAAM,EAAE,YAAY,GAAG,IAAI;IAYxC,YAAY,CAAC,QAAQ,EAAE,MAAM;IAU7B,eAAe,CACb,UAAU,EAAE,UAAU,EACtB,KAAK,EAAE;QAAC,EAAE,EAAE,MAAM,CAAA;KAAC,EACnB,MAAM,EAAE;QAAC,EAAE,EAAE,MAAM,CAAA;KAAC,EACpB,OAAO,EAAE,OAAO,EAChB,aAAa,EAAE,QAAQ,GAAG,SAAS,EACnC,GAAG,EAAE,MAAM,GACV,IAAI;IAkBP,iBAAiB,CACf,EAAE,EAAE,UAAU,EACd,YAAY,EAAE,kBAAkB,EAChC,OAAO,EAAE,WAAW,EACpB,OAAO,EAAE,UAAU,EACnB,kBAAkB,GAAE,MAAM,EAAO,GAChC,cAAc,CAAC,OAAO,EAAE,EAAE,IAAI,EAAE,SAAS,CAAC;IAUvC,oBAAoB,CACxB,EAAE,EAAE,UAAU,EACd,YAAY,EAAE,kBAAkB,EAChC,OAAO,EAAE,WAAW,EACpB,OAAO,EAAE,UAAU,GAClB,OAAO,CAAC,cAAc,EAAE,CAAC;IAwe5B,IAAI,QAAQ,IAAI,MAAM,CAErB;IAEK,KAAK,CACT,EAAE,EAAE,UAAU,EACd,sBAAsB,EAAE,UAAU,EAClC,GAAG,EAAE,WAAW,EAChB,eAAe,EAAE,MAAM,GACtB,OAAO,CAAC,aAAa,GAAG,IAAI,CAAC;IAkChC,iBAAiB,IAAI,OAAO;IAI5B,qDAAqD;IACrD,OAAO,CAAC,EAAE,EAAE,UAAU,GAAG,OAAO,CAAC,IAAI,CAAC;IAIhC,cAAc,CAClB,EAAE,EAAE,UAAU,EACd,QAAQ,EAAE,QAAQ,EAClB,QAAQ,CAAC,EAAE,MAAM,GAChB,OAAO,CAAC,eAAe,EAAE,CAAC;CAsC9B;AAED;;;;GAIG;AACH,wBAAsB,YAAY,CAChC,EAAE,EAAE,mBAAmB,EACvB,MAAM,EAAE,MAAM,EACd,aAAa,EAAE,MAAM,EACrB,sBAAsB,EAAE,UAAU,GACjC,OAAO,CAAC,IAAI,CAAC,CAUf;AAED,qBAAa,mBAAoB,SAAQ,sBAAsB;gBACjD,OAAO,EAAE,MAAM;CAU5B;AAED,qBAAa,+BAAgC,SAAQ,sBAAsB;IACzE,QAAQ,CAAC,IAAI,qCAAqC;gBAEtC,eAAe,EAAE,MAAM,EAAE,aAAa,EAAE,MAAM;CAU3D;AAED,qBAAa,cAAe,SAAQ,sBAAsB;IACxD,QAAQ,CAAC,IAAI,oBAAoB;gBAG/B,KAAK,EAAE,MAAM,GAAG,IAAI,EACpB,SAAS,EAAE,MAAM,GAAG,IAAI,EACxB,eAAe,EAAE,MAAM;CAe1B;AAED,qBAAa,wBAAyB,SAAQ,sBAAsB;IAClE,QAAQ,CAAC,IAAI,8BAA8B;gBAE/B,KAAK,EAAE,OAAO;CAW3B;AAED,qBAAa,sBAAuB,SA
AQ,KAAK;IAC/C,QAAQ,CAAC,IAAI,4BAA4B;IACzC,QAAQ,CAAC,UAAU,EAAE,MAAM,CAAC;IAC5B,QAAQ,CAAC,WAAW,EAAE,MAAM,GAAG,IAAI,CAAC;gBAExB,UAAU,EAAE,MAAM,EAAE,WAAW,EAAE,MAAM,GAAG,IAAI;CAK3D"}
1
+ {"version":3,"file":"cvr-store.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/view-syncer/cvr-store.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAiBjD,OAAO,KAAK,EAAC,eAAe,EAAC,MAAM,+CAA+C,CAAC;AAMnF,OAAO,EAAC,sBAAsB,EAAC,MAAM,iCAAiC,CAAC;AACvE,OAAO,KAAK,EAAC,UAAU,EAAE,mBAAmB,EAAC,MAAM,mBAAmB,CAAC;AAEvE,OAAO,EAAY,KAAK,OAAO,EAAC,MAAM,uBAAuB,CAAC;AAC9D,OAAO,KAAK,EAAQ,cAAc,EAAC,MAAM,qBAAqB,CAAC;AAC/D,OAAO,KAAK,EAAC,GAAG,EAAE,WAAW,EAAC,MAAM,UAAU,CAAC;AAE/C,OAAO,EAKL,KAAK,OAAO,EACb,MAAM,iBAAiB,CAAC;AACzB,OAAO,EAEL,KAAK,YAAY,EAGjB,KAAK,UAAU,EAGf,KAAK,kBAAkB,EACvB,KAAK,UAAU,EACf,KAAK,WAAW,EAEhB,KAAK,KAAK,EACV,KAAK,SAAS,EAGf,MAAM,mBAAmB,CAAC;AAC3B,OAAO,EACL,KAAK,QAAQ,EAGd,MAAM,gBAAgB,CAAC;AAExB,MAAM,MAAM,aAAa,GAAG;IAC1B,SAAS,EAAE,MAAM,CAAC;IAClB,OAAO,EAAE,MAAM,CAAC;IAChB,OAAO,EAAE,MAAM,CAAC;IAChB,OAAO,EAAE,MAAM,CAAC;IAChB,IAAI,EAAE,MAAM,CAAC;IACb,YAAY,EAAE,MAAM,CAAC;IACrB,UAAU,EAAE,MAAM,CAAC;CACpB,CAAC;AAqGF,qBAAa,QAAQ;;gBAmCjB,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,UAAU,EACjB,KAAK,EAAE,OAAO,EACd,MAAM,EAAE,MAAM,EACd,KAAK,EAAE,MAAM,EACb,WAAW,EAAE,CAAC,CAAC,EAAE,OAAO,KAAK,IAAI,EACjC,qBAAqB,SAA2B,EAChD,eAAe,SAAoB,EACnC,yBAAyB,SAAM,EAAE,qBAAqB;IACtD,YAAY,oBAAa;IAmC3B,IAAI,CAAC,EAAE,EAAE,UAAU,EAAE,eAAe,EAAE,MAAM,GAAG,OAAO,CAAC,GAAG,CAAC;IAqO3D,aAAa,IAAI,OAAO,CAAC,WAAW,CAAC,KAAK,EAAE,SAAS,CAAC,CAAC;IAIvD,YAAY,CAAC,GAAG,EAAE,SAAS,GAAG,IAAI;IAIlC;;;;;;;OAOG;IACH,YAAY,CAAC,EAAE,EAAE,KAAK,GAAG,IAAI;IAI7B;;;;OAIG;IACH,YAAY,CAAC,GAAG,GAAG,EAAE,KAAK,EAAE;IAM5B;;;;OAIG;IACG,cAAc,CAAC,QAAQ,EAAE,QAAQ,EAAE,UAAU,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;IAO3E;;;;;OAKG;IACG,WAAW,IAAI,OAAO,CAAC,QAAQ,GAAG,SAAS,CAAC;IAelD,WAAW,CAAC,EACV,OAAO,EACP,cAAc,EACd,UAAU,EACV,YAAY,EACZ,SAAS,EACT,QAAQ,GACT,EAAE,IAAI,CACL,WAAW,EACT,SAAS,GACT,gBAAgB,GAChB,YAAY,GACZ,cAAc,GACd,WAAW,GACX,UAAU,CACb,GAAG,IAAI;IAqBR,kBAAkB,CAAC,OAAO,EAAE,UAAU,EAAE,UAAU,EAAE,UAAU,GAAG,IAAI;IASrE,QAAQ,CAAC,KAAK,EAAE,WAAW,GAAG,IAAI;IAelC,WAAW,CAAC,KAAK,EAAE,WAAW;IAc9B,qBAAqB,CAAC,SAAS,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,GAAG,IAAI;IAIjE,YAAY,CAAC,MAAM,EAAE,YAAY,GAAG,IAAI;IAYxC,YAAY,CAAC,QAAQ,EAAE,MAAM;IAU7B,eAAe,CACb,UAAU,EAAE,UAAU,EACtB,KAAK,EAAE;QAAC,EAAE,EAAE,MAAM,CAAA;KAAC,EACnB,MAAM,EAAE;QAAC,EAAE,EAAE,MAAM,CAAA;KAAC,EACpB,OAAO,EAAE,OAAO,EAChB,aAAa,EAAE,QAAQ,GAAG,SAAS,EACnC,GAAG,EAAE,MAAM,GACV,IAAI;IAkBP,iBAAiB,CACf,EAAE,EAAE,UAAU,EACd,YAAY,EAAE,kBAAkB,EAChC,OAAO,EAAE,WAAW,EACpB,OAAO,EAAE,UAAU,EACnB,kBAAkB,GAAE,MAAM,EAAO,GAChC,cAAc,CAAC,OAAO,EAAE,EAAE,IAAI,EAAE,SAAS,CAAC;IAUvC,oBAAoB,CACxB,EAAE,EAAE,UAAU,EACd,YAAY,EAAE,kBAAkB,EAChC,OAAO,EAAE,WAAW,EACpB,OAAO,EAAE,UAAU,GAClB,OAAO,CAAC,cAAc,EAAE,CAAC;IAgf5B,IAAI,QAAQ,IAAI,MAAM,CAErB;IAEK,KAAK,CACT,EAAE,EAAE,UAAU,EACd,sBAAsB,EAAE,UAAU,EAClC,GAAG,EAAE,WAAW,EAChB,eAAe,EAAE,MAAM,GACtB,OAAO,CAAC,aAAa,GAAG,IAAI,CAAC;IAkChC,iBAAiB,IAAI,OAAO;IAI5B,qDAAqD;IACrD,OAAO,CAAC,EAAE,EAAE,UAAU,GAAG,OAAO,CAAC,IAAI,CAAC;IAIhC,cAAc,CAClB,EAAE,EAAE,UAAU,EACd,QAAQ,EAAE,QAAQ,EAClB,QAAQ,CAAC,EAAE,MAAM,GAChB,OAAO,CAAC,eAAe,EAAE,CAAC;CAsC9B;AAED;;;;GAIG;AACH,wBAAsB,YAAY,CAChC,EAAE,EAAE,mBAAmB,EACvB,MAAM,EAAE,MAAM,EACd,aAAa,EAAE,MAAM,EACrB,sBAAsB,EAAE,UAAU,GACjC,OAAO,CAAC,IAAI,CAAC,CAUf;AAED,qBAAa,mBAAoB,SAAQ,sBAAsB;gBACjD,OAAO,EAAE,MAAM;CAU5B;AAED,qBAAa,+BAAgC,SAAQ,sBAAsB;IACzE,QAAQ,CAAC,IAAI,qCAAqC;gBAEtC,eAAe,EAAE,MAAM,EAAE,aAAa,EAAE,MAAM;CAU3D;AAED,qBAAa,cAAe,SAAQ,sBAAsB;IACxD,QAAQ,CAAC,IAAI,oBAAoB;gBAG/B,KAAK,EAAE,MAAM,GAAG,IAAI,EACpB,SAAS,EAAE,MAAM,GAAG,IAAI,EACxB,eAAe,EAAE,MAAM;CAe1B;AAED,qBAAa,wBAAyB,SAAQ,sBAAsB;IAClE,QAAQ,CAAC,IAAI,8BAA8B;gBAE/B,KAAK,EAAE,OAAO;CAW3B;AAED,qBAAa,sBAAuB,SA
AQ,KAAK;IAC/C,QAAQ,CAAC,IAAI,4BAA4B;IACzC,QAAQ,CAAC,UAAU,EAAE,MAAM,CAAC;IAC5B,QAAQ,CAAC,WAAW,EAAE,MAAM,GAAG,IAAI,CAAC;gBAExB,UAAU,EAAE,MAAM,EAAE,WAAW,EAAE,MAAM,GAAG,IAAI;CAK3D"}
@@ -164,7 +164,19 @@ var CVRStore = class {
164
164
  WHERE cvr."clientGroupID" = ${id}`,
165
165
  tx`SELECT "clientID" FROM ${this.#cvr("clients")}
166
166
  WHERE "clientGroupID" = ${id}`,
167
- tx`SELECT * FROM ${this.#cvr("queries")}
167
+ tx`SELECT
168
+ "clientGroupID",
169
+ "queryHash",
170
+ "clientAST",
171
+ "queryName",
172
+ "queryArgs",
173
+ "patchVersion",
174
+ "transformationHash",
175
+ "transformationVersion",
176
+ "internal",
177
+ "deleted",
178
+ "rowSetSignature"
179
+ FROM ${this.#cvr("queries")}
168
180
  WHERE "clientGroupID" = ${id} AND deleted IS DISTINCT FROM true`,
169
181
  tx`SELECT
170
182
  "clientGroupID",
@@ -378,7 +390,15 @@ var CVRStore = class {
378
390
  try {
379
391
  await reader.processReadTask((tx) => checkVersion(tx, this.#schema, this.#id, current));
380
392
  const [allDesires, queryRows] = await reader.processReadTask((tx) => Promise.all([tx`
381
- SELECT * FROM ${this.#cvr("desires")}
393
+ SELECT
394
+ "clientGroupID",
395
+ "clientID",
396
+ "queryHash",
397
+ "patchVersion",
398
+ "deleted",
399
+ "ttl",
400
+ "inactivatedAt"
401
+ FROM ${this.#cvr("desires")}
382
402
  WHERE "clientGroupID" = ${this.#id}
383
403
  AND "patchVersion" > ${start}
384
404
  AND "patchVersion" <= ${end}`, tx`
@@ -1 +1 @@
1
- {"version":3,"file":"cvr-store.js","names":["#schema","#taskID","#id","#failService","#db","#writes","#pendingRowRecordUpdates","#forceUpdates","#rowCache","#loadAttemptIntervalMs","#maxLoadAttempts","#pendingQueryUpdates","#pendingDesireUpdates","#pendingQueryPartialUpdates","#load","#cvr","#pendingInstanceWrite","#updateQueryFields","#checkVersionAndOwnership","#flush","#rowCount","#flushDesires","#flushQueries"],"sources":["../../../../../../zero-cache/src/services/view-syncer/cvr-store.ts"],"sourcesContent":["import {trace} from '@opentelemetry/api';\nimport type {LogContext} from '@rocicorp/logger';\nimport type {MaybeRow, PendingQuery, Row} from 'postgres';\nimport {startAsyncSpan} from '../../../../otel/src/span.ts';\nimport {version} from '../../../../otel/src/version.ts';\nimport {assert} from '../../../../shared/src/asserts.ts';\nimport {CustomKeyMap} from '../../../../shared/src/custom-key-map.ts';\nimport {CustomKeySet} from '../../../../shared/src/custom-key-set.ts';\nimport {\n deepEqual,\n type ReadonlyJSONValue,\n} from '../../../../shared/src/json.ts';\nimport {sleep} from '../../../../shared/src/sleep.ts';\nimport * as v from '../../../../shared/src/valita.ts';\nimport {astSchema} from '../../../../zero-protocol/src/ast.ts';\nimport {clientSchemaSchema} from '../../../../zero-protocol/src/client-schema.ts';\nimport {ErrorKind} from '../../../../zero-protocol/src/error-kind.ts';\nimport {ErrorOrigin} from '../../../../zero-protocol/src/error-origin.ts';\nimport type {InspectQueryRow} from '../../../../zero-protocol/src/inspect-down.ts';\nimport {clampTTL, DEFAULT_TTL_MS} from '../../../../zql/src/query/ttl.ts';\nimport * as Mode from '../../db/mode-enum.ts';\nimport {runTx} from '../../db/run-transaction.ts';\nimport {TransactionPool} from '../../db/transaction-pool.ts';\nimport {recordRowsSynced} from '../../server/anonymous-otel-start.ts';\nimport {ProtocolErrorWithLevel} from '../../types/error-with-level.ts';\nimport type {PostgresDB, PostgresTransaction} from '../../types/pg.ts';\nimport {rowIDString} from '../../types/row-key.ts';\nimport {cvrSchema, type ShardID} from '../../types/shards.ts';\nimport type {Patch, PatchToVersion} from './client-handler.ts';\nimport type {CVR, CVRSnapshot} from './cvr.ts';\nimport {RowRecordCache} from './row-record-cache.ts';\nimport {\n type ClientsRow,\n type DesiresRow,\n type InstancesRow,\n type QueriesRow,\n type RowsRow,\n} from './schema/cvr.ts';\nimport {\n type ClientQueryRecord,\n type ClientRecord,\n cmpVersions,\n type CustomQueryRecord,\n type CVRVersion,\n EMPTY_CVR_VERSION,\n type InternalQueryRecord,\n type NullableCVRVersion,\n type QueryPatch,\n type QueryRecord,\n queryRecordToQueryRow,\n type RowID,\n type RowRecord,\n versionFromString,\n versionString,\n} from './schema/types.ts';\nimport {\n type TTLClock,\n ttlClockAsNumber,\n ttlClockFromNumber,\n} from './ttl-clock.ts';\n\nexport type CVRFlushStats = {\n instances: number;\n queries: number;\n desires: number;\n clients: number;\n rows: number;\n rowsDeferred: number;\n statements: number;\n};\n\nlet flushCounter = 0;\n\n/**\n * Convert TTL/timestamp values for both old (seconds-based) and new (ms-based) columns.\n * Old columns: inactivatedAt (TIMESTAMPTZ), ttl (INTERVAL) - need conversion ms->seconds\n * New columns: inactivatedAtMs (DOUBLE PRECISION), ttlMs (DOUBLE PRECISION) - store ms directly\n */\nfunction convertTTLValues(\n inactivatedAt: TTLClock | undefined,\n ttl: number,\n): {\n ttlInterval: number | null;\n ttlMs: number | null;\n 
inactivatedAtTimestamp: TTLClock | null;\n inactivatedAtMs: TTLClock | null;\n} {\n return {\n ttlInterval: ttl < 0 ? null : ttl / 1000, // INTERVAL needs seconds\n ttlMs: ttl < 0 ? null : ttl, // New column stores ms directly\n inactivatedAtTimestamp:\n inactivatedAt === undefined\n ? null\n : ttlClockFromNumber(ttlClockAsNumber(inactivatedAt) / 1000),\n inactivatedAtMs: inactivatedAt ?? null,\n };\n}\n\nconst tracer = trace.getTracer('cvr-store', version);\n\n/**\n * QueriesRow with queryArgs as a stringified JSON value.\n * Used for batched config writes where queryArgs are pre-stringified\n * to handle the postgres.js boolean array bug.\n */\ntype StringifiedQueriesRow = Omit<QueriesRow, 'queryArgs'> & {\n queryArgs: string | null;\n};\n\nfunction asQuery(row: QueriesRow): QueryRecord {\n const maybeVersion = (s: string | null) =>\n s === null ? undefined : versionFromString(s);\n\n // Only attach rowSetSignature when the column is non-null, so existing\n // snapshots that don't include the field don't break.\n const sigField = row.rowSetSignature\n ? {rowSetSignature: row.rowSetSignature}\n : {};\n\n if (row.clientAST === null) {\n // custom query\n assert(\n row.queryName !== null && row.queryArgs !== null,\n 'queryName and queryArgs must be set for custom queries',\n );\n return {\n type: 'custom',\n id: row.queryHash,\n name: row.queryName,\n args: row.queryArgs,\n patchVersion: maybeVersion(row.patchVersion),\n clientState: {},\n transformationHash: row.transformationHash ?? undefined,\n transformationVersion: maybeVersion(row.transformationVersion),\n ...sigField,\n } satisfies CustomQueryRecord;\n }\n\n const ast = astSchema.parse(row.clientAST);\n return row.internal\n ? ({\n type: 'internal',\n id: row.queryHash,\n ast,\n transformationHash: row.transformationHash ?? undefined,\n transformationVersion: maybeVersion(row.transformationVersion),\n ...sigField,\n } satisfies InternalQueryRecord)\n : ({\n type: 'client',\n id: row.queryHash,\n ast,\n patchVersion: maybeVersion(row.patchVersion),\n clientState: {},\n transformationHash: row.transformationHash ?? undefined,\n transformationVersion: maybeVersion(row.transformationVersion),\n ...sigField,\n } satisfies ClientQueryRecord);\n}\n\n// The time to wait between load attempts.\nconst LOAD_ATTEMPT_INTERVAL_MS = 500;\n// The maximum number of load() attempts if the rowsVersion is behind.\n// This currently results in a maximum catchup time of ~5 seconds, after\n// which we give up and consider the CVR invalid.\n//\n// TODO: Make this configurable with something like --max-catchup-wait-ms,\n// as it is technically application specific.\nconst MAX_LOAD_ATTEMPTS = 10;\n\nexport class CVRStore {\n readonly #schema: string;\n readonly #taskID: string;\n readonly #id: string;\n readonly #failService: (e: unknown) => void;\n readonly #db: PostgresDB;\n readonly #writes: Set<{\n stats: Partial<CVRFlushStats>;\n write: (\n tx: PostgresTransaction,\n lastConnectTime: number,\n ) => PendingQuery<MaybeRow[]>;\n }> = new Set();\n // Stored separately so repeated putInstance() calls (e.g. 
setClientSchema,\n // setProfileID, and the final call in #flush) replace each other rather than\n // accumulating as independent statements in #writes.\n #pendingInstanceWrite:\n | ((\n tx: PostgresTransaction,\n lastConnectTime: number,\n ) => PendingQuery<MaybeRow[]>)\n | undefined = undefined;\n readonly #pendingRowRecordUpdates = new CustomKeyMap<RowID, RowRecord | null>(\n rowIDString,\n );\n readonly #forceUpdates = new CustomKeySet<RowID>(rowIDString);\n readonly #rowCache: RowRecordCache;\n readonly #loadAttemptIntervalMs: number;\n readonly #maxLoadAttempts: number;\n #rowCount: number = 0;\n readonly #pendingQueryUpdates = new Map<string, StringifiedQueriesRow>();\n readonly #pendingDesireUpdates = new Map<string, DesiresRow>();\n readonly #pendingQueryPartialUpdates = new Map<string, Partial<QueriesRow>>();\n\n constructor(\n lc: LogContext,\n cvrDb: PostgresDB,\n shard: ShardID,\n taskID: string,\n cvrID: string,\n failService: (e: unknown) => void,\n loadAttemptIntervalMs = LOAD_ATTEMPT_INTERVAL_MS,\n maxLoadAttempts = MAX_LOAD_ATTEMPTS,\n deferredRowFlushThreshold = 100, // somewhat arbitrary\n setTimeoutFn = setTimeout,\n ) {\n this.#failService = failService;\n this.#db = cvrDb;\n this.#schema = cvrSchema(shard);\n this.#taskID = taskID;\n this.#id = cvrID;\n this.#rowCache = new RowRecordCache(\n lc,\n cvrDb,\n shard,\n cvrID,\n failService,\n deferredRowFlushThreshold,\n setTimeoutFn,\n );\n this.#loadAttemptIntervalMs = loadAttemptIntervalMs;\n this.#maxLoadAttempts = maxLoadAttempts;\n }\n\n #cvr(table: string) {\n return this.#db(`${this.#schema}.${table}`);\n }\n\n #updateQueryFields(queryHash: string, fields: Partial<QueriesRow>): void {\n // Track as partial-only update for batched flush. Merge into any\n // pre-existing partial update for the same hash so independent callers\n // (e.g. updateQuery + updateRowSetSignature) don't clobber each other.\n const existing = this.#pendingQueryPartialUpdates.get(queryHash);\n this.#pendingQueryPartialUpdates.set(\n queryHash,\n existing ? 
{...existing, ...fields} : fields,\n );\n }\n\n load(lc: LogContext, lastConnectTime: number): Promise<CVR> {\n return startAsyncSpan(tracer, 'cvr.load', async () => {\n let err: RowsVersionBehindError | undefined;\n for (let i = 0; i < this.#maxLoadAttempts; i++) {\n if (i > 0) {\n await sleep(this.#loadAttemptIntervalMs);\n }\n const result = await this.#load(lc, lastConnectTime);\n if (result instanceof RowsVersionBehindError) {\n lc.info?.(`attempt ${i + 1}: ${String(result)}`);\n err = result;\n continue;\n }\n return result;\n }\n assert(err, 'Expected error to be set after retry loop exhausted');\n throw new ClientNotFoundError(\n `max attempts exceeded waiting for CVR@${err.cvrVersion} to catch up from ${err.rowsVersion}`,\n );\n });\n }\n\n async #load(\n lc: LogContext,\n lastConnectTime: number,\n ): Promise<CVR | RowsVersionBehindError> {\n const start = Date.now();\n\n const id = this.#id;\n const cvr: CVR = {\n id,\n version: EMPTY_CVR_VERSION,\n lastActive: 0,\n ttlClock: ttlClockFromNumber(0), // TTL clock starts at 0, not Date.now()\n replicaVersion: null,\n clients: {},\n queries: {},\n clientSchema: null,\n profileID: null,\n };\n\n const [instance, clientsRows, queryRows, desiresRows] = await runTx(\n this.#db,\n tx => {\n lc.debug?.(`CVR tx started after ${Date.now() - start} ms`);\n return [\n tx<\n (Omit<InstancesRow, 'clientGroupID'> & {\n profileID: string | null;\n deleted: boolean;\n rowsVersion: string | null;\n })[]\n >`SELECT cvr.\"version\",\n \"lastActive\",\n \"ttlClock\",\n \"replicaVersion\",\n \"owner\",\n \"grantedAt\",\n \"clientSchema\",\n \"profileID\",\n \"deleted\",\n rows.\"version\" as \"rowsVersion\"\n FROM ${this.#cvr('instances')} AS cvr\n LEFT JOIN ${this.#cvr('rowsVersion')} AS rows\n ON cvr.\"clientGroupID\" = rows.\"clientGroupID\"\n WHERE cvr.\"clientGroupID\" = ${id}`,\n tx<Pick<ClientsRow, 'clientID'>[]>`SELECT \"clientID\" FROM ${this.#cvr(\n 'clients',\n )}\n WHERE \"clientGroupID\" = ${id}`,\n tx<QueriesRow[]>`SELECT * FROM ${this.#cvr('queries')}\n WHERE \"clientGroupID\" = ${id} AND deleted IS DISTINCT FROM true`,\n tx<DesiresRow[]>`SELECT\n \"clientGroupID\",\n \"clientID\",\n \"queryHash\",\n \"patchVersion\",\n \"deleted\",\n \"ttlMs\" AS \"ttl\",\n \"inactivatedAtMs\" AS \"inactivatedAt\"\n FROM ${this.#cvr('desires')}\n WHERE \"clientGroupID\" = ${id}`,\n ];\n },\n {mode: Mode.READONLY},\n );\n lc.debug?.(\n `CVR tx completed after ${Date.now() - start} ms ` +\n `(${clientsRows.length} clients, ${queryRows.length} queries, ${desiresRows.length} desires)`,\n );\n\n if (instance.length === 0) {\n // This is the first time we see this CVR.\n this.putInstance({\n version: cvr.version,\n lastActive: 0,\n ttlClock: ttlClockFromNumber(0), // TTL clock starts at 0 for new instances\n replicaVersion: null,\n clientSchema: null,\n profileID: null,\n });\n } else {\n assert(\n instance.length === 1,\n () => `Expected exactly one CVR instance, got ${instance.length}`,\n );\n const {\n version,\n lastActive,\n ttlClock,\n replicaVersion,\n owner,\n grantedAt,\n rowsVersion,\n clientSchema,\n profileID,\n deleted,\n } = instance[0];\n\n if (deleted) {\n throw new ClientNotFoundError(\n 'Client has been purged due to inactivity',\n );\n }\n\n if (owner !== this.#taskID) {\n if ((grantedAt ?? 
0) > lastConnectTime) {\n throw new OwnershipError(owner, grantedAt, lastConnectTime);\n } else {\n // Fire-and-forget an ownership change to signal the current owner.\n // Note that the query is structured such that it only succeeds in the\n // correct conditions (i.e. gated on `grantedAt`).\n void this.#db`\n UPDATE ${this.#cvr('instances')} \n SET \"owner\" = ${this.#taskID}, \n \"grantedAt\" = ${lastConnectTime}\n WHERE \"clientGroupID\" = ${this.#id} AND\n (\"grantedAt\" IS NULL OR\n \"grantedAt\" <= to_timestamp(${lastConnectTime / 1000}))\n `\n .execute()\n .catch(this.#failService);\n }\n }\n\n if (version !== (rowsVersion ?? EMPTY_CVR_VERSION.stateVersion)) {\n // This will cause the load() method to wait for row catchup and retry.\n // Assuming the ownership signal succeeds, the current owner will stop\n // modifying the CVR and flush its pending row changes.\n return new RowsVersionBehindError(version, rowsVersion);\n }\n\n cvr.version = versionFromString(version);\n cvr.lastActive = lastActive;\n cvr.ttlClock = ttlClock;\n cvr.replicaVersion = replicaVersion;\n cvr.profileID = profileID;\n\n try {\n cvr.clientSchema =\n clientSchema === null\n ? null\n : v.parse(clientSchema, clientSchemaSchema);\n } catch (e) {\n throw new InvalidClientSchemaError(e);\n }\n }\n\n for (const row of clientsRows) {\n cvr.clients[row.clientID] = {\n id: row.clientID,\n desiredQueryIDs: [],\n };\n }\n\n for (const row of queryRows) {\n const query = asQuery(row);\n cvr.queries[row.queryHash] = query;\n }\n\n for (const row of desiresRows) {\n const client = cvr.clients[row.clientID];\n // Note: row.inactivatedAt is mapped from inactivatedAtMs in the SQL query\n if (!row.deleted && row.inactivatedAt === null) {\n if (client) {\n client.desiredQueryIDs.push(row.queryHash);\n } else {\n // This can happen if the client was deleted but the queries are still alive.\n lc.debug?.(\n `Not adding to desiredQueryIDs for client ${row.clientID} because it has been deleted.`,\n );\n }\n }\n\n const query = cvr.queries[row.queryHash];\n if (\n query &&\n query.type !== 'internal' &&\n (!row.deleted || row.inactivatedAt !== null)\n ) {\n query.clientState[row.clientID] = {\n inactivatedAt: row.inactivatedAt ?? undefined,\n ttl: clampTTL(row.ttl ?? DEFAULT_TTL_MS),\n version: versionFromString(row.patchVersion),\n };\n }\n }\n\n lc.info?.(\n `loaded cvr@${versionString(cvr.version)} (${Date.now() - start} ms)`,\n );\n\n // why do we not sort `desiredQueryIDs` here?\n\n return cvr;\n }\n\n getRowRecords(): Promise<ReadonlyMap<RowID, RowRecord>> {\n return this.#rowCache.getRowRecords();\n }\n\n putRowRecord(row: RowRecord): void {\n this.#pendingRowRecordUpdates.set(row.id, row);\n }\n\n /**\n * Note: Removing a row from the CVR should be represented by a\n * {@link putRowRecord()} with `refCounts: null` in order to properly\n * produce the appropriate delete patch when catching up old clients.\n *\n * This `delRowRecord()` method, on the other hand, is only used for\n * \"canceling\" the put of a row that was not in the CVR in the first place.\n */\n delRowRecord(id: RowID): void {\n this.#pendingRowRecordUpdates.set(id, null);\n }\n\n /**\n * Overrides the default logic that removes no-op writes and forces\n * the updates for the given row `ids`. This has no effect if there\n * are no corresponding puts or dels for the associated row records.\n */\n forceUpdates(...ids: RowID[]) {\n for (const id of ids) {\n this.#forceUpdates.add(id);\n }\n }\n\n /**\n * Updates the `ttlClock` of the CVR instance. 
The ttlClock starts at 0 when\n * the CVR instance is first created and increments based on elapsed time\n * since the base time established by the ViewSyncerService.\n */\n async updateTTLClock(ttlClock: TTLClock, lastActive: number): Promise<void> {\n await this.#db`UPDATE ${this.#cvr('instances')}\n SET \"lastActive\" = ${lastActive},\n \"ttlClock\" = ${ttlClock}\n WHERE \"clientGroupID\" = ${this.#id}`.execute();\n }\n\n /**\n * @returns This returns the current `ttlClock` of the CVR instance. The ttlClock\n * represents elapsed time since the instance was created (starting from 0).\n * If the CVR has never been initialized for this client group, it returns\n * `undefined`.\n */\n async getTTLClock(): Promise<TTLClock | undefined> {\n const result = await this.#db<Pick<InstancesRow, 'ttlClock'>[]>`\n SELECT \"ttlClock\" FROM ${this.#cvr('instances')}\n WHERE \"clientGroupID\" = ${this.#id}`.values();\n if (result.length === 0) {\n // This can happen if the CVR has not been initialized yet.\n return undefined;\n }\n assert(\n result.length === 1,\n () => `Expected exactly one rowsVersion result, got ${result.length}`,\n );\n return result[0][0];\n }\n\n putInstance({\n version,\n replicaVersion,\n lastActive,\n clientSchema,\n profileID,\n ttlClock,\n }: Pick<\n CVRSnapshot,\n | 'version'\n | 'replicaVersion'\n | 'lastActive'\n | 'clientSchema'\n | 'profileID'\n | 'ttlClock'\n >): void {\n // Overwrite any previously queued instance write — only the last call\n // matters since they all target the same row.\n this.#pendingInstanceWrite = (tx, lastConnectTime) => {\n const change: InstancesRow = {\n clientGroupID: this.#id,\n version: versionString(version),\n lastActive,\n ttlClock,\n replicaVersion,\n owner: this.#taskID,\n grantedAt: lastConnectTime,\n clientSchema,\n profileID,\n };\n return tx`\n INSERT INTO ${this.#cvr('instances')} ${tx(change)} \n ON CONFLICT (\"clientGroupID\") DO UPDATE SET ${tx(change)}`;\n };\n }\n\n markQueryAsDeleted(version: CVRVersion, queryPatch: QueryPatch): void {\n this.#updateQueryFields(queryPatch.id, {\n patchVersion: versionString(version),\n deleted: true,\n transformationHash: null,\n transformationVersion: null,\n });\n }\n\n putQuery(query: QueryRecord): void {\n const change = queryRecordToQueryRow(this.#id, query);\n\n const c = {\n ...change,\n // Pre-stringify queryArgs to handle postgres.js boolean array bug\n queryArgs:\n change.queryArgs !== null ? JSON.stringify(change.queryArgs) : null,\n transformationHash: change.transformationHash ?? null,\n transformationVersion: change.transformationVersion ?? null,\n deleted: change.deleted ?? false,\n };\n this.#pendingQueryUpdates.set(query.id, c);\n }\n\n updateQuery(query: QueryRecord) {\n const maybeVersionString = (v: CVRVersion | undefined) =>\n v ? versionString(v) : null;\n this.#updateQueryFields(query.id, {\n patchVersion:\n query.type === 'internal'\n ? null\n : maybeVersionString(query.patchVersion),\n transformationHash: query.transformationHash ?? 
null,\n transformationVersion: maybeVersionString(query.transformationVersion),\n deleted: false,\n });\n }\n\n updateRowSetSignature(queryHash: string, signature: string): void {\n this.#updateQueryFields(queryHash, {rowSetSignature: signature});\n }\n\n insertClient(client: ClientRecord): void {\n const change: ClientsRow = {\n clientGroupID: this.#id,\n clientID: client.id,\n };\n\n this.#writes.add({\n stats: {clients: 1},\n write: tx => tx`INSERT INTO ${this.#cvr('clients')} ${tx(change)}`,\n });\n }\n\n deleteClient(clientID: string) {\n this.#writes.add({\n stats: {clients: 1},\n write: sql =>\n sql`DELETE FROM ${this.#cvr('clients')}\n WHERE \"clientGroupID\" = ${this.#id}\n AND \"clientID\" = ${clientID}`,\n });\n }\n\n putDesiredQuery(\n newVersion: CVRVersion,\n query: {id: string},\n client: {id: string},\n deleted: boolean,\n inactivatedAt: TTLClock | undefined,\n ttl: number,\n ): void {\n const {ttlMs, inactivatedAtMs} = convertTTLValues(inactivatedAt, ttl);\n\n const change: DesiresRow = {\n clientGroupID: this.#id,\n clientID: client.id,\n deleted,\n inactivatedAt: inactivatedAtMs,\n patchVersion: versionString(newVersion),\n queryHash: query.id,\n ttl: ttlMs,\n };\n\n // Use composite key to deduplicate/replace entries for the same client-query pair\n const key = `${client.id}:${query.id}`;\n this.#pendingDesireUpdates.set(key, change);\n }\n\n catchupRowPatches(\n lc: LogContext,\n afterVersion: NullableCVRVersion,\n upToCVR: CVRSnapshot,\n current: CVRVersion,\n excludeQueryHashes: string[] = [],\n ): AsyncGenerator<RowsRow[], void, undefined> {\n return this.#rowCache.catchupRowPatches(\n lc,\n afterVersion,\n upToCVR,\n current,\n excludeQueryHashes,\n );\n }\n\n async catchupConfigPatches(\n lc: LogContext,\n afterVersion: NullableCVRVersion,\n upToCVR: CVRSnapshot,\n current: CVRVersion,\n ): Promise<PatchToVersion[]> {\n if (cmpVersions(afterVersion, upToCVR.version) >= 0) {\n return [];\n }\n\n const startMs = Date.now();\n const start = afterVersion ? versionString(afterVersion) : '';\n const end = versionString(upToCVR.version);\n lc.debug?.(`scanning config patches for clients from ${start}`);\n\n const reader = new TransactionPool(lc, {mode: Mode.READONLY}).run(this.#db);\n try {\n // Verify that we are reading the right version of the CVR.\n await reader.processReadTask(tx =>\n checkVersion(tx, this.#schema, this.#id, current),\n );\n\n const [allDesires, queryRows] = await reader.processReadTask(tx =>\n Promise.all([\n tx<DesiresRow[]>`\n SELECT * FROM ${this.#cvr('desires')}\n WHERE \"clientGroupID\" = ${this.#id}\n AND \"patchVersion\" > ${start}\n AND \"patchVersion\" <= ${end}`,\n tx<Pick<QueriesRow, 'deleted' | 'queryHash' | 'patchVersion'>[]>`\n SELECT deleted, \"queryHash\", \"patchVersion\" FROM ${this.#cvr('queries')}\n WHERE \"clientGroupID\" = ${this.#id}\n AND \"patchVersion\" > ${start}\n AND \"patchVersion\" <= ${end}`,\n ]),\n );\n\n const patches: PatchToVersion[] = [];\n for (const row of queryRows) {\n const {queryHash: id} = row;\n const patch: Patch = row.deleted\n ? {type: 'query', op: 'del', id}\n : {type: 'query', op: 'put', id};\n const v = row.patchVersion;\n assert(v, 'patchVersion must be set for query patches');\n patches.push({patch, toVersion: versionFromString(v)});\n }\n for (const row of allDesires) {\n const {clientID, queryHash: id} = row;\n const patch: Patch = row.deleted\n ? 
{type: 'query', op: 'del', id, clientID}\n : {type: 'query', op: 'put', id, clientID};\n patches.push({patch, toVersion: versionFromString(row.patchVersion)});\n }\n\n lc.debug?.(\n `${patches.length} config patches (${Date.now() - startMs} ms)`,\n );\n return patches;\n } finally {\n reader.setDone();\n }\n }\n\n #flushQueries(\n tx: PostgresTransaction,\n lc: LogContext,\n ): PendingQuery<Row[]>[] {\n // Merge partial updates into full updates\n const partialOnly = new Map<string, Partial<QueriesRow>>();\n for (const [queryHash, partial] of this.#pendingQueryPartialUpdates) {\n const existing = this.#pendingQueryUpdates.get(queryHash);\n if (existing) {\n // Merge partial into full update\n Object.assign(existing, partial);\n } else {\n // Track partial-only updates to batch separately\n partialOnly.set(queryHash, partial);\n }\n }\n\n const queries: PendingQuery<Row[]>[] = [];\n\n // Batch full updates\n if (this.#pendingQueryUpdates.size > 0) {\n const rows = [...this.#pendingQueryUpdates.values()];\n lc.debug?.(`Batch flushing ${rows.length} full query updates`);\n\n queries.push(tx`\n INSERT INTO ${this.#cvr('queries')} (\n \"clientGroupID\",\n \"queryHash\",\n \"clientAST\",\n \"queryName\",\n \"queryArgs\",\n \"patchVersion\",\n \"transformationHash\",\n \"transformationVersion\",\n \"internal\",\n \"deleted\",\n \"rowSetSignature\"\n )\n SELECT\n \"clientGroupID\",\n \"queryHash\",\n \"clientAST\",\n \"queryName\",\n CASE\n WHEN \"queryArgs\" IS NULL THEN NULL\n ELSE \"queryArgs\"::json\n END,\n \"patchVersion\",\n \"transformationHash\",\n \"transformationVersion\",\n \"internal\",\n \"deleted\",\n \"rowSetSignature\"\n FROM json_to_recordset(${rows}) AS x(\n \"clientGroupID\" TEXT,\n \"queryHash\" TEXT,\n \"clientAST\" JSONB,\n \"queryName\" TEXT,\n \"queryArgs\" TEXT,\n \"patchVersion\" TEXT,\n \"transformationHash\" TEXT,\n \"transformationVersion\" TEXT,\n \"internal\" BOOLEAN,\n \"deleted\" BOOLEAN,\n \"rowSetSignature\" TEXT\n )\n ON CONFLICT (\"clientGroupID\", \"queryHash\") DO UPDATE SET\n \"clientAST\" = excluded.\"clientAST\",\n \"queryName\" = excluded.\"queryName\",\n \"queryArgs\" = CASE\n WHEN excluded.\"queryArgs\" IS NULL THEN NULL\n ELSE excluded.\"queryArgs\"::json\n END,\n \"patchVersion\" = excluded.\"patchVersion\",\n \"transformationHash\" = excluded.\"transformationHash\",\n \"transformationVersion\" = excluded.\"transformationVersion\",\n \"internal\" = excluded.\"internal\",\n \"deleted\" = excluded.\"deleted\",\n \"rowSetSignature\" = excluded.\"rowSetSignature\"\n `);\n }\n\n // Batch partial-only updates\n if (partialOnly.size > 0) {\n lc.debug?.(`Batch flushing ${partialOnly.size} partial query updates`);\n const rows = Array.from(\n partialOnly.entries(),\n ([queryHash, partial]) => ({\n clientGroupID: this.#id,\n queryHash,\n patchVersionSet: partial.patchVersion !== undefined,\n patchVersion: partial.patchVersion ?? null,\n deletedSet: partial.deleted !== undefined,\n deleted: partial.deleted ?? null,\n transformationHashSet: partial.transformationHash !== undefined,\n transformationHash: partial.transformationHash ?? null,\n transformationVersionSet: partial.transformationVersion !== undefined,\n transformationVersion: partial.transformationVersion ?? null,\n rowSetSignatureSet: partial.rowSetSignature !== undefined,\n rowSetSignature: partial.rowSetSignature ?? 
null,\n }),\n );\n queries.push(tx`\n UPDATE ${this.#cvr('queries')} AS q\n SET\n \"patchVersion\" = CASE\n WHEN u.\"patchVersionSet\" THEN u.\"patchVersion\"\n ELSE q.\"patchVersion\"\n END,\n \"deleted\" = CASE\n WHEN u.\"deletedSet\" THEN u.\"deleted\"\n ELSE q.\"deleted\"\n END,\n \"transformationHash\" = CASE\n WHEN u.\"transformationHashSet\" THEN u.\"transformationHash\"\n ELSE q.\"transformationHash\"\n END,\n \"transformationVersion\" = CASE\n WHEN u.\"transformationVersionSet\" THEN u.\"transformationVersion\"\n ELSE q.\"transformationVersion\"\n END,\n \"rowSetSignature\" = CASE\n WHEN u.\"rowSetSignatureSet\" THEN u.\"rowSetSignature\"\n ELSE q.\"rowSetSignature\"\n END\n FROM json_to_recordset(${rows}) AS u(\n \"clientGroupID\" TEXT,\n \"queryHash\" TEXT,\n \"patchVersionSet\" BOOLEAN,\n \"patchVersion\" TEXT,\n \"deletedSet\" BOOLEAN,\n \"deleted\" BOOLEAN,\n \"transformationHashSet\" BOOLEAN,\n \"transformationHash\" TEXT,\n \"transformationVersionSet\" BOOLEAN,\n \"transformationVersion\" TEXT,\n \"rowSetSignatureSet\" BOOLEAN,\n \"rowSetSignature\" TEXT\n )\n WHERE q.\"clientGroupID\" = u.\"clientGroupID\"\n AND q.\"queryHash\" = u.\"queryHash\"\n `);\n }\n\n return queries;\n }\n\n #flushDesires(\n tx: PostgresTransaction,\n lc: LogContext,\n ): PendingQuery<Row[]> | null {\n if (this.#pendingDesireUpdates.size === 0) {\n return null;\n }\n\n const rows = Array.from(this.#pendingDesireUpdates.values(), row => {\n const {ttlInterval, ttlMs, inactivatedAtTimestamp, inactivatedAtMs} =\n convertTTLValues(row.inactivatedAt ?? undefined, row.ttl ?? -1);\n return {\n clientGroupID: row.clientGroupID,\n clientID: row.clientID,\n queryHash: row.queryHash,\n patchVersion: row.patchVersion,\n deleted: row.deleted,\n ttl: ttlInterval,\n ttlMs,\n inactivatedAt: inactivatedAtTimestamp,\n inactivatedAtMs,\n };\n });\n\n lc.debug?.(`Batch flushing ${rows.length} desire updates`);\n\n return tx`\n INSERT INTO ${this.#cvr('desires')} (\n \"clientGroupID\",\n \"clientID\",\n \"queryHash\",\n \"patchVersion\",\n \"deleted\",\n \"ttl\",\n \"ttlMs\",\n \"inactivatedAt\",\n \"inactivatedAtMs\"\n )\n SELECT\n \"clientGroupID\",\n \"clientID\",\n \"queryHash\",\n \"patchVersion\",\n \"deleted\",\n \"ttl\",\n \"ttlMs\",\n CASE\n WHEN \"inactivatedAt\" IS NULL THEN NULL\n -- Divide by 1000 because postgres.js serializeTimestamp treats numbers as ms\n -- and to_timestamp expects seconds. 
This matches non-batched behavior.\n ELSE to_timestamp(\"inactivatedAt\" / 1000.0)\n END,\n \"inactivatedAtMs\"\n FROM json_to_recordset(${rows}) AS x(\n \"clientGroupID\" TEXT,\n \"clientID\" TEXT,\n \"queryHash\" TEXT,\n \"patchVersion\" TEXT,\n \"deleted\" BOOLEAN,\n \"ttl\" INTERVAL,\n \"ttlMs\" DOUBLE PRECISION,\n \"inactivatedAt\" DOUBLE PRECISION,\n \"inactivatedAtMs\" DOUBLE PRECISION\n )\n ON CONFLICT (\"clientGroupID\", \"clientID\", \"queryHash\") DO UPDATE SET\n \"patchVersion\" = excluded.\"patchVersion\",\n \"deleted\" = excluded.\"deleted\",\n \"ttl\" = excluded.\"ttl\",\n \"ttlMs\" = excluded.\"ttlMs\",\n \"inactivatedAt\" = excluded.\"inactivatedAt\",\n \"inactivatedAtMs\" = excluded.\"inactivatedAtMs\"\n `;\n }\n\n async #checkVersionAndOwnership(\n lc: LogContext,\n tx: PostgresTransaction,\n expectedCurrentVersion: CVRVersion,\n lastConnectTime: number,\n ): Promise<void> {\n const start = Date.now();\n lc.debug?.('checking cvr version and ownership');\n const result = await tx<\n Pick<InstancesRow, 'version' | 'owner' | 'grantedAt'>[]\n >`SELECT \"version\", \"owner\", \"grantedAt\" FROM ${this.#cvr('instances')}\n WHERE \"clientGroupID\" = ${this.#id}\n FOR UPDATE`;\n const expected = versionString(expectedCurrentVersion);\n const {version, owner, grantedAt} =\n result.length > 0\n ? result[0]\n : {\n version: EMPTY_CVR_VERSION.stateVersion,\n owner: null,\n grantedAt: null,\n };\n lc.debug?.(\n 'checked cvr version and ownership in ' + (Date.now() - start) + ' ms',\n );\n if (owner !== this.#taskID && (grantedAt ?? 0) > lastConnectTime) {\n throw new OwnershipError(owner, grantedAt, lastConnectTime);\n }\n if (version !== expected) {\n throw new ConcurrentModificationException(expected, version);\n }\n }\n\n async #flush(\n lc: LogContext,\n expectedCurrentVersion: CVRVersion,\n cvr: CVRSnapshot,\n lastConnectTime: number,\n ): Promise<CVRFlushStats | null> {\n const stats: CVRFlushStats = {\n instances: 0,\n queries: 0,\n desires: 0,\n clients: 0,\n rows: 0,\n rowsDeferred: 0,\n statements: 0,\n };\n if (this.#pendingRowRecordUpdates.size) {\n const existingRowRecords = await this.getRowRecords();\n this.#rowCount = existingRowRecords.size;\n for (const [id, row] of this.#pendingRowRecordUpdates.entries()) {\n if (this.#forceUpdates.has(id)) {\n continue;\n }\n const existing = existingRowRecords.get(id);\n if (\n // Don't delete or add an unreferenced row if it's not in the CVR.\n (existing === undefined && !row?.refCounts) ||\n // Don't write a row record that exactly matches what's in the CVR.\n deepEqual(\n (row ?? undefined) as ReadonlyJSONValue | undefined,\n existing as ReadonlyJSONValue | undefined,\n )\n ) {\n this.#pendingRowRecordUpdates.delete(id);\n }\n }\n }\n if (\n this.#pendingRowRecordUpdates.size === 0 &&\n this.#writes.size === 0 &&\n this.#pendingInstanceWrite === undefined &&\n this.#pendingQueryUpdates.size === 0 &&\n this.#pendingQueryPartialUpdates.size === 0 &&\n this.#pendingDesireUpdates.size === 0\n ) {\n return null;\n }\n // Note: The CVR instance itself is only updated if there are material\n // changes (i.e. changes to the CVR contents) to flush.\n this.putInstance(cvr);\n const start = Date.now();\n lc.debug?.('flush tx beginning');\n\n // Use an async callback so we can await the version/ownership check and\n // validate it INSIDE the transaction. 
If validation fails, the exception\n // causes postgres.js to ROLLBACK, ensuring no writes are committed on error.\n const results = await runTx(\n this.#db,\n async tx => {\n lc.debug?.(`flush tx begun after ${Date.now() - start} ms`);\n\n // Acquire row-level lock and validate version/ownership before queuing writes.\n // Throwing here (inside the begin callback) rolls back the transaction so that\n // no writes are committed when concurrent modification or ownership errors occur.\n await this.#checkVersionAndOwnership(\n lc,\n tx,\n expectedCurrentVersion,\n lastConnectTime,\n );\n\n const writeQueries = [];\n if (this.#pendingInstanceWrite) {\n writeQueries.push(this.#pendingInstanceWrite(tx, lastConnectTime));\n stats.instances++;\n stats.statements++;\n }\n for (const write of this.#writes) {\n stats.clients += write.stats.clients ?? 0;\n stats.rows += write.stats.rows ?? 0;\n\n writeQueries.push(write.write(tx, lastConnectTime));\n stats.statements++;\n }\n\n // Batch flush config writes\n // Flush queries first (desires depend on queries via foreign key)\n const hasQueryUpdates =\n this.#pendingQueryUpdates.size > 0 ||\n this.#pendingQueryPartialUpdates.size > 0;\n\n const desireFlush = this.#flushDesires(tx, lc);\n\n let queryFlushes: PendingQuery<Row[]>[] = [];\n if (hasQueryUpdates) {\n queryFlushes = this.#flushQueries(tx, lc);\n\n // Count both full updates and partial-only updates\n const partialOnlyCount = [\n ...this.#pendingQueryPartialUpdates.keys(),\n ].filter(key => !this.#pendingQueryUpdates.has(key)).length;\n\n stats.queries = this.#pendingQueryUpdates.size + partialOnlyCount;\n stats.statements +=\n (this.#pendingQueryUpdates.size > 0 ? 1 : 0) +\n (partialOnlyCount > 0 ? 1 : 0);\n\n if (desireFlush) {\n stats.desires = this.#pendingDesireUpdates.size;\n stats.statements++;\n }\n } else if (desireFlush) {\n stats.desires = this.#pendingDesireUpdates.size;\n stats.statements++;\n }\n\n const rowUpdates = this.#rowCache.executeRowUpdates(\n tx,\n cvr.version,\n this.#pendingRowRecordUpdates,\n 'allow-defer',\n lc,\n );\n stats.statements += rowUpdates.length;\n\n // Pipeline writes now that the version check has passed.\n const pipelined = [\n ...writeQueries,\n ...queryFlushes,\n ...(desireFlush ? [desireFlush] : []),\n ...rowUpdates,\n ];\n\n lc.debug?.(`returning ${pipelined.length} queries for pipelining`);\n\n // Explicitly await all pipelined queries. When the begin callback is async,\n // postgres.js does not call Promise.all() on the return value the way it does\n // for sync callbacks, so we must do it ourselves.\n return Promise.all(pipelined);\n },\n {mode: Mode.READ_COMMITTED},\n );\n\n lc.debug?.(`flush tx completed after ${Date.now() - start} ms`);\n\n // Calculate how many row update queries were in the pipeline.\n // Note: the version check was awaited separately and is not in the results array.\n const baseQueries =\n (this.#pendingInstanceWrite ? 1 : 0) +\n this.#writes.size +\n (this.#pendingQueryUpdates.size > 0 ? 1 : 0) +\n ([...this.#pendingQueryPartialUpdates.keys()].filter(\n key => !this.#pendingQueryUpdates.has(key),\n ).length > 0\n ? 1\n : 0) +\n (this.#pendingDesireUpdates.size > 0 ? 
1 : 0);\n const rowUpdateCount = results.length - baseQueries;\n\n const rowsFlushed = rowUpdateCount > 0;\n if (!rowsFlushed) {\n stats.rowsDeferred = this.#pendingRowRecordUpdates.size;\n } else {\n stats.rows += this.#pendingRowRecordUpdates.size;\n }\n\n this.#rowCount = await this.#rowCache.apply(\n this.#pendingRowRecordUpdates,\n cvr.version,\n rowsFlushed,\n );\n recordRowsSynced(this.#rowCount);\n\n return stats;\n }\n\n get rowCount(): number {\n return this.#rowCount;\n }\n\n async flush(\n lc: LogContext,\n expectedCurrentVersion: CVRVersion,\n cvr: CVRSnapshot,\n lastConnectTime: number,\n ): Promise<CVRFlushStats | null> {\n const start = performance.now();\n lc = lc.withContext('cvrFlushID', flushCounter++);\n try {\n const stats = await this.#flush(\n lc,\n expectedCurrentVersion,\n cvr,\n lastConnectTime,\n );\n if (stats) {\n const elapsed = performance.now() - start;\n lc.info?.(\n `flushed cvr@${versionString(cvr.version)} ` +\n `${JSON.stringify(stats)} in (${elapsed} ms)`,\n );\n this.#rowCache.recordSyncFlushStats(stats, elapsed);\n }\n return stats;\n } catch (e) {\n // Clear cached state if an error (e.g. ConcurrentModificationException) is encountered.\n this.#rowCache.clear();\n throw e;\n } finally {\n this.#writes.clear();\n this.#pendingInstanceWrite = undefined;\n this.#pendingRowRecordUpdates.clear();\n this.#forceUpdates.clear();\n this.#pendingQueryUpdates.clear();\n this.#pendingDesireUpdates.clear();\n this.#pendingQueryPartialUpdates.clear();\n }\n }\n\n hasPendingUpdates(): boolean {\n return this.#rowCache.hasPendingUpdates();\n }\n\n /** Resolves when all pending updates are flushed. */\n flushed(lc: LogContext): Promise<void> {\n return this.#rowCache.flushed(lc);\n }\n\n async inspectQueries(\n lc: LogContext,\n ttlClock: TTLClock,\n clientID?: string,\n ): Promise<InspectQueryRow[]> {\n const db = this.#db;\n const clientGroupID = this.#id;\n\n const reader = new TransactionPool(lc, {mode: Mode.READONLY}).run(db);\n try {\n return await reader.processReadTask(\n tx => tx<InspectQueryRow[]>`\n SELECT DISTINCT ON (d.\"clientID\", d.\"queryHash\")\n d.\"clientID\",\n d.\"queryHash\" AS \"queryID\",\n COALESCE(d.\"ttlMs\", ${DEFAULT_TTL_MS}) AS \"ttl\",\n d.\"inactivatedAtMs\" AS \"inactivatedAt\",\n (SELECT COUNT(*)::INT FROM ${this.#cvr('rows')} r \n WHERE r.\"clientGroupID\" = d.\"clientGroupID\" \n AND r.\"refCounts\" ? d.\"queryHash\") AS \"rowCount\",\n q.\"clientAST\" AS \"ast\",\n (q.\"patchVersion\" IS NOT NULL) AS \"got\",\n COALESCE(d.\"deleted\", FALSE) AS \"deleted\",\n q.\"queryName\" AS \"name\",\n q.\"queryArgs\" AS \"args\"\n FROM ${this.#cvr('desires')} d\n LEFT JOIN ${this.#cvr('queries')} q\n ON q.\"clientGroupID\" = d.\"clientGroupID\"\n AND q.\"queryHash\" = d.\"queryHash\"\n WHERE d.\"clientGroupID\" = ${clientGroupID}\n ${clientID ? tx`AND d.\"clientID\" = ${clientID}` : tx``}\n AND NOT (\n d.\"inactivatedAtMs\" IS NOT NULL \n AND d.\"ttlMs\" IS NOT NULL \n AND (d.\"inactivatedAtMs\" + d.\"ttlMs\") <= ${ttlClockAsNumber(ttlClock)}\n )\n ORDER BY d.\"clientID\", d.\"queryHash\"`,\n );\n } finally {\n reader.setDone();\n }\n }\n}\n\n/**\n * This is similar to {@link CVRStore.#checkVersionAndOwnership} except\n * that it only checks the version and is suitable for snapshot reads\n * (i.e. by doing a plain `SELECT` rather than a `SELECT ... 
FOR UPDATE`).\n */\nexport async function checkVersion(\n tx: PostgresTransaction,\n schema: string,\n clientGroupID: string,\n expectedCurrentVersion: CVRVersion,\n): Promise<void> {\n const expected = versionString(expectedCurrentVersion);\n const result = await tx<Pick<InstancesRow, 'version'>[]>`\n SELECT version FROM ${tx(schema)}.instances \n WHERE \"clientGroupID\" = ${clientGroupID}`;\n const {version} =\n result.length > 0 ? result[0] : {version: EMPTY_CVR_VERSION.stateVersion};\n if (version !== expected) {\n throw new ConcurrentModificationException(expected, version);\n }\n}\n\nexport class ClientNotFoundError extends ProtocolErrorWithLevel {\n constructor(message: string) {\n super(\n {\n kind: ErrorKind.ClientNotFound,\n message,\n origin: ErrorOrigin.ZeroCache,\n },\n 'warn',\n );\n }\n}\n\nexport class ConcurrentModificationException extends ProtocolErrorWithLevel {\n readonly name = 'ConcurrentModificationException';\n\n constructor(expectedVersion: string, actualVersion: string) {\n super(\n {\n kind: ErrorKind.Internal,\n message: `CVR has been concurrently modified. Expected ${expectedVersion}, got ${actualVersion}`,\n origin: ErrorOrigin.ZeroCache,\n },\n 'warn',\n );\n }\n}\n\nexport class OwnershipError extends ProtocolErrorWithLevel {\n readonly name = 'OwnershipError';\n\n constructor(\n owner: string | null,\n grantedAt: number | null,\n lastConnectTime: number,\n ) {\n super(\n {\n kind: ErrorKind.Rehome,\n message:\n `CVR ownership was transferred to ${owner} at ` +\n `${new Date(grantedAt ?? 0).toISOString()} ` +\n `(last connect time: ${new Date(lastConnectTime).toISOString()})`,\n maxBackoffMs: 0,\n origin: ErrorOrigin.ZeroCache,\n },\n 'info',\n );\n }\n}\n\nexport class InvalidClientSchemaError extends ProtocolErrorWithLevel {\n readonly name = 'InvalidClientSchemaError';\n\n constructor(cause: unknown) {\n super(\n {\n kind: ErrorKind.SchemaVersionNotSupported,\n message: `Could not parse clientSchema stored in CVR: ${String(cause)}`,\n origin: ErrorOrigin.ZeroCache,\n },\n 'warn',\n {cause},\n );\n }\n}\n\nexport class RowsVersionBehindError extends Error {\n readonly name = 'RowsVersionBehindError';\n readonly cvrVersion: string;\n readonly rowsVersion: string | null;\n\n constructor(cvrVersion: string, rowsVersion: string | null) {\n super(`rowsVersion (${rowsVersion}) is behind CVR ${cvrVersion}`);\n this.cvrVersion = cvrVersion;\n this.rowsVersion = rowsVersion;\n 
}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;AAuEA,IAAI,eAAe;;;;;;AAOnB,SAAS,iBACP,eACA,KAMA;AACA,QAAO;EACL,aAAa,MAAM,IAAI,OAAO,MAAM;EACpC,OAAO,MAAM,IAAI,OAAO;EACxB,wBACE,kBAAkB,KAAA,IACd,OACA,mBAAmB,iBAAiB,cAAc,GAAG,IAAK;EAChE,iBAAiB,iBAAiB;EACnC;;AAGH,IAAM,SAAS,MAAM,UAAU,aAAa,QAAQ;AAWpD,SAAS,QAAQ,KAA8B;CAC7C,MAAM,gBAAgB,MACpB,MAAM,OAAO,KAAA,IAAY,kBAAkB,EAAE;CAI/C,MAAM,WAAW,IAAI,kBACjB,EAAC,iBAAiB,IAAI,iBAAgB,GACtC,EAAE;AAEN,KAAI,IAAI,cAAc,MAAM;AAE1B,SACE,IAAI,cAAc,QAAQ,IAAI,cAAc,MAC5C,yDACD;AACD,SAAO;GACL,MAAM;GACN,IAAI,IAAI;GACR,MAAM,IAAI;GACV,MAAM,IAAI;GACV,cAAc,aAAa,IAAI,aAAa;GAC5C,aAAa,EAAE;GACf,oBAAoB,IAAI,sBAAsB,KAAA;GAC9C,uBAAuB,aAAa,IAAI,sBAAsB;GAC9D,GAAG;GACJ;;CAGH,MAAM,MAAM,UAAU,MAAM,IAAI,UAAU;AAC1C,QAAO,IAAI,WACN;EACC,MAAM;EACN,IAAI,IAAI;EACR;EACA,oBAAoB,IAAI,sBAAsB,KAAA;EAC9C,uBAAuB,aAAa,IAAI,sBAAsB;EAC9D,GAAG;EACJ,GACA;EACC,MAAM;EACN,IAAI,IAAI;EACR;EACA,cAAc,aAAa,IAAI,aAAa;EAC5C,aAAa,EAAE;EACf,oBAAoB,IAAI,sBAAsB,KAAA;EAC9C,uBAAuB,aAAa,IAAI,sBAAsB;EAC9D,GAAG;EACJ;;AAIP,IAAM,2BAA2B;AAOjC,IAAM,oBAAoB;AAE1B,IAAa,WAAb,MAAsB;CACpB;CACA;CACA;CACA;CACA;CACA,0BAMK,IAAI,KAAK;CAId,wBAKgB,KAAA;CAChB,2BAAoC,IAAI,aACtC,YACD;CACD,gBAAyB,IAAI,aAAoB,YAAY;CAC7D;CACA;CACA;CACA,YAAoB;CACpB,uCAAgC,IAAI,KAAoC;CACxE,wCAAiC,IAAI,KAAyB;CAC9D,8CAAuC,IAAI,KAAkC;CAE7E,YACE,IACA,OACA,OACA,QACA,OACA,aACA,wBAAwB,0BACxB,kBAAkB,mBAClB,4BAA4B,KAC5B,eAAe,YACf;AACA,QAAA,cAAoB;AACpB,QAAA,KAAW;AACX,QAAA,SAAe,UAAU,MAAM;AAC/B,QAAA,SAAe;AACf,QAAA,KAAW;AACX,QAAA,WAAiB,IAAI,eACnB,IACA,OACA,OACA,OACA,aACA,2BACA,aACD;AACD,QAAA,wBAA8B;AAC9B,QAAA,kBAAwB;;CAG1B,KAAK,OAAe;AAClB,SAAO,MAAA,GAAS,GAAG,MAAA,OAAa,GAAG,QAAQ;;CAG7C,mBAAmB,WAAmB,QAAmC;EAIvE,MAAM,WAAW,MAAA,2BAAiC,IAAI,UAAU;AAChE,QAAA,2BAAiC,IAC/B,WACA,WAAW;GAAC,GAAG;GAAU,GAAG;GAAO,GAAG,OACvC;;CAGH,KAAK,IAAgB,iBAAuC;AAC1D,SAAO,eAAe,QAAQ,YAAY,YAAY;GACpD,IAAI;AACJ,QAAK,IAAI,IAAI,GAAG,IAAI,MAAA,iBAAuB,KAAK;AAC9C,QAAI,IAAI,EACN,OAAM,MAAM,MAAA,sBAA4B;IAE1C,MAAM,SAAS,MAAM,MAAA,KAAW,IAAI,gBAAgB;AACpD,QAAI,kBAAkB,wBAAwB;AAC5C,QAAG,OAAO,WAAW,IAAI,EAAE,IAAI,OAAO,OAAO,GAAG;AAChD,WAAM;AACN;;AAEF,WAAO;;AAET,UAAO,KAAK,sDAAsD;AAClE,SAAM,IAAI,oBACR,yCAAyC,IAAI,WAAW,oBAAoB,IAAI,cACjF;IACD;;CAGJ,OAAA,KACE,IACA,iBACuC;EACvC,MAAM,QAAQ,KAAK,KAAK;EAExB,MAAM,KAAK,MAAA;EACX,MAAM,MAAW;GACf;GACA,SAAS;GACT,YAAY;GACZ,UAAU,mBAAmB,EAAE;GAC/B,gBAAgB;GAChB,SAAS,EAAE;GACX,SAAS,EAAE;GACX,cAAc;GACd,WAAW;GACZ;EAED,MAAM,CAAC,UAAU,aAAa,WAAW,eAAe,MAAM,MAC5D,MAAA,KACA,OAAM;AACJ,MAAG,QAAQ,wBAAwB,KAAK,KAAK,GAAG,MAAM,KAAK;AAC3D,UAAO;IACL,EAMC;;;;;;;;;;mBAUQ,MAAA,IAAU,YAAY,CAAC;wBAClB,MAAA,IAAU,cAAc,CAAC;;0CAEP;IAChC,EAAkC,0BAA0B,MAAA,IAC1D,UACD,CAAC;qCACyB;IAC3B,EAAgB,iBAAiB,MAAA,IAAU,UAAU,CAAC;oCAC5B,GAAG;IAC7B,EAAgB;;;;;;;;iBAQT,MAAA,IAAU,UAAU,CAAC;oCACF;IAC3B;KAEH,EAAC,MAAM,UAAc,CACtB;AACD,KAAG,QACD,0BAA0B,KAAK,KAAK,GAAG,MAAM,OACvC,YAAY,OAAO,YAAY,UAAU,OAAO,YAAY,YAAY,OAAO,WACtF;AAED,MAAI,SAAS,WAAW,EAEtB,MAAK,YAAY;GACf,SAAS,IAAI;GACb,YAAY;GACZ,UAAU,mBAAmB,EAAE;GAC/B,gBAAgB;GAChB,cAAc;GACd,WAAW;GACZ,CAAC;OACG;AACL,UACE,SAAS,WAAW,SACd,0CAA0C,SAAS,SAC1D;GACD,MAAM,EACJ,SACA,YACA,UACA,gBACA,OACA,WACA,aACA,cACA,WACA,YACE,SAAS;AAEb,OAAI,QACF,OAAM,IAAI,oBACR,2CACD;AAGH,OAAI,UAAU,MAAA,OACZ,MAAK,aAAa,KAAK,gBACrB,OAAM,IAAI,eAAe,OAAO,WAAW,gBAAgB;OAKtD,OAAA,EAAQ;qBACF,MAAA,IAAU,YAAY,CAAC;kCACV,MAAA,OAAa;kCACb,gBAAgB;wCACV,MAAA,GAAS;;mDAEE,kBAAkB,IAAK;UAE7D,SAAS,CACT,MAAM,MAAA,YAAkB;AAI/B,OAAI,aAAa,eAAe,kBAAkB,cAIhD,QAAO,IAAI,uBAAuB,SAAS,YAAY;AAGzD,OAAI,UAAU,kBAAkB,QAAQ;AACxC,OAAI,aAAa;AACjB,OAAI,WAAW;AACf,OAAI,iBAAiB;AACrB,OAAI,YAAY;AAEhB,OAAI;AACF,QAAI,eACF,iBAAiB,OACb,OACA,MAAQ,cAAc,mBAAmB;YACxC,GAAG;AACV,UAAM,IAAI,yB
AAyB,EAAE;;;AAIzC,OAAK,MAAM,OAAO,YAChB,KAAI,QAAQ,IAAI,YAAY;GAC1B,IAAI,IAAI;GACR,iBAAiB,EAAE;GACpB;AAGH,OAAK,MAAM,OAAO,WAAW;GAC3B,MAAM,QAAQ,QAAQ,IAAI;AAC1B,OAAI,QAAQ,IAAI,aAAa;;AAG/B,OAAK,MAAM,OAAO,aAAa;GAC7B,MAAM,SAAS,IAAI,QAAQ,IAAI;AAE/B,OAAI,CAAC,IAAI,WAAW,IAAI,kBAAkB,KACxC,KAAI,OACF,QAAO,gBAAgB,KAAK,IAAI,UAAU;OAG1C,IAAG,QACD,4CAA4C,IAAI,SAAS,+BAC1D;GAIL,MAAM,QAAQ,IAAI,QAAQ,IAAI;AAC9B,OACE,SACA,MAAM,SAAS,eACd,CAAC,IAAI,WAAW,IAAI,kBAAkB,MAEvC,OAAM,YAAY,IAAI,YAAY;IAChC,eAAe,IAAI,iBAAiB,KAAA;IACpC,KAAK,SAAS,IAAI,OAAA,IAAsB;IACxC,SAAS,kBAAkB,IAAI,aAAa;IAC7C;;AAIL,KAAG,OACD,cAAc,cAAc,IAAI,QAAQ,CAAC,IAAI,KAAK,KAAK,GAAG,MAAM,MACjE;AAID,SAAO;;CAGT,gBAAwD;AACtD,SAAO,MAAA,SAAe,eAAe;;CAGvC,aAAa,KAAsB;AACjC,QAAA,wBAA8B,IAAI,IAAI,IAAI,IAAI;;;;;;;;;;CAWhD,aAAa,IAAiB;AAC5B,QAAA,wBAA8B,IAAI,IAAI,KAAK;;;;;;;CAQ7C,aAAa,GAAG,KAAc;AAC5B,OAAK,MAAM,MAAM,IACf,OAAA,aAAmB,IAAI,GAAG;;;;;;;CAS9B,MAAM,eAAe,UAAoB,YAAmC;AAC1E,QAAM,MAAA,EAAQ,UAAU,MAAA,IAAU,YAAY,CAAC;+BACpB,WAAW;6BACb,SAAS;oCACF,MAAA,KAAW,SAAS;;;;;;;;CAStD,MAAM,cAA6C;EACjD,MAAM,SAAS,MAAM,MAAA,EAA0C;+BACpC,MAAA,IAAU,YAAY,CAAC;gCACtB,MAAA,KAAW,QAAQ;AAC/C,MAAI,OAAO,WAAW,EAEpB;AAEF,SACE,OAAO,WAAW,SACZ,gDAAgD,OAAO,SAC9D;AACD,SAAO,OAAO,GAAG;;CAGnB,YAAY,EACV,SACA,gBACA,YACA,cACA,WACA,YASO;AAGP,QAAA,wBAA8B,IAAI,oBAAoB;GACpD,MAAM,SAAuB;IAC3B,eAAe,MAAA;IACf,SAAS,cAAc,QAAQ;IAC/B;IACA;IACA;IACA,OAAO,MAAA;IACP,WAAW;IACX;IACA;IACD;AACD,UAAO,EAAE;sBACO,MAAA,IAAU,YAAY,CAAC,GAAG,GAAG,OAAO,CAAC;wDACH,GAAG,OAAO;;;CAIhE,mBAAmB,SAAqB,YAA8B;AACpE,QAAA,kBAAwB,WAAW,IAAI;GACrC,cAAc,cAAc,QAAQ;GACpC,SAAS;GACT,oBAAoB;GACpB,uBAAuB;GACxB,CAAC;;CAGJ,SAAS,OAA0B;EACjC,MAAM,SAAS,sBAAsB,MAAA,IAAU,MAAM;EAErD,MAAM,IAAI;GACR,GAAG;GAEH,WACE,OAAO,cAAc,OAAO,KAAK,UAAU,OAAO,UAAU,GAAG;GACjE,oBAAoB,OAAO,sBAAsB;GACjD,uBAAuB,OAAO,yBAAyB;GACvD,SAAS,OAAO,WAAW;GAC5B;AACD,QAAA,oBAA0B,IAAI,MAAM,IAAI,EAAE;;CAG5C,YAAY,OAAoB;EAC9B,MAAM,sBAAsB,MAC1B,IAAI,cAAc,EAAE,GAAG;AACzB,QAAA,kBAAwB,MAAM,IAAI;GAChC,cACE,MAAM,SAAS,aACX,OACA,mBAAmB,MAAM,aAAa;GAC5C,oBAAoB,MAAM,sBAAsB;GAChD,uBAAuB,mBAAmB,MAAM,sBAAsB;GACtE,SAAS;GACV,CAAC;;CAGJ,sBAAsB,WAAmB,WAAyB;AAChE,QAAA,kBAAwB,WAAW,EAAC,iBAAiB,WAAU,CAAC;;CAGlE,aAAa,QAA4B;EACvC,MAAM,SAAqB;GACzB,eAAe,MAAA;GACf,UAAU,OAAO;GAClB;AAED,QAAA,OAAa,IAAI;GACf,OAAO,EAAC,SAAS,GAAE;GACnB,QAAO,OAAM,EAAE,eAAe,MAAA,IAAU,UAAU,CAAC,GAAG,GAAG,OAAO;GACjE,CAAC;;CAGJ,aAAa,UAAkB;AAC7B,QAAA,OAAa,IAAI;GACf,OAAO,EAAC,SAAS,GAAE;GACnB,QAAO,QACL,GAAG,eAAe,MAAA,IAAU,UAAU,CAAC;sCACT,MAAA,GAAS;iCACd;GAC5B,CAAC;;CAGJ,gBACE,YACA,OACA,QACA,SACA,eACA,KACM;EACN,MAAM,EAAC,OAAO,oBAAmB,iBAAiB,eAAe,IAAI;EAErE,MAAM,SAAqB;GACzB,eAAe,MAAA;GACf,UAAU,OAAO;GACjB;GACA,eAAe;GACf,cAAc,cAAc,WAAW;GACvC,WAAW,MAAM;GACjB,KAAK;GACN;EAGD,MAAM,MAAM,GAAG,OAAO,GAAG,GAAG,MAAM;AAClC,QAAA,qBAA2B,IAAI,KAAK,OAAO;;CAG7C,kBACE,IACA,cACA,SACA,SACA,qBAA+B,EAAE,EACW;AAC5C,SAAO,MAAA,SAAe,kBACpB,IACA,cACA,SACA,SACA,mBACD;;CAGH,MAAM,qBACJ,IACA,cACA,SACA,SAC2B;AAC3B,MAAI,YAAY,cAAc,QAAQ,QAAQ,IAAI,EAChD,QAAO,EAAE;EAGX,MAAM,UAAU,KAAK,KAAK;EAC1B,MAAM,QAAQ,eAAe,cAAc,aAAa,GAAG;EAC3D,MAAM,MAAM,cAAc,QAAQ,QAAQ;AAC1C,KAAG,QAAQ,4CAA4C,QAAQ;EAE/D,MAAM,SAAS,IAAI,gBAAgB,IAAI,EAAC,MAAM,UAAc,CAAC,CAAC,IAAI,MAAA,GAAS;AAC3E,MAAI;AAEF,SAAM,OAAO,iBAAgB,OAC3B,aAAa,IAAI,MAAA,QAAc,MAAA,IAAU,QAAQ,CAClD;GAED,MAAM,CAAC,YAAY,aAAa,MAAM,OAAO,iBAAgB,OAC3D,QAAQ,IAAI,CACV,EAAgB;sBACJ,MAAA,IAAU,UAAU,CAAC;kCACT,MAAA,GAAS;+BACZ,MAAM;gCACL,OACtB,EAAgE;yDACjB,MAAA,IAAU,UAAU,CAAC;kCAC5C,MAAA,GAAS;+BACZ,MAAM;gCACL,MACvB,CAAC,CACH;GAED,MAAM,UAA4B,EAAE;AACpC,QAAK,MAAM,OAAO,WAAW;IAC3B,MAAM,EAAC,WAAW,OAAM;IACxB,MAAM,QAAe,IAAI,UACrB;KAAC,MAAM;KAAS,IAAI;KAAO;KAAG,GAC9B;KAAC,MAAM;KAAS,IAAI;K
AAO;KAAG;IAClC,MAAM,IAAI,IAAI;AACd,WAAO,GAAG,6CAA6C;AACvD,YAAQ,KAAK;KAAC;KAAO,WAAW,kBAAkB,EAAE;KAAC,CAAC;;AAExD,QAAK,MAAM,OAAO,YAAY;IAC5B,MAAM,EAAC,UAAU,WAAW,OAAM;IAClC,MAAM,QAAe,IAAI,UACrB;KAAC,MAAM;KAAS,IAAI;KAAO;KAAI;KAAS,GACxC;KAAC,MAAM;KAAS,IAAI;KAAO;KAAI;KAAS;AAC5C,YAAQ,KAAK;KAAC;KAAO,WAAW,kBAAkB,IAAI,aAAa;KAAC,CAAC;;AAGvE,MAAG,QACD,GAAG,QAAQ,OAAO,mBAAmB,KAAK,KAAK,GAAG,QAAQ,MAC3D;AACD,UAAO;YACC;AACR,UAAO,SAAS;;;CAIpB,cACE,IACA,IACuB;EAEvB,MAAM,8BAAc,IAAI,KAAkC;AAC1D,OAAK,MAAM,CAAC,WAAW,YAAY,MAAA,4BAAkC;GACnE,MAAM,WAAW,MAAA,oBAA0B,IAAI,UAAU;AACzD,OAAI,SAEF,QAAO,OAAO,UAAU,QAAQ;OAGhC,aAAY,IAAI,WAAW,QAAQ;;EAIvC,MAAM,UAAiC,EAAE;AAGzC,MAAI,MAAA,oBAA0B,OAAO,GAAG;GACtC,MAAM,OAAO,CAAC,GAAG,MAAA,oBAA0B,QAAQ,CAAC;AACpD,MAAG,QAAQ,kBAAkB,KAAK,OAAO,qBAAqB;AAE9D,WAAQ,KAAK,EAAE;sBACC,MAAA,IAAU,UAAU,CAAC;;;;;;;;;;;;;;;;;;;;;;;;;;;;iCA4BV,KAAK;;;;;;;;;;;;;;;;;;;;;;;;;;QA0B9B;;AAIJ,MAAI,YAAY,OAAO,GAAG;AACxB,MAAG,QAAQ,kBAAkB,YAAY,KAAK,wBAAwB;GACtE,MAAM,OAAO,MAAM,KACjB,YAAY,SAAS,GACpB,CAAC,WAAW,cAAc;IACzB,eAAe,MAAA;IACf;IACA,iBAAiB,QAAQ,iBAAiB,KAAA;IAC1C,cAAc,QAAQ,gBAAgB;IACtC,YAAY,QAAQ,YAAY,KAAA;IAChC,SAAS,QAAQ,WAAW;IAC5B,uBAAuB,QAAQ,uBAAuB,KAAA;IACtD,oBAAoB,QAAQ,sBAAsB;IAClD,0BAA0B,QAAQ,0BAA0B,KAAA;IAC5D,uBAAuB,QAAQ,yBAAyB;IACxD,oBAAoB,QAAQ,oBAAoB,KAAA;IAChD,iBAAiB,QAAQ,mBAAmB;IAC7C,EACF;AACD,WAAQ,KAAK,EAAE;iBACJ,MAAA,IAAU,UAAU,CAAC;;;;;;;;;;;;;;;;;;;;;;iCAsBL,KAAK;;;;;;;;;;;;;;;;QAgB9B;;AAGJ,SAAO;;CAGT,cACE,IACA,IAC4B;AAC5B,MAAI,MAAA,qBAA2B,SAAS,EACtC,QAAO;EAGT,MAAM,OAAO,MAAM,KAAK,MAAA,qBAA2B,QAAQ,GAAE,QAAO;GAClE,MAAM,EAAC,aAAa,OAAO,wBAAwB,oBACjD,iBAAiB,IAAI,iBAAiB,KAAA,GAAW,IAAI,OAAO,GAAG;AACjE,UAAO;IACL,eAAe,IAAI;IACnB,UAAU,IAAI;IACd,WAAW,IAAI;IACf,cAAc,IAAI;IAClB,SAAS,IAAI;IACb,KAAK;IACL;IACA,eAAe;IACf;IACD;IACD;AAEF,KAAG,QAAQ,kBAAkB,KAAK,OAAO,iBAAiB;AAE1D,SAAO,EAAE;oBACO,MAAA,IAAU,UAAU,CAAC;;;;;;;;;;;;;;;;;;;;;;;;;;+BA0BV,KAAK;;;;;;;;;;;;;;;;;;;;CAqBlC,OAAA,yBACE,IACA,IACA,wBACA,iBACe;EACf,MAAM,QAAQ,KAAK,KAAK;AACxB,KAAG,QAAQ,qCAAqC;EAChD,MAAM,SAAS,MAAM,EAEpB,+CAA+C,MAAA,IAAU,YAAY,CAAC;kCACzC,MAAA,GAAS;;EAEvC,MAAM,WAAW,cAAc,uBAAuB;EACtD,MAAM,EAAC,SAAS,OAAO,cACrB,OAAO,SAAS,IACZ,OAAO,KACP;GACE,SAAS,kBAAkB;GAC3B,OAAO;GACP,WAAW;GACZ;AACP,KAAG,QACD,2CAA2C,KAAK,KAAK,GAAG,SAAS,MAClE;AACD,MAAI,UAAU,MAAA,WAAiB,aAAa,KAAK,gBAC/C,OAAM,IAAI,eAAe,OAAO,WAAW,gBAAgB;AAE7D,MAAI,YAAY,SACd,OAAM,IAAI,gCAAgC,UAAU,QAAQ;;CAIhE,OAAA,MACE,IACA,wBACA,KACA,iBAC+B;EAC/B,MAAM,QAAuB;GAC3B,WAAW;GACX,SAAS;GACT,SAAS;GACT,SAAS;GACT,MAAM;GACN,cAAc;GACd,YAAY;GACb;AACD,MAAI,MAAA,wBAA8B,MAAM;GACtC,MAAM,qBAAqB,MAAM,KAAK,eAAe;AACrD,SAAA,WAAiB,mBAAmB;AACpC,QAAK,MAAM,CAAC,IAAI,QAAQ,MAAA,wBAA8B,SAAS,EAAE;AAC/D,QAAI,MAAA,aAAmB,IAAI,GAAG,CAC5B;IAEF,MAAM,WAAW,mBAAmB,IAAI,GAAG;AAC3C,QAEG,aAAa,KAAA,KAAa,CAAC,KAAK,aAEjC,UACG,OAAO,KAAA,GACR,SACD,CAED,OAAA,wBAA8B,OAAO,GAAG;;;AAI9C,MACE,MAAA,wBAA8B,SAAS,KACvC,MAAA,OAAa,SAAS,KACtB,MAAA,yBAA+B,KAAA,KAC/B,MAAA,oBAA0B,SAAS,KACnC,MAAA,2BAAiC,SAAS,KAC1C,MAAA,qBAA2B,SAAS,EAEpC,QAAO;AAIT,OAAK,YAAY,IAAI;EACrB,MAAM,QAAQ,KAAK,KAAK;AACxB,KAAG,QAAQ,qBAAqB;EAKhC,MAAM,UAAU,MAAM,MACpB,MAAA,IACA,OAAM,OAAM;AACV,MAAG,QAAQ,wBAAwB,KAAK,KAAK,GAAG,MAAM,KAAK;AAK3D,SAAM,MAAA,yBACJ,IACA,IACA,wBACA,gBACD;GAED,MAAM,eAAe,EAAE;AACvB,OAAI,MAAA,sBAA4B;AAC9B,iBAAa,KAAK,MAAA,qBAA2B,IAAI,gBAAgB,CAAC;AAClE,UAAM;AACN,UAAM;;AAER,QAAK,MAAM,SAAS,MAAA,QAAc;AAChC,UAAM,WAAW,MAAM,MAAM,WAAW;AACxC,UAAM,QAAQ,MAAM,MAAM,QAAQ;AAElC,iBAAa,KAAK,MAAM,MAAM,IAAI,gBAAgB,CAAC;AACnD,UAAM;;GAKR,MAAM,kBACJ,MAAA,oBAA0B,OAAO,KACjC,MAAA,2BAAiC,OAAO;GAE1C,MAAM,cAAc,MAAA,aAAmB,IAAI,GAAG;GAE9C,IAAI,eAAsC,EAAE;AAC5C,OAAI,iBAAiB;AACnB,mBAAe,MAAA,aAAmB,IAAI,GAAG;IAGzC,
MAAM,mBAAmB,CACvB,GAAG,MAAA,2BAAiC,MAAM,CAC3C,CAAC,QAAO,QAAO,CAAC,MAAA,oBAA0B,IAAI,IAAI,CAAC,CAAC;AAErD,UAAM,UAAU,MAAA,oBAA0B,OAAO;AACjD,UAAM,eACH,MAAA,oBAA0B,OAAO,IAAI,IAAI,MACzC,mBAAmB,IAAI,IAAI;AAE9B,QAAI,aAAa;AACf,WAAM,UAAU,MAAA,qBAA2B;AAC3C,WAAM;;cAEC,aAAa;AACtB,UAAM,UAAU,MAAA,qBAA2B;AAC3C,UAAM;;GAGR,MAAM,aAAa,MAAA,SAAe,kBAChC,IACA,IAAI,SACJ,MAAA,yBACA,eACA,GACD;AACD,SAAM,cAAc,WAAW;GAG/B,MAAM,YAAY;IAChB,GAAG;IACH,GAAG;IACH,GAAI,cAAc,CAAC,YAAY,GAAG,EAAE;IACpC,GAAG;IACJ;AAED,MAAG,QAAQ,aAAa,UAAU,OAAO,yBAAyB;AAKlE,UAAO,QAAQ,IAAI,UAAU;KAE/B,EAAC,MAAM,gBAAoB,CAC5B;AAED,KAAG,QAAQ,4BAA4B,KAAK,KAAK,GAAG,MAAM,KAAK;EAI/D,MAAM,eACH,MAAA,uBAA6B,IAAI,KAClC,MAAA,OAAa,QACZ,MAAA,oBAA0B,OAAO,IAAI,IAAI,MACzC,CAAC,GAAG,MAAA,2BAAiC,MAAM,CAAC,CAAC,QAC5C,QAAO,CAAC,MAAA,oBAA0B,IAAI,IAAI,CAC3C,CAAC,SAAS,IACP,IACA,MACH,MAAA,qBAA2B,OAAO,IAAI,IAAI;EAG7C,MAAM,cAFiB,QAAQ,SAAS,cAEH;AACrC,MAAI,CAAC,YACH,OAAM,eAAe,MAAA,wBAA8B;MAEnD,OAAM,QAAQ,MAAA,wBAA8B;AAG9C,QAAA,WAAiB,MAAM,MAAA,SAAe,MACpC,MAAA,yBACA,IAAI,SACJ,YACD;AACD,mBAAiB,MAAA,SAAe;AAEhC,SAAO;;CAGT,IAAI,WAAmB;AACrB,SAAO,MAAA;;CAGT,MAAM,MACJ,IACA,wBACA,KACA,iBAC+B;EAC/B,MAAM,QAAQ,YAAY,KAAK;AAC/B,OAAK,GAAG,YAAY,cAAc,eAAe;AACjD,MAAI;GACF,MAAM,QAAQ,MAAM,MAAA,MAClB,IACA,wBACA,KACA,gBACD;AACD,OAAI,OAAO;IACT,MAAM,UAAU,YAAY,KAAK,GAAG;AACpC,OAAG,OACD,eAAe,cAAc,IAAI,QAAQ,CAAC,GACrC,KAAK,UAAU,MAAM,CAAC,OAAO,QAAQ,MAC3C;AACD,UAAA,SAAe,qBAAqB,OAAO,QAAQ;;AAErD,UAAO;WACA,GAAG;AAEV,SAAA,SAAe,OAAO;AACtB,SAAM;YACE;AACR,SAAA,OAAa,OAAO;AACpB,SAAA,uBAA6B,KAAA;AAC7B,SAAA,wBAA8B,OAAO;AACrC,SAAA,aAAmB,OAAO;AAC1B,SAAA,oBAA0B,OAAO;AACjC,SAAA,qBAA2B,OAAO;AAClC,SAAA,2BAAiC,OAAO;;;CAI5C,oBAA6B;AAC3B,SAAO,MAAA,SAAe,mBAAmB;;;CAI3C,QAAQ,IAA+B;AACrC,SAAO,MAAA,SAAe,QAAQ,GAAG;;CAGnC,MAAM,eACJ,IACA,UACA,UAC4B;EAC5B,MAAM,KAAK,MAAA;EACX,MAAM,gBAAgB,MAAA;EAEtB,MAAM,SAAS,IAAI,gBAAgB,IAAI,EAAC,MAAM,UAAc,CAAC,CAAC,IAAI,GAAG;AACrE,MAAI;AACF,UAAO,MAAM,OAAO,iBAClB,OAAM,EAAqB;;;;0BAIT,eAAe;;iCAER,MAAA,IAAU,OAAO,CAAC;;;;;;;;SAQ1C,MAAA,IAAU,UAAU,CAAC;cAChB,MAAA,IAAU,UAAU,CAAC;;;8BAGL,cAAc;MACtC,WAAW,EAAE,sBAAsB,aAAa,EAAE,GAAG;;;;iDAIV,iBAAiB,SAAS,CAAC;;wCAGrE;YACO;AACR,UAAO,SAAS;;;;;;;;;AAUtB,eAAsB,aACpB,IACA,QACA,eACA,wBACe;CACf,MAAM,WAAW,cAAc,uBAAuB;CACtD,MAAM,SAAS,MAAM,EAAmC;0BAChC,GAAG,OAAO,CAAC;gCACL;CAC9B,MAAM,EAAC,YACL,OAAO,SAAS,IAAI,OAAO,KAAK,EAAC,SAAS,kBAAkB,cAAa;AAC3E,KAAI,YAAY,SACd,OAAM,IAAI,gCAAgC,UAAU,QAAQ;;AAIhE,IAAa,sBAAb,cAAyC,uBAAuB;CAC9D,YAAY,SAAiB;AAC3B,QACE;GACE,MAAM;GACN;GACA,QAAQ;GACT,EACD,OACD;;;AAIL,IAAa,kCAAb,cAAqD,uBAAuB;CAC1E,OAAgB;CAEhB,YAAY,iBAAyB,eAAuB;AAC1D,QACE;GACE,MAAM;GACN,SAAS,gDAAgD,gBAAgB,QAAQ;GACjF,QAAQ;GACT,EACD,OACD;;;AAIL,IAAa,iBAAb,cAAoC,uBAAuB;CACzD,OAAgB;CAEhB,YACE,OACA,WACA,iBACA;AACA,QACE;GACE,MAAM;GACN,SACE,oCAAoC,MAAM,MACvC,IAAI,KAAK,aAAa,EAAE,CAAC,aAAa,CAAC,uBACnB,IAAI,KAAK,gBAAgB,CAAC,aAAa,CAAC;GACjE,cAAc;GACd,QAAQ;GACT,EACD,OACD;;;AAIL,IAAa,2BAAb,cAA8C,uBAAuB;CACnE,OAAgB;CAEhB,YAAY,OAAgB;AAC1B,QACE;GACE,MAAM;GACN,SAAS,+CAA+C,OAAO,MAAM;GACrE,QAAQ;GACT,EACD,QACA,EAAC,OAAM,CACR;;;AAIL,IAAa,yBAAb,cAA4C,MAAM;CAChD,OAAgB;CAChB;CACA;CAEA,YAAY,YAAoB,aAA4B;AAC1D,QAAM,gBAAgB,YAAY,kBAAkB,aAAa;AACjE,OAAK,aAAa;AAClB,OAAK,cAAc"}
1
+ {"version":3,"file":"cvr-store.js","names":["#schema","#taskID","#id","#failService","#db","#writes","#pendingRowRecordUpdates","#forceUpdates","#rowCache","#loadAttemptIntervalMs","#maxLoadAttempts","#pendingQueryUpdates","#pendingDesireUpdates","#pendingQueryPartialUpdates","#load","#cvr","#pendingInstanceWrite","#updateQueryFields","#checkVersionAndOwnership","#flush","#rowCount","#flushDesires","#flushQueries"],"sources":["../../../../../../zero-cache/src/services/view-syncer/cvr-store.ts"],"sourcesContent":["import {trace} from '@opentelemetry/api';\nimport type {LogContext} from '@rocicorp/logger';\nimport type {MaybeRow, PendingQuery, Row} from 'postgres';\nimport {startAsyncSpan} from '../../../../otel/src/span.ts';\nimport {version} from '../../../../otel/src/version.ts';\nimport {assert} from '../../../../shared/src/asserts.ts';\nimport {CustomKeyMap} from '../../../../shared/src/custom-key-map.ts';\nimport {CustomKeySet} from '../../../../shared/src/custom-key-set.ts';\nimport {\n deepEqual,\n type ReadonlyJSONValue,\n} from '../../../../shared/src/json.ts';\nimport {sleep} from '../../../../shared/src/sleep.ts';\nimport * as v from '../../../../shared/src/valita.ts';\nimport {astSchema} from '../../../../zero-protocol/src/ast.ts';\nimport {clientSchemaSchema} from '../../../../zero-protocol/src/client-schema.ts';\nimport {ErrorKind} from '../../../../zero-protocol/src/error-kind.ts';\nimport {ErrorOrigin} from '../../../../zero-protocol/src/error-origin.ts';\nimport type {InspectQueryRow} from '../../../../zero-protocol/src/inspect-down.ts';\nimport {clampTTL, DEFAULT_TTL_MS} from '../../../../zql/src/query/ttl.ts';\nimport * as Mode from '../../db/mode-enum.ts';\nimport {runTx} from '../../db/run-transaction.ts';\nimport {TransactionPool} from '../../db/transaction-pool.ts';\nimport {recordRowsSynced} from '../../server/anonymous-otel-start.ts';\nimport {ProtocolErrorWithLevel} from '../../types/error-with-level.ts';\nimport type {PostgresDB, PostgresTransaction} from '../../types/pg.ts';\nimport {rowIDString} from '../../types/row-key.ts';\nimport {cvrSchema, type ShardID} from '../../types/shards.ts';\nimport type {Patch, PatchToVersion} from './client-handler.ts';\nimport type {CVR, CVRSnapshot} from './cvr.ts';\nimport {RowRecordCache} from './row-record-cache.ts';\nimport {\n type ClientsRow,\n type DesiresRow,\n type InstancesRow,\n type QueriesRow,\n type RowsRow,\n} from './schema/cvr.ts';\nimport {\n type ClientQueryRecord,\n type ClientRecord,\n cmpVersions,\n type CustomQueryRecord,\n type CVRVersion,\n EMPTY_CVR_VERSION,\n type InternalQueryRecord,\n type NullableCVRVersion,\n type QueryPatch,\n type QueryRecord,\n queryRecordToQueryRow,\n type RowID,\n type RowRecord,\n versionFromString,\n versionString,\n} from './schema/types.ts';\nimport {\n type TTLClock,\n ttlClockAsNumber,\n ttlClockFromNumber,\n} from './ttl-clock.ts';\n\nexport type CVRFlushStats = {\n instances: number;\n queries: number;\n desires: number;\n clients: number;\n rows: number;\n rowsDeferred: number;\n statements: number;\n};\n\nlet flushCounter = 0;\n\n/**\n * Convert TTL/timestamp values for both old (seconds-based) and new (ms-based) columns.\n * Old columns: inactivatedAt (TIMESTAMPTZ), ttl (INTERVAL) - need conversion ms->seconds\n * New columns: inactivatedAtMs (DOUBLE PRECISION), ttlMs (DOUBLE PRECISION) - store ms directly\n */\nfunction convertTTLValues(\n inactivatedAt: TTLClock | undefined,\n ttl: number,\n): {\n ttlInterval: number | null;\n ttlMs: number | null;\n 
inactivatedAtTimestamp: TTLClock | null;\n inactivatedAtMs: TTLClock | null;\n} {\n return {\n ttlInterval: ttl < 0 ? null : ttl / 1000, // INTERVAL needs seconds\n ttlMs: ttl < 0 ? null : ttl, // New column stores ms directly\n inactivatedAtTimestamp:\n inactivatedAt === undefined\n ? null\n : ttlClockFromNumber(ttlClockAsNumber(inactivatedAt) / 1000),\n inactivatedAtMs: inactivatedAt ?? null,\n };\n}\n\nconst tracer = trace.getTracer('cvr-store', version);\n\n/**\n * QueriesRow with queryArgs as a stringified JSON value.\n * Used for batched config writes where queryArgs are pre-stringified\n * to handle the postgres.js boolean array bug.\n */\ntype StringifiedQueriesRow = Omit<QueriesRow, 'queryArgs'> & {\n queryArgs: string | null;\n};\n\nfunction asQuery(row: QueriesRow): QueryRecord {\n const maybeVersion = (s: string | null) =>\n s === null ? undefined : versionFromString(s);\n\n // Only attach rowSetSignature when the column is non-null, so existing\n // snapshots that don't include the field don't break.\n const sigField = row.rowSetSignature\n ? {rowSetSignature: row.rowSetSignature}\n : {};\n\n if (row.clientAST === null) {\n // custom query\n assert(\n row.queryName !== null && row.queryArgs !== null,\n 'queryName and queryArgs must be set for custom queries',\n );\n return {\n type: 'custom',\n id: row.queryHash,\n name: row.queryName,\n args: row.queryArgs,\n patchVersion: maybeVersion(row.patchVersion),\n clientState: {},\n transformationHash: row.transformationHash ?? undefined,\n transformationVersion: maybeVersion(row.transformationVersion),\n ...sigField,\n } satisfies CustomQueryRecord;\n }\n\n const ast = astSchema.parse(row.clientAST);\n return row.internal\n ? ({\n type: 'internal',\n id: row.queryHash,\n ast,\n transformationHash: row.transformationHash ?? undefined,\n transformationVersion: maybeVersion(row.transformationVersion),\n ...sigField,\n } satisfies InternalQueryRecord)\n : ({\n type: 'client',\n id: row.queryHash,\n ast,\n patchVersion: maybeVersion(row.patchVersion),\n clientState: {},\n transformationHash: row.transformationHash ?? undefined,\n transformationVersion: maybeVersion(row.transformationVersion),\n ...sigField,\n } satisfies ClientQueryRecord);\n}\n\n// The time to wait between load attempts.\nconst LOAD_ATTEMPT_INTERVAL_MS = 500;\n// The maximum number of load() attempts if the rowsVersion is behind.\n// This currently results in a maximum catchup time of ~5 seconds, after\n// which we give up and consider the CVR invalid.\n//\n// TODO: Make this configurable with something like --max-catchup-wait-ms,\n// as it is technically application specific.\nconst MAX_LOAD_ATTEMPTS = 10;\n\nexport class CVRStore {\n readonly #schema: string;\n readonly #taskID: string;\n readonly #id: string;\n readonly #failService: (e: unknown) => void;\n readonly #db: PostgresDB;\n readonly #writes: Set<{\n stats: Partial<CVRFlushStats>;\n write: (\n tx: PostgresTransaction,\n lastConnectTime: number,\n ) => PendingQuery<MaybeRow[]>;\n }> = new Set();\n // Stored separately so repeated putInstance() calls (e.g. 
setClientSchema,\n // setProfileID, and the final call in #flush) replace each other rather than\n // accumulating as independent statements in #writes.\n #pendingInstanceWrite:\n | ((\n tx: PostgresTransaction,\n lastConnectTime: number,\n ) => PendingQuery<MaybeRow[]>)\n | undefined = undefined;\n readonly #pendingRowRecordUpdates = new CustomKeyMap<RowID, RowRecord | null>(\n rowIDString,\n );\n readonly #forceUpdates = new CustomKeySet<RowID>(rowIDString);\n readonly #rowCache: RowRecordCache;\n readonly #loadAttemptIntervalMs: number;\n readonly #maxLoadAttempts: number;\n #rowCount: number = 0;\n readonly #pendingQueryUpdates = new Map<string, StringifiedQueriesRow>();\n readonly #pendingDesireUpdates = new Map<string, DesiresRow>();\n readonly #pendingQueryPartialUpdates = new Map<string, Partial<QueriesRow>>();\n\n constructor(\n lc: LogContext,\n cvrDb: PostgresDB,\n shard: ShardID,\n taskID: string,\n cvrID: string,\n failService: (e: unknown) => void,\n loadAttemptIntervalMs = LOAD_ATTEMPT_INTERVAL_MS,\n maxLoadAttempts = MAX_LOAD_ATTEMPTS,\n deferredRowFlushThreshold = 100, // somewhat arbitrary\n setTimeoutFn = setTimeout,\n ) {\n this.#failService = failService;\n this.#db = cvrDb;\n this.#schema = cvrSchema(shard);\n this.#taskID = taskID;\n this.#id = cvrID;\n this.#rowCache = new RowRecordCache(\n lc,\n cvrDb,\n shard,\n cvrID,\n failService,\n deferredRowFlushThreshold,\n setTimeoutFn,\n );\n this.#loadAttemptIntervalMs = loadAttemptIntervalMs;\n this.#maxLoadAttempts = maxLoadAttempts;\n }\n\n #cvr(table: string) {\n return this.#db(`${this.#schema}.${table}`);\n }\n\n #updateQueryFields(queryHash: string, fields: Partial<QueriesRow>): void {\n // Track as partial-only update for batched flush. Merge into any\n // pre-existing partial update for the same hash so independent callers\n // (e.g. updateQuery + updateRowSetSignature) don't clobber each other.\n const existing = this.#pendingQueryPartialUpdates.get(queryHash);\n this.#pendingQueryPartialUpdates.set(\n queryHash,\n existing ? 
{...existing, ...fields} : fields,\n );\n }\n\n load(lc: LogContext, lastConnectTime: number): Promise<CVR> {\n return startAsyncSpan(tracer, 'cvr.load', async () => {\n let err: RowsVersionBehindError | undefined;\n for (let i = 0; i < this.#maxLoadAttempts; i++) {\n if (i > 0) {\n await sleep(this.#loadAttemptIntervalMs);\n }\n const result = await this.#load(lc, lastConnectTime);\n if (result instanceof RowsVersionBehindError) {\n lc.info?.(`attempt ${i + 1}: ${String(result)}`);\n err = result;\n continue;\n }\n return result;\n }\n assert(err, 'Expected error to be set after retry loop exhausted');\n throw new ClientNotFoundError(\n `max attempts exceeded waiting for CVR@${err.cvrVersion} to catch up from ${err.rowsVersion}`,\n );\n });\n }\n\n async #load(\n lc: LogContext,\n lastConnectTime: number,\n ): Promise<CVR | RowsVersionBehindError> {\n const start = Date.now();\n\n const id = this.#id;\n const cvr: CVR = {\n id,\n version: EMPTY_CVR_VERSION,\n lastActive: 0,\n ttlClock: ttlClockFromNumber(0), // TTL clock starts at 0, not Date.now()\n replicaVersion: null,\n clients: {},\n queries: {},\n clientSchema: null,\n profileID: null,\n };\n\n const [instance, clientsRows, queryRows, desiresRows] = await runTx(\n this.#db,\n tx => {\n lc.debug?.(`CVR tx started after ${Date.now() - start} ms`);\n return [\n tx<\n (Omit<InstancesRow, 'clientGroupID'> & {\n profileID: string | null;\n deleted: boolean;\n rowsVersion: string | null;\n })[]\n >`SELECT cvr.\"version\",\n \"lastActive\",\n \"ttlClock\",\n \"replicaVersion\",\n \"owner\",\n \"grantedAt\",\n \"clientSchema\",\n \"profileID\",\n \"deleted\",\n rows.\"version\" as \"rowsVersion\"\n FROM ${this.#cvr('instances')} AS cvr\n LEFT JOIN ${this.#cvr('rowsVersion')} AS rows\n ON cvr.\"clientGroupID\" = rows.\"clientGroupID\"\n WHERE cvr.\"clientGroupID\" = ${id}`,\n tx<Pick<ClientsRow, 'clientID'>[]>`SELECT \"clientID\" FROM ${this.#cvr(\n 'clients',\n )}\n WHERE \"clientGroupID\" = ${id}`,\n tx<QueriesRow[]>`SELECT\n \"clientGroupID\",\n \"queryHash\",\n \"clientAST\",\n \"queryName\",\n \"queryArgs\",\n \"patchVersion\",\n \"transformationHash\",\n \"transformationVersion\",\n \"internal\",\n \"deleted\",\n \"rowSetSignature\"\n FROM ${this.#cvr('queries')}\n WHERE \"clientGroupID\" = ${id} AND deleted IS DISTINCT FROM true`,\n tx<DesiresRow[]>`SELECT\n \"clientGroupID\",\n \"clientID\",\n \"queryHash\",\n \"patchVersion\",\n \"deleted\",\n \"ttlMs\" AS \"ttl\",\n \"inactivatedAtMs\" AS \"inactivatedAt\"\n FROM ${this.#cvr('desires')}\n WHERE \"clientGroupID\" = ${id}`,\n ];\n },\n {mode: Mode.READONLY},\n );\n lc.debug?.(\n `CVR tx completed after ${Date.now() - start} ms ` +\n `(${clientsRows.length} clients, ${queryRows.length} queries, ${desiresRows.length} desires)`,\n );\n\n if (instance.length === 0) {\n // This is the first time we see this CVR.\n this.putInstance({\n version: cvr.version,\n lastActive: 0,\n ttlClock: ttlClockFromNumber(0), // TTL clock starts at 0 for new instances\n replicaVersion: null,\n clientSchema: null,\n profileID: null,\n });\n } else {\n assert(\n instance.length === 1,\n () => `Expected exactly one CVR instance, got ${instance.length}`,\n );\n const {\n version,\n lastActive,\n ttlClock,\n replicaVersion,\n owner,\n grantedAt,\n rowsVersion,\n clientSchema,\n profileID,\n deleted,\n } = instance[0];\n\n if (deleted) {\n throw new ClientNotFoundError(\n 'Client has been purged due to inactivity',\n );\n }\n\n if (owner !== this.#taskID) {\n if ((grantedAt ?? 
0) > lastConnectTime) {\n throw new OwnershipError(owner, grantedAt, lastConnectTime);\n } else {\n // Fire-and-forget an ownership change to signal the current owner.\n // Note that the query is structured such that it only succeeds in the\n // correct conditions (i.e. gated on `grantedAt`).\n void this.#db`\n UPDATE ${this.#cvr('instances')} \n SET \"owner\" = ${this.#taskID}, \n \"grantedAt\" = ${lastConnectTime}\n WHERE \"clientGroupID\" = ${this.#id} AND\n (\"grantedAt\" IS NULL OR\n \"grantedAt\" <= to_timestamp(${lastConnectTime / 1000}))\n `\n .execute()\n .catch(this.#failService);\n }\n }\n\n if (version !== (rowsVersion ?? EMPTY_CVR_VERSION.stateVersion)) {\n // This will cause the load() method to wait for row catchup and retry.\n // Assuming the ownership signal succeeds, the current owner will stop\n // modifying the CVR and flush its pending row changes.\n return new RowsVersionBehindError(version, rowsVersion);\n }\n\n cvr.version = versionFromString(version);\n cvr.lastActive = lastActive;\n cvr.ttlClock = ttlClock;\n cvr.replicaVersion = replicaVersion;\n cvr.profileID = profileID;\n\n try {\n cvr.clientSchema =\n clientSchema === null\n ? null\n : v.parse(clientSchema, clientSchemaSchema);\n } catch (e) {\n throw new InvalidClientSchemaError(e);\n }\n }\n\n for (const row of clientsRows) {\n cvr.clients[row.clientID] = {\n id: row.clientID,\n desiredQueryIDs: [],\n };\n }\n\n for (const row of queryRows) {\n const query = asQuery(row);\n cvr.queries[row.queryHash] = query;\n }\n\n for (const row of desiresRows) {\n const client = cvr.clients[row.clientID];\n // Note: row.inactivatedAt is mapped from inactivatedAtMs in the SQL query\n if (!row.deleted && row.inactivatedAt === null) {\n if (client) {\n client.desiredQueryIDs.push(row.queryHash);\n } else {\n // This can happen if the client was deleted but the queries are still alive.\n lc.debug?.(\n `Not adding to desiredQueryIDs for client ${row.clientID} because it has been deleted.`,\n );\n }\n }\n\n const query = cvr.queries[row.queryHash];\n if (\n query &&\n query.type !== 'internal' &&\n (!row.deleted || row.inactivatedAt !== null)\n ) {\n query.clientState[row.clientID] = {\n inactivatedAt: row.inactivatedAt ?? undefined,\n ttl: clampTTL(row.ttl ?? DEFAULT_TTL_MS),\n version: versionFromString(row.patchVersion),\n };\n }\n }\n\n lc.info?.(\n `loaded cvr@${versionString(cvr.version)} (${Date.now() - start} ms)`,\n );\n\n // why do we not sort `desiredQueryIDs` here?\n\n return cvr;\n }\n\n getRowRecords(): Promise<ReadonlyMap<RowID, RowRecord>> {\n return this.#rowCache.getRowRecords();\n }\n\n putRowRecord(row: RowRecord): void {\n this.#pendingRowRecordUpdates.set(row.id, row);\n }\n\n /**\n * Note: Removing a row from the CVR should be represented by a\n * {@link putRowRecord()} with `refCounts: null` in order to properly\n * produce the appropriate delete patch when catching up old clients.\n *\n * This `delRowRecord()` method, on the other hand, is only used for\n * \"canceling\" the put of a row that was not in the CVR in the first place.\n */\n delRowRecord(id: RowID): void {\n this.#pendingRowRecordUpdates.set(id, null);\n }\n\n /**\n * Overrides the default logic that removes no-op writes and forces\n * the updates for the given row `ids`. This has no effect if there\n * are no corresponding puts or dels for the associated row records.\n */\n forceUpdates(...ids: RowID[]) {\n for (const id of ids) {\n this.#forceUpdates.add(id);\n }\n }\n\n /**\n * Updates the `ttlClock` of the CVR instance. 
The ttlClock starts at 0 when\n * the CVR instance is first created and increments based on elapsed time\n * since the base time established by the ViewSyncerService.\n */\n async updateTTLClock(ttlClock: TTLClock, lastActive: number): Promise<void> {\n await this.#db`UPDATE ${this.#cvr('instances')}\n SET \"lastActive\" = ${lastActive},\n \"ttlClock\" = ${ttlClock}\n WHERE \"clientGroupID\" = ${this.#id}`.execute();\n }\n\n /**\n * @returns This returns the current `ttlClock` of the CVR instance. The ttlClock\n * represents elapsed time since the instance was created (starting from 0).\n * If the CVR has never been initialized for this client group, it returns\n * `undefined`.\n */\n async getTTLClock(): Promise<TTLClock | undefined> {\n const result = await this.#db<Pick<InstancesRow, 'ttlClock'>[]>`\n SELECT \"ttlClock\" FROM ${this.#cvr('instances')}\n WHERE \"clientGroupID\" = ${this.#id}`.values();\n if (result.length === 0) {\n // This can happen if the CVR has not been initialized yet.\n return undefined;\n }\n assert(\n result.length === 1,\n () => `Expected exactly one rowsVersion result, got ${result.length}`,\n );\n return result[0][0];\n }\n\n putInstance({\n version,\n replicaVersion,\n lastActive,\n clientSchema,\n profileID,\n ttlClock,\n }: Pick<\n CVRSnapshot,\n | 'version'\n | 'replicaVersion'\n | 'lastActive'\n | 'clientSchema'\n | 'profileID'\n | 'ttlClock'\n >): void {\n // Overwrite any previously queued instance write — only the last call\n // matters since they all target the same row.\n this.#pendingInstanceWrite = (tx, lastConnectTime) => {\n const change: InstancesRow = {\n clientGroupID: this.#id,\n version: versionString(version),\n lastActive,\n ttlClock,\n replicaVersion,\n owner: this.#taskID,\n grantedAt: lastConnectTime,\n clientSchema,\n profileID,\n };\n return tx`\n INSERT INTO ${this.#cvr('instances')} ${tx(change)} \n ON CONFLICT (\"clientGroupID\") DO UPDATE SET ${tx(change)}`;\n };\n }\n\n markQueryAsDeleted(version: CVRVersion, queryPatch: QueryPatch): void {\n this.#updateQueryFields(queryPatch.id, {\n patchVersion: versionString(version),\n deleted: true,\n transformationHash: null,\n transformationVersion: null,\n });\n }\n\n putQuery(query: QueryRecord): void {\n const change = queryRecordToQueryRow(this.#id, query);\n\n const c = {\n ...change,\n // Pre-stringify queryArgs to handle postgres.js boolean array bug\n queryArgs:\n change.queryArgs !== null ? JSON.stringify(change.queryArgs) : null,\n transformationHash: change.transformationHash ?? null,\n transformationVersion: change.transformationVersion ?? null,\n deleted: change.deleted ?? false,\n };\n this.#pendingQueryUpdates.set(query.id, c);\n }\n\n updateQuery(query: QueryRecord) {\n const maybeVersionString = (v: CVRVersion | undefined) =>\n v ? versionString(v) : null;\n this.#updateQueryFields(query.id, {\n patchVersion:\n query.type === 'internal'\n ? null\n : maybeVersionString(query.patchVersion),\n transformationHash: query.transformationHash ?? 
null,\n transformationVersion: maybeVersionString(query.transformationVersion),\n deleted: false,\n });\n }\n\n updateRowSetSignature(queryHash: string, signature: string): void {\n this.#updateQueryFields(queryHash, {rowSetSignature: signature});\n }\n\n insertClient(client: ClientRecord): void {\n const change: ClientsRow = {\n clientGroupID: this.#id,\n clientID: client.id,\n };\n\n this.#writes.add({\n stats: {clients: 1},\n write: tx => tx`INSERT INTO ${this.#cvr('clients')} ${tx(change)}`,\n });\n }\n\n deleteClient(clientID: string) {\n this.#writes.add({\n stats: {clients: 1},\n write: sql =>\n sql`DELETE FROM ${this.#cvr('clients')}\n WHERE \"clientGroupID\" = ${this.#id}\n AND \"clientID\" = ${clientID}`,\n });\n }\n\n putDesiredQuery(\n newVersion: CVRVersion,\n query: {id: string},\n client: {id: string},\n deleted: boolean,\n inactivatedAt: TTLClock | undefined,\n ttl: number,\n ): void {\n const {ttlMs, inactivatedAtMs} = convertTTLValues(inactivatedAt, ttl);\n\n const change: DesiresRow = {\n clientGroupID: this.#id,\n clientID: client.id,\n deleted,\n inactivatedAt: inactivatedAtMs,\n patchVersion: versionString(newVersion),\n queryHash: query.id,\n ttl: ttlMs,\n };\n\n // Use composite key to deduplicate/replace entries for the same client-query pair\n const key = `${client.id}:${query.id}`;\n this.#pendingDesireUpdates.set(key, change);\n }\n\n catchupRowPatches(\n lc: LogContext,\n afterVersion: NullableCVRVersion,\n upToCVR: CVRSnapshot,\n current: CVRVersion,\n excludeQueryHashes: string[] = [],\n ): AsyncGenerator<RowsRow[], void, undefined> {\n return this.#rowCache.catchupRowPatches(\n lc,\n afterVersion,\n upToCVR,\n current,\n excludeQueryHashes,\n );\n }\n\n async catchupConfigPatches(\n lc: LogContext,\n afterVersion: NullableCVRVersion,\n upToCVR: CVRSnapshot,\n current: CVRVersion,\n ): Promise<PatchToVersion[]> {\n if (cmpVersions(afterVersion, upToCVR.version) >= 0) {\n return [];\n }\n\n const startMs = Date.now();\n const start = afterVersion ? versionString(afterVersion) : '';\n const end = versionString(upToCVR.version);\n lc.debug?.(`scanning config patches for clients from ${start}`);\n\n const reader = new TransactionPool(lc, {mode: Mode.READONLY}).run(this.#db);\n try {\n // Verify that we are reading the right version of the CVR.\n await reader.processReadTask(tx =>\n checkVersion(tx, this.#schema, this.#id, current),\n );\n\n const [allDesires, queryRows] = await reader.processReadTask(tx =>\n Promise.all([\n tx<DesiresRow[]>`\n SELECT\n \"clientGroupID\",\n \"clientID\",\n \"queryHash\",\n \"patchVersion\",\n \"deleted\",\n \"ttl\",\n \"inactivatedAt\"\n FROM ${this.#cvr('desires')}\n WHERE \"clientGroupID\" = ${this.#id}\n AND \"patchVersion\" > ${start}\n AND \"patchVersion\" <= ${end}`,\n tx<Pick<QueriesRow, 'deleted' | 'queryHash' | 'patchVersion'>[]>`\n SELECT deleted, \"queryHash\", \"patchVersion\" FROM ${this.#cvr('queries')}\n WHERE \"clientGroupID\" = ${this.#id}\n AND \"patchVersion\" > ${start}\n AND \"patchVersion\" <= ${end}`,\n ]),\n );\n\n const patches: PatchToVersion[] = [];\n for (const row of queryRows) {\n const {queryHash: id} = row;\n const patch: Patch = row.deleted\n ? {type: 'query', op: 'del', id}\n : {type: 'query', op: 'put', id};\n const v = row.patchVersion;\n assert(v, 'patchVersion must be set for query patches');\n patches.push({patch, toVersion: versionFromString(v)});\n }\n for (const row of allDesires) {\n const {clientID, queryHash: id} = row;\n const patch: Patch = row.deleted\n ? 
{type: 'query', op: 'del', id, clientID}\n : {type: 'query', op: 'put', id, clientID};\n patches.push({patch, toVersion: versionFromString(row.patchVersion)});\n }\n\n lc.debug?.(\n `${patches.length} config patches (${Date.now() - startMs} ms)`,\n );\n return patches;\n } finally {\n reader.setDone();\n }\n }\n\n #flushQueries(\n tx: PostgresTransaction,\n lc: LogContext,\n ): PendingQuery<Row[]>[] {\n // Merge partial updates into full updates\n const partialOnly = new Map<string, Partial<QueriesRow>>();\n for (const [queryHash, partial] of this.#pendingQueryPartialUpdates) {\n const existing = this.#pendingQueryUpdates.get(queryHash);\n if (existing) {\n // Merge partial into full update\n Object.assign(existing, partial);\n } else {\n // Track partial-only updates to batch separately\n partialOnly.set(queryHash, partial);\n }\n }\n\n const queries: PendingQuery<Row[]>[] = [];\n\n // Batch full updates\n if (this.#pendingQueryUpdates.size > 0) {\n const rows = [...this.#pendingQueryUpdates.values()];\n lc.debug?.(`Batch flushing ${rows.length} full query updates`);\n\n queries.push(tx`\n INSERT INTO ${this.#cvr('queries')} (\n \"clientGroupID\",\n \"queryHash\",\n \"clientAST\",\n \"queryName\",\n \"queryArgs\",\n \"patchVersion\",\n \"transformationHash\",\n \"transformationVersion\",\n \"internal\",\n \"deleted\",\n \"rowSetSignature\"\n )\n SELECT\n \"clientGroupID\",\n \"queryHash\",\n \"clientAST\",\n \"queryName\",\n CASE\n WHEN \"queryArgs\" IS NULL THEN NULL\n ELSE \"queryArgs\"::json\n END,\n \"patchVersion\",\n \"transformationHash\",\n \"transformationVersion\",\n \"internal\",\n \"deleted\",\n \"rowSetSignature\"\n FROM json_to_recordset(${rows}) AS x(\n \"clientGroupID\" TEXT,\n \"queryHash\" TEXT,\n \"clientAST\" JSONB,\n \"queryName\" TEXT,\n \"queryArgs\" TEXT,\n \"patchVersion\" TEXT,\n \"transformationHash\" TEXT,\n \"transformationVersion\" TEXT,\n \"internal\" BOOLEAN,\n \"deleted\" BOOLEAN,\n \"rowSetSignature\" TEXT\n )\n ON CONFLICT (\"clientGroupID\", \"queryHash\") DO UPDATE SET\n \"clientAST\" = excluded.\"clientAST\",\n \"queryName\" = excluded.\"queryName\",\n \"queryArgs\" = CASE\n WHEN excluded.\"queryArgs\" IS NULL THEN NULL\n ELSE excluded.\"queryArgs\"::json\n END,\n \"patchVersion\" = excluded.\"patchVersion\",\n \"transformationHash\" = excluded.\"transformationHash\",\n \"transformationVersion\" = excluded.\"transformationVersion\",\n \"internal\" = excluded.\"internal\",\n \"deleted\" = excluded.\"deleted\",\n \"rowSetSignature\" = excluded.\"rowSetSignature\"\n `);\n }\n\n // Batch partial-only updates\n if (partialOnly.size > 0) {\n lc.debug?.(`Batch flushing ${partialOnly.size} partial query updates`);\n const rows = Array.from(\n partialOnly.entries(),\n ([queryHash, partial]) => ({\n clientGroupID: this.#id,\n queryHash,\n patchVersionSet: partial.patchVersion !== undefined,\n patchVersion: partial.patchVersion ?? null,\n deletedSet: partial.deleted !== undefined,\n deleted: partial.deleted ?? null,\n transformationHashSet: partial.transformationHash !== undefined,\n transformationHash: partial.transformationHash ?? null,\n transformationVersionSet: partial.transformationVersion !== undefined,\n transformationVersion: partial.transformationVersion ?? null,\n rowSetSignatureSet: partial.rowSetSignature !== undefined,\n rowSetSignature: partial.rowSetSignature ?? 
null,\n }),\n );\n queries.push(tx`\n UPDATE ${this.#cvr('queries')} AS q\n SET\n \"patchVersion\" = CASE\n WHEN u.\"patchVersionSet\" THEN u.\"patchVersion\"\n ELSE q.\"patchVersion\"\n END,\n \"deleted\" = CASE\n WHEN u.\"deletedSet\" THEN u.\"deleted\"\n ELSE q.\"deleted\"\n END,\n \"transformationHash\" = CASE\n WHEN u.\"transformationHashSet\" THEN u.\"transformationHash\"\n ELSE q.\"transformationHash\"\n END,\n \"transformationVersion\" = CASE\n WHEN u.\"transformationVersionSet\" THEN u.\"transformationVersion\"\n ELSE q.\"transformationVersion\"\n END,\n \"rowSetSignature\" = CASE\n WHEN u.\"rowSetSignatureSet\" THEN u.\"rowSetSignature\"\n ELSE q.\"rowSetSignature\"\n END\n FROM json_to_recordset(${rows}) AS u(\n \"clientGroupID\" TEXT,\n \"queryHash\" TEXT,\n \"patchVersionSet\" BOOLEAN,\n \"patchVersion\" TEXT,\n \"deletedSet\" BOOLEAN,\n \"deleted\" BOOLEAN,\n \"transformationHashSet\" BOOLEAN,\n \"transformationHash\" TEXT,\n \"transformationVersionSet\" BOOLEAN,\n \"transformationVersion\" TEXT,\n \"rowSetSignatureSet\" BOOLEAN,\n \"rowSetSignature\" TEXT\n )\n WHERE q.\"clientGroupID\" = u.\"clientGroupID\"\n AND q.\"queryHash\" = u.\"queryHash\"\n `);\n }\n\n return queries;\n }\n\n #flushDesires(\n tx: PostgresTransaction,\n lc: LogContext,\n ): PendingQuery<Row[]> | null {\n if (this.#pendingDesireUpdates.size === 0) {\n return null;\n }\n\n const rows = Array.from(this.#pendingDesireUpdates.values(), row => {\n const {ttlInterval, ttlMs, inactivatedAtTimestamp, inactivatedAtMs} =\n convertTTLValues(row.inactivatedAt ?? undefined, row.ttl ?? -1);\n return {\n clientGroupID: row.clientGroupID,\n clientID: row.clientID,\n queryHash: row.queryHash,\n patchVersion: row.patchVersion,\n deleted: row.deleted,\n ttl: ttlInterval,\n ttlMs,\n inactivatedAt: inactivatedAtTimestamp,\n inactivatedAtMs,\n };\n });\n\n lc.debug?.(`Batch flushing ${rows.length} desire updates`);\n\n return tx`\n INSERT INTO ${this.#cvr('desires')} (\n \"clientGroupID\",\n \"clientID\",\n \"queryHash\",\n \"patchVersion\",\n \"deleted\",\n \"ttl\",\n \"ttlMs\",\n \"inactivatedAt\",\n \"inactivatedAtMs\"\n )\n SELECT\n \"clientGroupID\",\n \"clientID\",\n \"queryHash\",\n \"patchVersion\",\n \"deleted\",\n \"ttl\",\n \"ttlMs\",\n CASE\n WHEN \"inactivatedAt\" IS NULL THEN NULL\n -- Divide by 1000 because postgres.js serializeTimestamp treats numbers as ms\n -- and to_timestamp expects seconds. 
This matches non-batched behavior.\n ELSE to_timestamp(\"inactivatedAt\" / 1000.0)\n END,\n \"inactivatedAtMs\"\n FROM json_to_recordset(${rows}) AS x(\n \"clientGroupID\" TEXT,\n \"clientID\" TEXT,\n \"queryHash\" TEXT,\n \"patchVersion\" TEXT,\n \"deleted\" BOOLEAN,\n \"ttl\" INTERVAL,\n \"ttlMs\" DOUBLE PRECISION,\n \"inactivatedAt\" DOUBLE PRECISION,\n \"inactivatedAtMs\" DOUBLE PRECISION\n )\n ON CONFLICT (\"clientGroupID\", \"clientID\", \"queryHash\") DO UPDATE SET\n \"patchVersion\" = excluded.\"patchVersion\",\n \"deleted\" = excluded.\"deleted\",\n \"ttl\" = excluded.\"ttl\",\n \"ttlMs\" = excluded.\"ttlMs\",\n \"inactivatedAt\" = excluded.\"inactivatedAt\",\n \"inactivatedAtMs\" = excluded.\"inactivatedAtMs\"\n `;\n }\n\n async #checkVersionAndOwnership(\n lc: LogContext,\n tx: PostgresTransaction,\n expectedCurrentVersion: CVRVersion,\n lastConnectTime: number,\n ): Promise<void> {\n const start = Date.now();\n lc.debug?.('checking cvr version and ownership');\n const result = await tx<\n Pick<InstancesRow, 'version' | 'owner' | 'grantedAt'>[]\n >`SELECT \"version\", \"owner\", \"grantedAt\" FROM ${this.#cvr('instances')}\n WHERE \"clientGroupID\" = ${this.#id}\n FOR UPDATE`;\n const expected = versionString(expectedCurrentVersion);\n const {version, owner, grantedAt} =\n result.length > 0\n ? result[0]\n : {\n version: EMPTY_CVR_VERSION.stateVersion,\n owner: null,\n grantedAt: null,\n };\n lc.debug?.(\n 'checked cvr version and ownership in ' + (Date.now() - start) + ' ms',\n );\n if (owner !== this.#taskID && (grantedAt ?? 0) > lastConnectTime) {\n throw new OwnershipError(owner, grantedAt, lastConnectTime);\n }\n if (version !== expected) {\n throw new ConcurrentModificationException(expected, version);\n }\n }\n\n async #flush(\n lc: LogContext,\n expectedCurrentVersion: CVRVersion,\n cvr: CVRSnapshot,\n lastConnectTime: number,\n ): Promise<CVRFlushStats | null> {\n const stats: CVRFlushStats = {\n instances: 0,\n queries: 0,\n desires: 0,\n clients: 0,\n rows: 0,\n rowsDeferred: 0,\n statements: 0,\n };\n if (this.#pendingRowRecordUpdates.size) {\n const existingRowRecords = await this.getRowRecords();\n this.#rowCount = existingRowRecords.size;\n for (const [id, row] of this.#pendingRowRecordUpdates.entries()) {\n if (this.#forceUpdates.has(id)) {\n continue;\n }\n const existing = existingRowRecords.get(id);\n if (\n // Don't delete or add an unreferenced row if it's not in the CVR.\n (existing === undefined && !row?.refCounts) ||\n // Don't write a row record that exactly matches what's in the CVR.\n deepEqual(\n (row ?? undefined) as ReadonlyJSONValue | undefined,\n existing as ReadonlyJSONValue | undefined,\n )\n ) {\n this.#pendingRowRecordUpdates.delete(id);\n }\n }\n }\n if (\n this.#pendingRowRecordUpdates.size === 0 &&\n this.#writes.size === 0 &&\n this.#pendingInstanceWrite === undefined &&\n this.#pendingQueryUpdates.size === 0 &&\n this.#pendingQueryPartialUpdates.size === 0 &&\n this.#pendingDesireUpdates.size === 0\n ) {\n return null;\n }\n // Note: The CVR instance itself is only updated if there are material\n // changes (i.e. changes to the CVR contents) to flush.\n this.putInstance(cvr);\n const start = Date.now();\n lc.debug?.('flush tx beginning');\n\n // Use an async callback so we can await the version/ownership check and\n // validate it INSIDE the transaction. 
If validation fails, the exception\n // causes postgres.js to ROLLBACK, ensuring no writes are committed on error.\n const results = await runTx(\n this.#db,\n async tx => {\n lc.debug?.(`flush tx begun after ${Date.now() - start} ms`);\n\n // Acquire row-level lock and validate version/ownership before queuing writes.\n // Throwing here (inside the begin callback) rolls back the transaction so that\n // no writes are committed when concurrent modification or ownership errors occur.\n await this.#checkVersionAndOwnership(\n lc,\n tx,\n expectedCurrentVersion,\n lastConnectTime,\n );\n\n const writeQueries = [];\n if (this.#pendingInstanceWrite) {\n writeQueries.push(this.#pendingInstanceWrite(tx, lastConnectTime));\n stats.instances++;\n stats.statements++;\n }\n for (const write of this.#writes) {\n stats.clients += write.stats.clients ?? 0;\n stats.rows += write.stats.rows ?? 0;\n\n writeQueries.push(write.write(tx, lastConnectTime));\n stats.statements++;\n }\n\n // Batch flush config writes\n // Flush queries first (desires depend on queries via foreign key)\n const hasQueryUpdates =\n this.#pendingQueryUpdates.size > 0 ||\n this.#pendingQueryPartialUpdates.size > 0;\n\n const desireFlush = this.#flushDesires(tx, lc);\n\n let queryFlushes: PendingQuery<Row[]>[] = [];\n if (hasQueryUpdates) {\n queryFlushes = this.#flushQueries(tx, lc);\n\n // Count both full updates and partial-only updates\n const partialOnlyCount = [\n ...this.#pendingQueryPartialUpdates.keys(),\n ].filter(key => !this.#pendingQueryUpdates.has(key)).length;\n\n stats.queries = this.#pendingQueryUpdates.size + partialOnlyCount;\n stats.statements +=\n (this.#pendingQueryUpdates.size > 0 ? 1 : 0) +\n (partialOnlyCount > 0 ? 1 : 0);\n\n if (desireFlush) {\n stats.desires = this.#pendingDesireUpdates.size;\n stats.statements++;\n }\n } else if (desireFlush) {\n stats.desires = this.#pendingDesireUpdates.size;\n stats.statements++;\n }\n\n const rowUpdates = this.#rowCache.executeRowUpdates(\n tx,\n cvr.version,\n this.#pendingRowRecordUpdates,\n 'allow-defer',\n lc,\n );\n stats.statements += rowUpdates.length;\n\n // Pipeline writes now that the version check has passed.\n const pipelined = [\n ...writeQueries,\n ...queryFlushes,\n ...(desireFlush ? [desireFlush] : []),\n ...rowUpdates,\n ];\n\n lc.debug?.(`returning ${pipelined.length} queries for pipelining`);\n\n // Explicitly await all pipelined queries. When the begin callback is async,\n // postgres.js does not call Promise.all() on the return value the way it does\n // for sync callbacks, so we must do it ourselves.\n return Promise.all(pipelined);\n },\n {mode: Mode.READ_COMMITTED},\n );\n\n lc.debug?.(`flush tx completed after ${Date.now() - start} ms`);\n\n // Calculate how many row update queries were in the pipeline.\n // Note: the version check was awaited separately and is not in the results array.\n const baseQueries =\n (this.#pendingInstanceWrite ? 1 : 0) +\n this.#writes.size +\n (this.#pendingQueryUpdates.size > 0 ? 1 : 0) +\n ([...this.#pendingQueryPartialUpdates.keys()].filter(\n key => !this.#pendingQueryUpdates.has(key),\n ).length > 0\n ? 1\n : 0) +\n (this.#pendingDesireUpdates.size > 0 ? 
1 : 0);\n const rowUpdateCount = results.length - baseQueries;\n\n const rowsFlushed = rowUpdateCount > 0;\n if (!rowsFlushed) {\n stats.rowsDeferred = this.#pendingRowRecordUpdates.size;\n } else {\n stats.rows += this.#pendingRowRecordUpdates.size;\n }\n\n this.#rowCount = await this.#rowCache.apply(\n this.#pendingRowRecordUpdates,\n cvr.version,\n rowsFlushed,\n );\n recordRowsSynced(this.#rowCount);\n\n return stats;\n }\n\n get rowCount(): number {\n return this.#rowCount;\n }\n\n async flush(\n lc: LogContext,\n expectedCurrentVersion: CVRVersion,\n cvr: CVRSnapshot,\n lastConnectTime: number,\n ): Promise<CVRFlushStats | null> {\n const start = performance.now();\n lc = lc.withContext('cvrFlushID', flushCounter++);\n try {\n const stats = await this.#flush(\n lc,\n expectedCurrentVersion,\n cvr,\n lastConnectTime,\n );\n if (stats) {\n const elapsed = performance.now() - start;\n lc.info?.(\n `flushed cvr@${versionString(cvr.version)} ` +\n `${JSON.stringify(stats)} in (${elapsed} ms)`,\n );\n this.#rowCache.recordSyncFlushStats(stats, elapsed);\n }\n return stats;\n } catch (e) {\n // Clear cached state if an error (e.g. ConcurrentModificationException) is encountered.\n this.#rowCache.clear();\n throw e;\n } finally {\n this.#writes.clear();\n this.#pendingInstanceWrite = undefined;\n this.#pendingRowRecordUpdates.clear();\n this.#forceUpdates.clear();\n this.#pendingQueryUpdates.clear();\n this.#pendingDesireUpdates.clear();\n this.#pendingQueryPartialUpdates.clear();\n }\n }\n\n hasPendingUpdates(): boolean {\n return this.#rowCache.hasPendingUpdates();\n }\n\n /** Resolves when all pending updates are flushed. */\n flushed(lc: LogContext): Promise<void> {\n return this.#rowCache.flushed(lc);\n }\n\n async inspectQueries(\n lc: LogContext,\n ttlClock: TTLClock,\n clientID?: string,\n ): Promise<InspectQueryRow[]> {\n const db = this.#db;\n const clientGroupID = this.#id;\n\n const reader = new TransactionPool(lc, {mode: Mode.READONLY}).run(db);\n try {\n return await reader.processReadTask(\n tx => tx<InspectQueryRow[]>`\n SELECT DISTINCT ON (d.\"clientID\", d.\"queryHash\")\n d.\"clientID\",\n d.\"queryHash\" AS \"queryID\",\n COALESCE(d.\"ttlMs\", ${DEFAULT_TTL_MS}) AS \"ttl\",\n d.\"inactivatedAtMs\" AS \"inactivatedAt\",\n (SELECT COUNT(*)::INT FROM ${this.#cvr('rows')} r \n WHERE r.\"clientGroupID\" = d.\"clientGroupID\" \n AND r.\"refCounts\" ? d.\"queryHash\") AS \"rowCount\",\n q.\"clientAST\" AS \"ast\",\n (q.\"patchVersion\" IS NOT NULL) AS \"got\",\n COALESCE(d.\"deleted\", FALSE) AS \"deleted\",\n q.\"queryName\" AS \"name\",\n q.\"queryArgs\" AS \"args\"\n FROM ${this.#cvr('desires')} d\n LEFT JOIN ${this.#cvr('queries')} q\n ON q.\"clientGroupID\" = d.\"clientGroupID\"\n AND q.\"queryHash\" = d.\"queryHash\"\n WHERE d.\"clientGroupID\" = ${clientGroupID}\n ${clientID ? tx`AND d.\"clientID\" = ${clientID}` : tx``}\n AND NOT (\n d.\"inactivatedAtMs\" IS NOT NULL \n AND d.\"ttlMs\" IS NOT NULL \n AND (d.\"inactivatedAtMs\" + d.\"ttlMs\") <= ${ttlClockAsNumber(ttlClock)}\n )\n ORDER BY d.\"clientID\", d.\"queryHash\"`,\n );\n } finally {\n reader.setDone();\n }\n }\n}\n\n/**\n * This is similar to {@link CVRStore.#checkVersionAndOwnership} except\n * that it only checks the version and is suitable for snapshot reads\n * (i.e. by doing a plain `SELECT` rather than a `SELECT ... 
FOR UPDATE`).\n */\nexport async function checkVersion(\n tx: PostgresTransaction,\n schema: string,\n clientGroupID: string,\n expectedCurrentVersion: CVRVersion,\n): Promise<void> {\n const expected = versionString(expectedCurrentVersion);\n const result = await tx<Pick<InstancesRow, 'version'>[]>`\n SELECT version FROM ${tx(schema)}.instances \n WHERE \"clientGroupID\" = ${clientGroupID}`;\n const {version} =\n result.length > 0 ? result[0] : {version: EMPTY_CVR_VERSION.stateVersion};\n if (version !== expected) {\n throw new ConcurrentModificationException(expected, version);\n }\n}\n\nexport class ClientNotFoundError extends ProtocolErrorWithLevel {\n constructor(message: string) {\n super(\n {\n kind: ErrorKind.ClientNotFound,\n message,\n origin: ErrorOrigin.ZeroCache,\n },\n 'warn',\n );\n }\n}\n\nexport class ConcurrentModificationException extends ProtocolErrorWithLevel {\n readonly name = 'ConcurrentModificationException';\n\n constructor(expectedVersion: string, actualVersion: string) {\n super(\n {\n kind: ErrorKind.Internal,\n message: `CVR has been concurrently modified. Expected ${expectedVersion}, got ${actualVersion}`,\n origin: ErrorOrigin.ZeroCache,\n },\n 'warn',\n );\n }\n}\n\nexport class OwnershipError extends ProtocolErrorWithLevel {\n readonly name = 'OwnershipError';\n\n constructor(\n owner: string | null,\n grantedAt: number | null,\n lastConnectTime: number,\n ) {\n super(\n {\n kind: ErrorKind.Rehome,\n message:\n `CVR ownership was transferred to ${owner} at ` +\n `${new Date(grantedAt ?? 0).toISOString()} ` +\n `(last connect time: ${new Date(lastConnectTime).toISOString()})`,\n maxBackoffMs: 0,\n origin: ErrorOrigin.ZeroCache,\n },\n 'info',\n );\n }\n}\n\nexport class InvalidClientSchemaError extends ProtocolErrorWithLevel {\n readonly name = 'InvalidClientSchemaError';\n\n constructor(cause: unknown) {\n super(\n {\n kind: ErrorKind.SchemaVersionNotSupported,\n message: `Could not parse clientSchema stored in CVR: ${String(cause)}`,\n origin: ErrorOrigin.ZeroCache,\n },\n 'warn',\n {cause},\n );\n }\n}\n\nexport class RowsVersionBehindError extends Error {\n readonly name = 'RowsVersionBehindError';\n readonly cvrVersion: string;\n readonly rowsVersion: string | null;\n\n constructor(cvrVersion: string, rowsVersion: string | null) {\n super(`rowsVersion (${rowsVersion}) is behind CVR ${cvrVersion}`);\n this.cvrVersion = cvrVersion;\n this.rowsVersion = rowsVersion;\n 
}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;AAuEA,IAAI,eAAe;;;;;;AAOnB,SAAS,iBACP,eACA,KAMA;AACA,QAAO;EACL,aAAa,MAAM,IAAI,OAAO,MAAM;EACpC,OAAO,MAAM,IAAI,OAAO;EACxB,wBACE,kBAAkB,KAAA,IACd,OACA,mBAAmB,iBAAiB,cAAc,GAAG,IAAK;EAChE,iBAAiB,iBAAiB;EACnC;;AAGH,IAAM,SAAS,MAAM,UAAU,aAAa,QAAQ;AAWpD,SAAS,QAAQ,KAA8B;CAC7C,MAAM,gBAAgB,MACpB,MAAM,OAAO,KAAA,IAAY,kBAAkB,EAAE;CAI/C,MAAM,WAAW,IAAI,kBACjB,EAAC,iBAAiB,IAAI,iBAAgB,GACtC,EAAE;AAEN,KAAI,IAAI,cAAc,MAAM;AAE1B,SACE,IAAI,cAAc,QAAQ,IAAI,cAAc,MAC5C,yDACD;AACD,SAAO;GACL,MAAM;GACN,IAAI,IAAI;GACR,MAAM,IAAI;GACV,MAAM,IAAI;GACV,cAAc,aAAa,IAAI,aAAa;GAC5C,aAAa,EAAE;GACf,oBAAoB,IAAI,sBAAsB,KAAA;GAC9C,uBAAuB,aAAa,IAAI,sBAAsB;GAC9D,GAAG;GACJ;;CAGH,MAAM,MAAM,UAAU,MAAM,IAAI,UAAU;AAC1C,QAAO,IAAI,WACN;EACC,MAAM;EACN,IAAI,IAAI;EACR;EACA,oBAAoB,IAAI,sBAAsB,KAAA;EAC9C,uBAAuB,aAAa,IAAI,sBAAsB;EAC9D,GAAG;EACJ,GACA;EACC,MAAM;EACN,IAAI,IAAI;EACR;EACA,cAAc,aAAa,IAAI,aAAa;EAC5C,aAAa,EAAE;EACf,oBAAoB,IAAI,sBAAsB,KAAA;EAC9C,uBAAuB,aAAa,IAAI,sBAAsB;EAC9D,GAAG;EACJ;;AAIP,IAAM,2BAA2B;AAOjC,IAAM,oBAAoB;AAE1B,IAAa,WAAb,MAAsB;CACpB;CACA;CACA;CACA;CACA;CACA,0BAMK,IAAI,KAAK;CAId,wBAKgB,KAAA;CAChB,2BAAoC,IAAI,aACtC,YACD;CACD,gBAAyB,IAAI,aAAoB,YAAY;CAC7D;CACA;CACA;CACA,YAAoB;CACpB,uCAAgC,IAAI,KAAoC;CACxE,wCAAiC,IAAI,KAAyB;CAC9D,8CAAuC,IAAI,KAAkC;CAE7E,YACE,IACA,OACA,OACA,QACA,OACA,aACA,wBAAwB,0BACxB,kBAAkB,mBAClB,4BAA4B,KAC5B,eAAe,YACf;AACA,QAAA,cAAoB;AACpB,QAAA,KAAW;AACX,QAAA,SAAe,UAAU,MAAM;AAC/B,QAAA,SAAe;AACf,QAAA,KAAW;AACX,QAAA,WAAiB,IAAI,eACnB,IACA,OACA,OACA,OACA,aACA,2BACA,aACD;AACD,QAAA,wBAA8B;AAC9B,QAAA,kBAAwB;;CAG1B,KAAK,OAAe;AAClB,SAAO,MAAA,GAAS,GAAG,MAAA,OAAa,GAAG,QAAQ;;CAG7C,mBAAmB,WAAmB,QAAmC;EAIvE,MAAM,WAAW,MAAA,2BAAiC,IAAI,UAAU;AAChE,QAAA,2BAAiC,IAC/B,WACA,WAAW;GAAC,GAAG;GAAU,GAAG;GAAO,GAAG,OACvC;;CAGH,KAAK,IAAgB,iBAAuC;AAC1D,SAAO,eAAe,QAAQ,YAAY,YAAY;GACpD,IAAI;AACJ,QAAK,IAAI,IAAI,GAAG,IAAI,MAAA,iBAAuB,KAAK;AAC9C,QAAI,IAAI,EACN,OAAM,MAAM,MAAA,sBAA4B;IAE1C,MAAM,SAAS,MAAM,MAAA,KAAW,IAAI,gBAAgB;AACpD,QAAI,kBAAkB,wBAAwB;AAC5C,QAAG,OAAO,WAAW,IAAI,EAAE,IAAI,OAAO,OAAO,GAAG;AAChD,WAAM;AACN;;AAEF,WAAO;;AAET,UAAO,KAAK,sDAAsD;AAClE,SAAM,IAAI,oBACR,yCAAyC,IAAI,WAAW,oBAAoB,IAAI,cACjF;IACD;;CAGJ,OAAA,KACE,IACA,iBACuC;EACvC,MAAM,QAAQ,KAAK,KAAK;EAExB,MAAM,KAAK,MAAA;EACX,MAAM,MAAW;GACf;GACA,SAAS;GACT,YAAY;GACZ,UAAU,mBAAmB,EAAE;GAC/B,gBAAgB;GAChB,SAAS,EAAE;GACX,SAAS,EAAE;GACX,cAAc;GACd,WAAW;GACZ;EAED,MAAM,CAAC,UAAU,aAAa,WAAW,eAAe,MAAM,MAC5D,MAAA,KACA,OAAM;AACJ,MAAG,QAAQ,wBAAwB,KAAK,KAAK,GAAG,MAAM,KAAK;AAC3D,UAAO;IACL,EAMC;;;;;;;;;;mBAUQ,MAAA,IAAU,YAAY,CAAC;wBAClB,MAAA,IAAU,cAAc,CAAC;;0CAEP;IAChC,EAAkC,0BAA0B,MAAA,IAC1D,UACD,CAAC;qCACyB;IAC3B,EAAgB;;;;;;;;;;;;iBAYT,MAAA,IAAU,UAAU,CAAC;oCACF,GAAG;IAC7B,EAAgB;;;;;;;;iBAQT,MAAA,IAAU,UAAU,CAAC;oCACF;IAC3B;KAEH,EAAC,MAAM,UAAc,CACtB;AACD,KAAG,QACD,0BAA0B,KAAK,KAAK,GAAG,MAAM,OACvC,YAAY,OAAO,YAAY,UAAU,OAAO,YAAY,YAAY,OAAO,WACtF;AAED,MAAI,SAAS,WAAW,EAEtB,MAAK,YAAY;GACf,SAAS,IAAI;GACb,YAAY;GACZ,UAAU,mBAAmB,EAAE;GAC/B,gBAAgB;GAChB,cAAc;GACd,WAAW;GACZ,CAAC;OACG;AACL,UACE,SAAS,WAAW,SACd,0CAA0C,SAAS,SAC1D;GACD,MAAM,EACJ,SACA,YACA,UACA,gBACA,OACA,WACA,aACA,cACA,WACA,YACE,SAAS;AAEb,OAAI,QACF,OAAM,IAAI,oBACR,2CACD;AAGH,OAAI,UAAU,MAAA,OACZ,MAAK,aAAa,KAAK,gBACrB,OAAM,IAAI,eAAe,OAAO,WAAW,gBAAgB;OAKtD,OAAA,EAAQ;qBACF,MAAA,IAAU,YAAY,CAAC;kCACV,MAAA,OAAa;kCACb,gBAAgB;wCACV,MAAA,GAAS;;mDAEE,kBAAkB,IAAK;UAE7D,SAAS,CACT,MAAM,MAAA,YAAkB;AAI/B,OAAI,aAAa,eAAe,kBAAkB,cAIhD,QAAO,IAAI,uBAAuB,SAAS,YAAY;AAGzD,OAAI,UAAU,kBAAkB,QAAQ;AACxC,OAAI,aAAa;AACjB,OAAI,WAAW;AACf,OAAI,iBAAiB;AACrB,OAAI,YAAY;AAEhB,OAAI;AACF,QAAI,eACF,iBAAiB,OACb,OACA,MAAQ,cAAc,mBAAmB;YACxC,GAAG;AACV,UAA
M,IAAI,yBAAyB,EAAE;;;AAIzC,OAAK,MAAM,OAAO,YAChB,KAAI,QAAQ,IAAI,YAAY;GAC1B,IAAI,IAAI;GACR,iBAAiB,EAAE;GACpB;AAGH,OAAK,MAAM,OAAO,WAAW;GAC3B,MAAM,QAAQ,QAAQ,IAAI;AAC1B,OAAI,QAAQ,IAAI,aAAa;;AAG/B,OAAK,MAAM,OAAO,aAAa;GAC7B,MAAM,SAAS,IAAI,QAAQ,IAAI;AAE/B,OAAI,CAAC,IAAI,WAAW,IAAI,kBAAkB,KACxC,KAAI,OACF,QAAO,gBAAgB,KAAK,IAAI,UAAU;OAG1C,IAAG,QACD,4CAA4C,IAAI,SAAS,+BAC1D;GAIL,MAAM,QAAQ,IAAI,QAAQ,IAAI;AAC9B,OACE,SACA,MAAM,SAAS,eACd,CAAC,IAAI,WAAW,IAAI,kBAAkB,MAEvC,OAAM,YAAY,IAAI,YAAY;IAChC,eAAe,IAAI,iBAAiB,KAAA;IACpC,KAAK,SAAS,IAAI,OAAA,IAAsB;IACxC,SAAS,kBAAkB,IAAI,aAAa;IAC7C;;AAIL,KAAG,OACD,cAAc,cAAc,IAAI,QAAQ,CAAC,IAAI,KAAK,KAAK,GAAG,MAAM,MACjE;AAID,SAAO;;CAGT,gBAAwD;AACtD,SAAO,MAAA,SAAe,eAAe;;CAGvC,aAAa,KAAsB;AACjC,QAAA,wBAA8B,IAAI,IAAI,IAAI,IAAI;;;;;;;;;;CAWhD,aAAa,IAAiB;AAC5B,QAAA,wBAA8B,IAAI,IAAI,KAAK;;;;;;;CAQ7C,aAAa,GAAG,KAAc;AAC5B,OAAK,MAAM,MAAM,IACf,OAAA,aAAmB,IAAI,GAAG;;;;;;;CAS9B,MAAM,eAAe,UAAoB,YAAmC;AAC1E,QAAM,MAAA,EAAQ,UAAU,MAAA,IAAU,YAAY,CAAC;+BACpB,WAAW;6BACb,SAAS;oCACF,MAAA,KAAW,SAAS;;;;;;;;CAStD,MAAM,cAA6C;EACjD,MAAM,SAAS,MAAM,MAAA,EAA0C;+BACpC,MAAA,IAAU,YAAY,CAAC;gCACtB,MAAA,KAAW,QAAQ;AAC/C,MAAI,OAAO,WAAW,EAEpB;AAEF,SACE,OAAO,WAAW,SACZ,gDAAgD,OAAO,SAC9D;AACD,SAAO,OAAO,GAAG;;CAGnB,YAAY,EACV,SACA,gBACA,YACA,cACA,WACA,YASO;AAGP,QAAA,wBAA8B,IAAI,oBAAoB;GACpD,MAAM,SAAuB;IAC3B,eAAe,MAAA;IACf,SAAS,cAAc,QAAQ;IAC/B;IACA;IACA;IACA,OAAO,MAAA;IACP,WAAW;IACX;IACA;IACD;AACD,UAAO,EAAE;sBACO,MAAA,IAAU,YAAY,CAAC,GAAG,GAAG,OAAO,CAAC;wDACH,GAAG,OAAO;;;CAIhE,mBAAmB,SAAqB,YAA8B;AACpE,QAAA,kBAAwB,WAAW,IAAI;GACrC,cAAc,cAAc,QAAQ;GACpC,SAAS;GACT,oBAAoB;GACpB,uBAAuB;GACxB,CAAC;;CAGJ,SAAS,OAA0B;EACjC,MAAM,SAAS,sBAAsB,MAAA,IAAU,MAAM;EAErD,MAAM,IAAI;GACR,GAAG;GAEH,WACE,OAAO,cAAc,OAAO,KAAK,UAAU,OAAO,UAAU,GAAG;GACjE,oBAAoB,OAAO,sBAAsB;GACjD,uBAAuB,OAAO,yBAAyB;GACvD,SAAS,OAAO,WAAW;GAC5B;AACD,QAAA,oBAA0B,IAAI,MAAM,IAAI,EAAE;;CAG5C,YAAY,OAAoB;EAC9B,MAAM,sBAAsB,MAC1B,IAAI,cAAc,EAAE,GAAG;AACzB,QAAA,kBAAwB,MAAM,IAAI;GAChC,cACE,MAAM,SAAS,aACX,OACA,mBAAmB,MAAM,aAAa;GAC5C,oBAAoB,MAAM,sBAAsB;GAChD,uBAAuB,mBAAmB,MAAM,sBAAsB;GACtE,SAAS;GACV,CAAC;;CAGJ,sBAAsB,WAAmB,WAAyB;AAChE,QAAA,kBAAwB,WAAW,EAAC,iBAAiB,WAAU,CAAC;;CAGlE,aAAa,QAA4B;EACvC,MAAM,SAAqB;GACzB,eAAe,MAAA;GACf,UAAU,OAAO;GAClB;AAED,QAAA,OAAa,IAAI;GACf,OAAO,EAAC,SAAS,GAAE;GACnB,QAAO,OAAM,EAAE,eAAe,MAAA,IAAU,UAAU,CAAC,GAAG,GAAG,OAAO;GACjE,CAAC;;CAGJ,aAAa,UAAkB;AAC7B,QAAA,OAAa,IAAI;GACf,OAAO,EAAC,SAAS,GAAE;GACnB,QAAO,QACL,GAAG,eAAe,MAAA,IAAU,UAAU,CAAC;sCACT,MAAA,GAAS;iCACd;GAC5B,CAAC;;CAGJ,gBACE,YACA,OACA,QACA,SACA,eACA,KACM;EACN,MAAM,EAAC,OAAO,oBAAmB,iBAAiB,eAAe,IAAI;EAErE,MAAM,SAAqB;GACzB,eAAe,MAAA;GACf,UAAU,OAAO;GACjB;GACA,eAAe;GACf,cAAc,cAAc,WAAW;GACvC,WAAW,MAAM;GACjB,KAAK;GACN;EAGD,MAAM,MAAM,GAAG,OAAO,GAAG,GAAG,MAAM;AAClC,QAAA,qBAA2B,IAAI,KAAK,OAAO;;CAG7C,kBACE,IACA,cACA,SACA,SACA,qBAA+B,EAAE,EACW;AAC5C,SAAO,MAAA,SAAe,kBACpB,IACA,cACA,SACA,SACA,mBACD;;CAGH,MAAM,qBACJ,IACA,cACA,SACA,SAC2B;AAC3B,MAAI,YAAY,cAAc,QAAQ,QAAQ,IAAI,EAChD,QAAO,EAAE;EAGX,MAAM,UAAU,KAAK,KAAK;EAC1B,MAAM,QAAQ,eAAe,cAAc,aAAa,GAAG;EAC3D,MAAM,MAAM,cAAc,QAAQ,QAAQ;AAC1C,KAAG,QAAQ,4CAA4C,QAAQ;EAE/D,MAAM,SAAS,IAAI,gBAAgB,IAAI,EAAC,MAAM,UAAc,CAAC,CAAC,IAAI,MAAA,GAAS;AAC3E,MAAI;AAEF,SAAM,OAAO,iBAAgB,OAC3B,aAAa,IAAI,MAAA,QAAc,MAAA,IAAU,QAAQ,CAClD;GAED,MAAM,CAAC,YAAY,aAAa,MAAM,OAAO,iBAAgB,OAC3D,QAAQ,IAAI,CACV,EAAgB;;;;;;;;;aASb,MAAA,IAAU,UAAU,CAAC;kCACA,MAAA,GAAS;+BACZ,MAAM;gCACL,OACtB,EAAgE;yDACjB,MAAA,IAAU,UAAU,CAAC;kCAC5C,MAAA,GAAS;+BACZ,MAAM;gCACL,MACvB,CAAC,CACH;GAED,MAAM,UAA4B,EAAE;AACpC,QAAK,MAAM,OAAO,WAAW;IAC3B,MAAM,EAAC,WAAW,OAAM;IACxB,MAAM,QAAe,IAAI,UACrB;KAAC,MAAM;KAAS,IAAI;KAAO;KAAG,GAC9B;KAAC,
MAAM;KAAS,IAAI;KAAO;KAAG;IAClC,MAAM,IAAI,IAAI;AACd,WAAO,GAAG,6CAA6C;AACvD,YAAQ,KAAK;KAAC;KAAO,WAAW,kBAAkB,EAAE;KAAC,CAAC;;AAExD,QAAK,MAAM,OAAO,YAAY;IAC5B,MAAM,EAAC,UAAU,WAAW,OAAM;IAClC,MAAM,QAAe,IAAI,UACrB;KAAC,MAAM;KAAS,IAAI;KAAO;KAAI;KAAS,GACxC;KAAC,MAAM;KAAS,IAAI;KAAO;KAAI;KAAS;AAC5C,YAAQ,KAAK;KAAC;KAAO,WAAW,kBAAkB,IAAI,aAAa;KAAC,CAAC;;AAGvE,MAAG,QACD,GAAG,QAAQ,OAAO,mBAAmB,KAAK,KAAK,GAAG,QAAQ,MAC3D;AACD,UAAO;YACC;AACR,UAAO,SAAS;;;CAIpB,cACE,IACA,IACuB;EAEvB,MAAM,8BAAc,IAAI,KAAkC;AAC1D,OAAK,MAAM,CAAC,WAAW,YAAY,MAAA,4BAAkC;GACnE,MAAM,WAAW,MAAA,oBAA0B,IAAI,UAAU;AACzD,OAAI,SAEF,QAAO,OAAO,UAAU,QAAQ;OAGhC,aAAY,IAAI,WAAW,QAAQ;;EAIvC,MAAM,UAAiC,EAAE;AAGzC,MAAI,MAAA,oBAA0B,OAAO,GAAG;GACtC,MAAM,OAAO,CAAC,GAAG,MAAA,oBAA0B,QAAQ,CAAC;AACpD,MAAG,QAAQ,kBAAkB,KAAK,OAAO,qBAAqB;AAE9D,WAAQ,KAAK,EAAE;sBACC,MAAA,IAAU,UAAU,CAAC;;;;;;;;;;;;;;;;;;;;;;;;;;;;iCA4BV,KAAK;;;;;;;;;;;;;;;;;;;;;;;;;;QA0B9B;;AAIJ,MAAI,YAAY,OAAO,GAAG;AACxB,MAAG,QAAQ,kBAAkB,YAAY,KAAK,wBAAwB;GACtE,MAAM,OAAO,MAAM,KACjB,YAAY,SAAS,GACpB,CAAC,WAAW,cAAc;IACzB,eAAe,MAAA;IACf;IACA,iBAAiB,QAAQ,iBAAiB,KAAA;IAC1C,cAAc,QAAQ,gBAAgB;IACtC,YAAY,QAAQ,YAAY,KAAA;IAChC,SAAS,QAAQ,WAAW;IAC5B,uBAAuB,QAAQ,uBAAuB,KAAA;IACtD,oBAAoB,QAAQ,sBAAsB;IAClD,0BAA0B,QAAQ,0BAA0B,KAAA;IAC5D,uBAAuB,QAAQ,yBAAyB;IACxD,oBAAoB,QAAQ,oBAAoB,KAAA;IAChD,iBAAiB,QAAQ,mBAAmB;IAC7C,EACF;AACD,WAAQ,KAAK,EAAE;iBACJ,MAAA,IAAU,UAAU,CAAC;;;;;;;;;;;;;;;;;;;;;;iCAsBL,KAAK;;;;;;;;;;;;;;;;QAgB9B;;AAGJ,SAAO;;CAGT,cACE,IACA,IAC4B;AAC5B,MAAI,MAAA,qBAA2B,SAAS,EACtC,QAAO;EAGT,MAAM,OAAO,MAAM,KAAK,MAAA,qBAA2B,QAAQ,GAAE,QAAO;GAClE,MAAM,EAAC,aAAa,OAAO,wBAAwB,oBACjD,iBAAiB,IAAI,iBAAiB,KAAA,GAAW,IAAI,OAAO,GAAG;AACjE,UAAO;IACL,eAAe,IAAI;IACnB,UAAU,IAAI;IACd,WAAW,IAAI;IACf,cAAc,IAAI;IAClB,SAAS,IAAI;IACb,KAAK;IACL;IACA,eAAe;IACf;IACD;IACD;AAEF,KAAG,QAAQ,kBAAkB,KAAK,OAAO,iBAAiB;AAE1D,SAAO,EAAE;oBACO,MAAA,IAAU,UAAU,CAAC;;;;;;;;;;;;;;;;;;;;;;;;;;+BA0BV,KAAK;;;;;;;;;;;;;;;;;;;;CAqBlC,OAAA,yBACE,IACA,IACA,wBACA,iBACe;EACf,MAAM,QAAQ,KAAK,KAAK;AACxB,KAAG,QAAQ,qCAAqC;EAChD,MAAM,SAAS,MAAM,EAEpB,+CAA+C,MAAA,IAAU,YAAY,CAAC;kCACzC,MAAA,GAAS;;EAEvC,MAAM,WAAW,cAAc,uBAAuB;EACtD,MAAM,EAAC,SAAS,OAAO,cACrB,OAAO,SAAS,IACZ,OAAO,KACP;GACE,SAAS,kBAAkB;GAC3B,OAAO;GACP,WAAW;GACZ;AACP,KAAG,QACD,2CAA2C,KAAK,KAAK,GAAG,SAAS,MAClE;AACD,MAAI,UAAU,MAAA,WAAiB,aAAa,KAAK,gBAC/C,OAAM,IAAI,eAAe,OAAO,WAAW,gBAAgB;AAE7D,MAAI,YAAY,SACd,OAAM,IAAI,gCAAgC,UAAU,QAAQ;;CAIhE,OAAA,MACE,IACA,wBACA,KACA,iBAC+B;EAC/B,MAAM,QAAuB;GAC3B,WAAW;GACX,SAAS;GACT,SAAS;GACT,SAAS;GACT,MAAM;GACN,cAAc;GACd,YAAY;GACb;AACD,MAAI,MAAA,wBAA8B,MAAM;GACtC,MAAM,qBAAqB,MAAM,KAAK,eAAe;AACrD,SAAA,WAAiB,mBAAmB;AACpC,QAAK,MAAM,CAAC,IAAI,QAAQ,MAAA,wBAA8B,SAAS,EAAE;AAC/D,QAAI,MAAA,aAAmB,IAAI,GAAG,CAC5B;IAEF,MAAM,WAAW,mBAAmB,IAAI,GAAG;AAC3C,QAEG,aAAa,KAAA,KAAa,CAAC,KAAK,aAEjC,UACG,OAAO,KAAA,GACR,SACD,CAED,OAAA,wBAA8B,OAAO,GAAG;;;AAI9C,MACE,MAAA,wBAA8B,SAAS,KACvC,MAAA,OAAa,SAAS,KACtB,MAAA,yBAA+B,KAAA,KAC/B,MAAA,oBAA0B,SAAS,KACnC,MAAA,2BAAiC,SAAS,KAC1C,MAAA,qBAA2B,SAAS,EAEpC,QAAO;AAIT,OAAK,YAAY,IAAI;EACrB,MAAM,QAAQ,KAAK,KAAK;AACxB,KAAG,QAAQ,qBAAqB;EAKhC,MAAM,UAAU,MAAM,MACpB,MAAA,IACA,OAAM,OAAM;AACV,MAAG,QAAQ,wBAAwB,KAAK,KAAK,GAAG,MAAM,KAAK;AAK3D,SAAM,MAAA,yBACJ,IACA,IACA,wBACA,gBACD;GAED,MAAM,eAAe,EAAE;AACvB,OAAI,MAAA,sBAA4B;AAC9B,iBAAa,KAAK,MAAA,qBAA2B,IAAI,gBAAgB,CAAC;AAClE,UAAM;AACN,UAAM;;AAER,QAAK,MAAM,SAAS,MAAA,QAAc;AAChC,UAAM,WAAW,MAAM,MAAM,WAAW;AACxC,UAAM,QAAQ,MAAM,MAAM,QAAQ;AAElC,iBAAa,KAAK,MAAM,MAAM,IAAI,gBAAgB,CAAC;AACnD,UAAM;;GAKR,MAAM,kBACJ,MAAA,oBAA0B,OAAO,KACjC,MAAA,2BAAiC,OAAO;GAE1C,MAAM,cAAc,MAAA,aAAmB,IAAI,GAAG;GAE9C,IAAI,eAAsC,EAAE;AAC5C,OAAI,iBAAiB;AACnB,mBAAe,MAAA,aAAmB,
IAAI,GAAG;IAGzC,MAAM,mBAAmB,CACvB,GAAG,MAAA,2BAAiC,MAAM,CAC3C,CAAC,QAAO,QAAO,CAAC,MAAA,oBAA0B,IAAI,IAAI,CAAC,CAAC;AAErD,UAAM,UAAU,MAAA,oBAA0B,OAAO;AACjD,UAAM,eACH,MAAA,oBAA0B,OAAO,IAAI,IAAI,MACzC,mBAAmB,IAAI,IAAI;AAE9B,QAAI,aAAa;AACf,WAAM,UAAU,MAAA,qBAA2B;AAC3C,WAAM;;cAEC,aAAa;AACtB,UAAM,UAAU,MAAA,qBAA2B;AAC3C,UAAM;;GAGR,MAAM,aAAa,MAAA,SAAe,kBAChC,IACA,IAAI,SACJ,MAAA,yBACA,eACA,GACD;AACD,SAAM,cAAc,WAAW;GAG/B,MAAM,YAAY;IAChB,GAAG;IACH,GAAG;IACH,GAAI,cAAc,CAAC,YAAY,GAAG,EAAE;IACpC,GAAG;IACJ;AAED,MAAG,QAAQ,aAAa,UAAU,OAAO,yBAAyB;AAKlE,UAAO,QAAQ,IAAI,UAAU;KAE/B,EAAC,MAAM,gBAAoB,CAC5B;AAED,KAAG,QAAQ,4BAA4B,KAAK,KAAK,GAAG,MAAM,KAAK;EAI/D,MAAM,eACH,MAAA,uBAA6B,IAAI,KAClC,MAAA,OAAa,QACZ,MAAA,oBAA0B,OAAO,IAAI,IAAI,MACzC,CAAC,GAAG,MAAA,2BAAiC,MAAM,CAAC,CAAC,QAC5C,QAAO,CAAC,MAAA,oBAA0B,IAAI,IAAI,CAC3C,CAAC,SAAS,IACP,IACA,MACH,MAAA,qBAA2B,OAAO,IAAI,IAAI;EAG7C,MAAM,cAFiB,QAAQ,SAAS,cAEH;AACrC,MAAI,CAAC,YACH,OAAM,eAAe,MAAA,wBAA8B;MAEnD,OAAM,QAAQ,MAAA,wBAA8B;AAG9C,QAAA,WAAiB,MAAM,MAAA,SAAe,MACpC,MAAA,yBACA,IAAI,SACJ,YACD;AACD,mBAAiB,MAAA,SAAe;AAEhC,SAAO;;CAGT,IAAI,WAAmB;AACrB,SAAO,MAAA;;CAGT,MAAM,MACJ,IACA,wBACA,KACA,iBAC+B;EAC/B,MAAM,QAAQ,YAAY,KAAK;AAC/B,OAAK,GAAG,YAAY,cAAc,eAAe;AACjD,MAAI;GACF,MAAM,QAAQ,MAAM,MAAA,MAClB,IACA,wBACA,KACA,gBACD;AACD,OAAI,OAAO;IACT,MAAM,UAAU,YAAY,KAAK,GAAG;AACpC,OAAG,OACD,eAAe,cAAc,IAAI,QAAQ,CAAC,GACrC,KAAK,UAAU,MAAM,CAAC,OAAO,QAAQ,MAC3C;AACD,UAAA,SAAe,qBAAqB,OAAO,QAAQ;;AAErD,UAAO;WACA,GAAG;AAEV,SAAA,SAAe,OAAO;AACtB,SAAM;YACE;AACR,SAAA,OAAa,OAAO;AACpB,SAAA,uBAA6B,KAAA;AAC7B,SAAA,wBAA8B,OAAO;AACrC,SAAA,aAAmB,OAAO;AAC1B,SAAA,oBAA0B,OAAO;AACjC,SAAA,qBAA2B,OAAO;AAClC,SAAA,2BAAiC,OAAO;;;CAI5C,oBAA6B;AAC3B,SAAO,MAAA,SAAe,mBAAmB;;;CAI3C,QAAQ,IAA+B;AACrC,SAAO,MAAA,SAAe,QAAQ,GAAG;;CAGnC,MAAM,eACJ,IACA,UACA,UAC4B;EAC5B,MAAM,KAAK,MAAA;EACX,MAAM,gBAAgB,MAAA;EAEtB,MAAM,SAAS,IAAI,gBAAgB,IAAI,EAAC,MAAM,UAAc,CAAC,CAAC,IAAI,GAAG;AACrE,MAAI;AACF,UAAO,MAAM,OAAO,iBAClB,OAAM,EAAqB;;;;0BAIT,eAAe;;iCAER,MAAA,IAAU,OAAO,CAAC;;;;;;;;SAQ1C,MAAA,IAAU,UAAU,CAAC;cAChB,MAAA,IAAU,UAAU,CAAC;;;8BAGL,cAAc;MACtC,WAAW,EAAE,sBAAsB,aAAa,EAAE,GAAG;;;;iDAIV,iBAAiB,SAAS,CAAC;;wCAGrE;YACO;AACR,UAAO,SAAS;;;;;;;;;AAUtB,eAAsB,aACpB,IACA,QACA,eACA,wBACe;CACf,MAAM,WAAW,cAAc,uBAAuB;CACtD,MAAM,SAAS,MAAM,EAAmC;0BAChC,GAAG,OAAO,CAAC;gCACL;CAC9B,MAAM,EAAC,YACL,OAAO,SAAS,IAAI,OAAO,KAAK,EAAC,SAAS,kBAAkB,cAAa;AAC3E,KAAI,YAAY,SACd,OAAM,IAAI,gCAAgC,UAAU,QAAQ;;AAIhE,IAAa,sBAAb,cAAyC,uBAAuB;CAC9D,YAAY,SAAiB;AAC3B,QACE;GACE,MAAM;GACN;GACA,QAAQ;GACT,EACD,OACD;;;AAIL,IAAa,kCAAb,cAAqD,uBAAuB;CAC1E,OAAgB;CAEhB,YAAY,iBAAyB,eAAuB;AAC1D,QACE;GACE,MAAM;GACN,SAAS,gDAAgD,gBAAgB,QAAQ;GACjF,QAAQ;GACT,EACD,OACD;;;AAIL,IAAa,iBAAb,cAAoC,uBAAuB;CACzD,OAAgB;CAEhB,YACE,OACA,WACA,iBACA;AACA,QACE;GACE,MAAM;GACN,SACE,oCAAoC,MAAM,MACvC,IAAI,KAAK,aAAa,EAAE,CAAC,aAAa,CAAC,uBACnB,IAAI,KAAK,gBAAgB,CAAC,aAAa,CAAC;GACjE,cAAc;GACd,QAAQ;GACT,EACD,OACD;;;AAIL,IAAa,2BAAb,cAA8C,uBAAuB;CACnE,OAAgB;CAEhB,YAAY,OAAgB;AAC1B,QACE;GACE,MAAM;GACN,SAAS,+CAA+C,OAAO,MAAM;GACrE,QAAQ;GACT,EACD,QACA,EAAC,OAAM,CACR;;;AAIL,IAAa,yBAAb,cAA4C,MAAM;CAChD,OAAgB;CAChB;CACA;CAEA,YAAY,YAAoB,aAA4B;AAC1D,QAAM,gBAAgB,YAAY,kBAAkB,aAAa;AACjE,OAAK,aAAa;AAClB,OAAK,cAAc"}
@@ -1 +1 @@
1
- {"version":3,"file":"row-record-cache.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/view-syncer/row-record-cache.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,EAAC,YAAY,EAAE,GAAG,EAAC,MAAM,UAAU,CAAC;AAYhD,OAAO,EAAC,KAAK,UAAU,EAAE,KAAK,mBAAmB,EAAC,MAAM,mBAAmB,CAAC;AAE5E,OAAO,EAAY,KAAK,OAAO,EAAC,MAAM,uBAAuB,CAAC;AAC9D,OAAO,EAAe,KAAK,aAAa,EAAC,MAAM,gBAAgB,CAAC;AAChE,OAAO,KAAK,EAAC,WAAW,EAAC,MAAM,UAAU,CAAC;AAC1C,OAAO,EAEL,KAAK,OAAO,EAEb,MAAM,iBAAiB,CAAC;AACzB,OAAO,EAEL,KAAK,UAAU,EACf,KAAK,kBAAkB,EACvB,KAAK,KAAK,EACV,KAAK,SAAS,EAGf,MAAM,mBAAmB,CAAC;AAK3B;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAiDG;AACH,qBAAa,cAAc;;gBAgCvB,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,OAAO,EACd,KAAK,EAAE,MAAM,EACb,WAAW,EAAE,CAAC,CAAC,EAAE,OAAO,KAAK,IAAI,EACjC,yBAAyB,SAAM,EAC/B,YAAY,oBAAa;IAW3B,oBAAoB,CAAC,KAAK,EAAE,aAAa,EAAE,SAAS,EAAE,MAAM;IA+D5D,aAAa,IAAI,OAAO,CAAC,WAAW,CAAC,KAAK,EAAE,SAAS,CAAC,CAAC;IAIvD;;;;;;;;;;;;;;OAcG;IACG,KAAK,CACT,UAAU,EAAE,GAAG,CAAC,KAAK,EAAE,SAAS,GAAG,IAAI,CAAC,EACxC,WAAW,EAAE,UAAU,EACvB,OAAO,EAAE,OAAO,GACf,OAAO,CAAC,MAAM,CAAC;IAuElB,iBAAiB;IAIjB;;;OAGG;IACH,OAAO,CAAC,EAAE,EAAE,UAAU,GAAG,OAAO,CAAC,IAAI,CAAC;IAQtC,KAAK;IAOE,iBAAiB,CACtB,EAAE,EAAE,UAAU,EACd,YAAY,EAAE,kBAAkB,EAChC,OAAO,EAAE,WAAW,EACpB,OAAO,EAAE,UAAU,EACnB,kBAAkB,GAAE,MAAM,EAAO,GAChC,cAAc,CAAC,OAAO,EAAE,EAAE,IAAI,EAAE,SAAS,CAAC;IAmD7C,iBAAiB,CACf,EAAE,EAAE,mBAAmB,EACvB,OAAO,EAAE,UAAU,EACnB,UAAU,EAAE,GAAG,CAAC,KAAK,EAAE,SAAS,GAAG,IAAI,CAAC,EACxC,IAAI,EAAE,aAAa,GAAG,OAAO,EAC7B,EAAE,yDAAW,GACZ,YAAY,CAAC,GAAG,EAAE,CAAC,EAAE;CAiEzB"}
1
+ {"version":3,"file":"row-record-cache.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/view-syncer/row-record-cache.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,EAAC,YAAY,EAAE,GAAG,EAAC,MAAM,UAAU,CAAC;AAYhD,OAAO,EAAC,KAAK,UAAU,EAAE,KAAK,mBAAmB,EAAC,MAAM,mBAAmB,CAAC;AAE5E,OAAO,EAAY,KAAK,OAAO,EAAC,MAAM,uBAAuB,CAAC;AAC9D,OAAO,EAAe,KAAK,aAAa,EAAC,MAAM,gBAAgB,CAAC;AAChE,OAAO,KAAK,EAAC,WAAW,EAAC,MAAM,UAAU,CAAC;AAC1C,OAAO,EAEL,KAAK,OAAO,EAEb,MAAM,iBAAiB,CAAC;AACzB,OAAO,EAEL,KAAK,UAAU,EACf,KAAK,kBAAkB,EACvB,KAAK,KAAK,EACV,KAAK,SAAS,EAGf,MAAM,mBAAmB,CAAC;AAK3B;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAiDG;AACH,qBAAa,cAAc;;gBAgCvB,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,OAAO,EACd,KAAK,EAAE,MAAM,EACb,WAAW,EAAE,CAAC,CAAC,EAAE,OAAO,KAAK,IAAI,EACjC,yBAAyB,SAAM,EAC/B,YAAY,oBAAa;IAW3B,oBAAoB,CAAC,KAAK,EAAE,aAAa,EAAE,SAAS,EAAE,MAAM;IAuE5D,aAAa,IAAI,OAAO,CAAC,WAAW,CAAC,KAAK,EAAE,SAAS,CAAC,CAAC;IAIvD;;;;;;;;;;;;;;OAcG;IACG,KAAK,CACT,UAAU,EAAE,GAAG,CAAC,KAAK,EAAE,SAAS,GAAG,IAAI,CAAC,EACxC,WAAW,EAAE,UAAU,EACvB,OAAO,EAAE,OAAO,GACf,OAAO,CAAC,MAAM,CAAC;IAuElB,iBAAiB;IAIjB;;;OAGG;IACH,OAAO,CAAC,EAAE,EAAE,UAAU,GAAG,OAAO,CAAC,IAAI,CAAC;IAQtC,KAAK;IAOE,iBAAiB,CACtB,EAAE,EAAE,UAAU,EACd,YAAY,EAAE,kBAAkB,EAChC,OAAO,EAAE,WAAW,EACpB,OAAO,EAAE,UAAU,EACnB,kBAAkB,GAAE,MAAM,EAAO,GAChC,cAAc,CAAC,OAAO,EAAE,EAAE,IAAI,EAAE,SAAS,CAAC;IAmE7C,iBAAiB,CACf,EAAE,EAAE,mBAAmB,EACvB,OAAO,EAAE,UAAU,EACnB,UAAU,EAAE,GAAG,CAAC,KAAK,EAAE,SAAS,GAAG,IAAI,CAAC,EACxC,IAAI,EAAE,aAAa,GAAG,OAAO,EAC7B,EAAE,yDAAW,GACZ,YAAY,CAAC,GAAG,EAAE,CAAC,EAAE;CAiEzB"}
@@ -111,7 +111,15 @@ var RowRecordCache = class {
111
111
  const cache = await startAsyncSpan(tracer, "RowRecordCache.load", async (span) => {
112
112
  const cache = new CustomKeyMap(rowIDString);
113
113
  for await (const rows of this.#db`
114
- SELECT * FROM ${this.#cvr(`rows`)}
114
+ SELECT
115
+ "clientGroupID",
116
+ "schema",
117
+ "table",
118
+ "rowKey",
119
+ "rowVersion",
120
+ "patchVersion",
121
+ "refCounts"
122
+ FROM ${this.#cvr(`rows`)}
115
123
  WHERE "clientGroupID" = ${this.#cvrID} AND "refCounts" IS NOT NULL`.cursor(5e3)) for (const row of rows) {
116
124
  const rowRecord = rowsRowToRowRecord(row);
117
125
  cache.set(rowRecord.id, rowRecord);
@@ -218,10 +226,26 @@ var RowRecordCache = class {
218
226
  try {
219
227
  await reader.processReadTask((tx) => checkVersion(tx, this.#schema, this.#cvrID, current));
220
228
  const { query } = await reader.processReadTask((tx) => {
221
- return { query: excludeQueryHashes.length === 0 ? tx`SELECT * FROM ${this.#cvr("rows")}
229
+ return { query: excludeQueryHashes.length === 0 ? tx`SELECT
230
+ "clientGroupID",
231
+ "schema",
232
+ "table",
233
+ "rowKey",
234
+ "rowVersion",
235
+ "patchVersion",
236
+ "refCounts"
237
+ FROM ${this.#cvr("rows")}
222
238
  WHERE "clientGroupID" = ${this.#cvrID}
223
239
  AND "patchVersion" > ${start}
224
- AND "patchVersion" <= ${end}` : tx`SELECT * FROM ${this.#cvr("rows")}
240
+ AND "patchVersion" <= ${end}` : tx`SELECT
241
+ "clientGroupID",
242
+ "schema",
243
+ "table",
244
+ "rowKey",
245
+ "rowVersion",
246
+ "patchVersion",
247
+ "refCounts"
248
+ FROM ${this.#cvr("rows")}
225
249
  WHERE "clientGroupID" = ${this.#cvrID}
226
250
  AND "patchVersion" > ${start}
227
251
  AND "patchVersion" <= ${end}
@@ -1 +1 @@
1
- {"version":3,"file":"row-record-cache.js","names":["#lc","#db","#schema","#cvrID","#failService","#deferredRowFlushThreshold","#setTimeout","#pending","#cvrFlushTime","#cvrRowsFlushed","#ensureLoaded","#cache","#cvr","#pendingRowsVersion","#flushing","#flush","#flushedRowsVersion","#recordAsyncFlushStats"],"sources":["../../../../../../zero-cache/src/services/view-syncer/row-record-cache.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {type Resolver, resolver} from '@rocicorp/resolver';\nimport type {PendingQuery, Row} from 'postgres';\nimport {startAsyncSpan} from '../../../../otel/src/span.ts';\nimport {CustomKeyMap} from '../../../../shared/src/custom-key-map.ts';\nimport {must} from '../../../../shared/src/must.ts';\nimport {promiseVoid} from '../../../../shared/src/resolved-promises.ts';\nimport * as Mode from '../../db/mode-enum.ts';\nimport {runTx} from '../../db/run-transaction.ts';\nimport {TransactionPool} from '../../db/transaction-pool.ts';\nimport {\n getOrCreateCounter,\n getOrCreateLatencyHistogram,\n} from '../../observability/metrics.ts';\nimport {type PostgresDB, type PostgresTransaction} from '../../types/pg.ts';\nimport {rowIDString} from '../../types/row-key.ts';\nimport {cvrSchema, type ShardID} from '../../types/shards.ts';\nimport {checkVersion, type CVRFlushStats} from './cvr-store.ts';\nimport type {CVRSnapshot} from './cvr.ts';\nimport {\n rowRecordToRowsRow,\n type RowsRow,\n rowsRowToRowRecord,\n} from './schema/cvr.ts';\nimport {\n cmpVersions,\n type CVRVersion,\n type NullableCVRVersion,\n type RowID,\n type RowRecord,\n versionString,\n versionToNullableCookie,\n} from './schema/types.ts';\nimport {tracer} from './tracer.ts';\n\nconst FLUSH_TYPE_ATTRIBUTE = 'flush.type';\n\n/**\n * The RowRecordCache is an in-memory cache of the `cvr.rows` tables that\n * operates as both a write-through and write-back cache.\n *\n * For \"small\" CVR updates (i.e. zero or small numbers of rows) the\n * RowRecordCache operates as write-through, executing commits in\n * {@link executeRowUpdates()} before they are {@link apply}-ed to the\n * in-memory state.\n *\n * For \"large\" CVR updates (i.e. with many rows), the cache switches to a\n * write-back mode of operation, in which {@link executeRowUpdates()} is a\n * no-op, and {@link apply()} initiates a background task to flush the pending\n * row changes to the store. This allows the client poke to be completed and\n * committed on the client without waiting for the heavyweight operation of\n * committing the row records to the CVR store.\n *\n * Note that when the cache is in write-back mode, all updates become\n * write-back (i.e. asynchronously flushed) until the pending update queue is\n * fully flushed. This is required because updates must be applied in version\n * order. As with all pending work systems in zero-cache, multiple pending\n * updates are coalesced to reduce buildup of work.\n *\n * ### High level consistency\n *\n * Note that the above caching scheme only applies to the row data in `cvr.rows`\n * and corresponding `cvr.rowsVersion` tables. CVR metadata and query\n * information, on the other hand, are always committed before completing the\n * client poke. 
In this manner, the difference between the `version` column in\n * `cvr.instances` and the analogous column in `cvr.rowsVersion` determines\n * whether the data in the store is consistent, or whether it is awaiting a\n * pending update.\n *\n * The logic in {@link CVRStore#load()} takes this into account by loading both\n * the `cvr.instances` version and the `cvr.rowsVersion` version and checking\n * if they are in sync, waiting for a configurable delay until they are.\n *\n * ### Eventual conversion\n *\n * In the event of a continual stream of mutations (e.g. an animation-style\n * app), it is conceivable that the row record data be continually behind\n * the CVR metadata. In order to effect eventual convergence, a new view-syncer\n * signals the current view-syncer to stop updating by writing new `owner`\n * information to the `cvr.instances` row. This effectively stops the mutation\n * processing (in {@link CVRStore.#checkVersionAndOwnership}) so that the row\n * data can eventually catch up, allowing the new view-syncer to take over.\n *\n * Of course, there is the pathological situation in which a view-syncer\n * process crashes before the pending row updates are flushed. In this case,\n * the wait timeout will elapse and the CVR considered invalid.\n */\nexport class RowRecordCache {\n // The state in the #cache is always in sync with the CVR metadata\n // (i.e. cvr.instances). It may contain information that has not yet\n // been flushed to cvr.rows.\n #cache: Promise<CustomKeyMap<RowID, RowRecord>> | undefined;\n readonly #lc: LogContext;\n readonly #db: PostgresDB;\n readonly #schema: string;\n readonly #cvrID: string;\n readonly #failService: (e: unknown) => void;\n readonly #deferredRowFlushThreshold: number;\n readonly #setTimeout: typeof setTimeout;\n\n // Write-back cache state.\n readonly #pending = new CustomKeyMap<RowID, RowRecord | null>(rowIDString);\n #pendingRowsVersion: CVRVersion | null = null;\n #flushedRowsVersion: CVRVersion | null = null;\n #flushing: Resolver<void> | null = null;\n\n readonly #cvrFlushTime = getOrCreateLatencyHistogram(\n 'sync',\n 'cvr.flush-time',\n 'Time to flush a CVR transaction. 
This includes both synchronous ' +\n 'and asynchronous flushes, distinguished by the flush.type attribute.',\n );\n readonly #cvrRowsFlushed = getOrCreateCounter(\n 'sync',\n 'cvr.rows-flushed',\n 'Number of (changed) rows flushed to a CVR',\n );\n\n constructor(\n lc: LogContext,\n db: PostgresDB,\n shard: ShardID,\n cvrID: string,\n failService: (e: unknown) => void,\n deferredRowFlushThreshold = 100,\n setTimeoutFn = setTimeout,\n ) {\n this.#lc = lc;\n this.#db = db;\n this.#schema = cvrSchema(shard);\n this.#cvrID = cvrID;\n this.#failService = failService;\n this.#deferredRowFlushThreshold = deferredRowFlushThreshold;\n this.#setTimeout = setTimeoutFn;\n }\n\n recordSyncFlushStats(stats: CVRFlushStats, elapsedMs: number) {\n this.#cvrFlushTime.recordMs(elapsedMs, {\n [FLUSH_TYPE_ATTRIBUTE]: 'sync',\n });\n if (stats.rowsDeferred === 0) {\n this.#cvrRowsFlushed.add(stats.rows);\n }\n }\n\n #recordAsyncFlushStats(rows: number, elapsedMs: number) {\n this.#cvrFlushTime.recordMs(elapsedMs, {\n [FLUSH_TYPE_ATTRIBUTE]: 'async',\n });\n this.#cvrRowsFlushed.add(rows);\n }\n\n #cvr(table: string) {\n return this.#db(`${this.#schema}.${table}`);\n }\n\n async #ensureLoaded(): Promise<CustomKeyMap<RowID, RowRecord>> {\n if (this.#cache) {\n return this.#cache;\n }\n const start = Date.now();\n const r = resolver<CustomKeyMap<RowID, RowRecord>>();\n r.promise.catch(() => {});\n // Set this.#cache immediately (before await) so that only one db\n // query is made even if there are multiple callers.\n this.#cache = r.promise;\n try {\n const cache = await startAsyncSpan(\n tracer,\n 'RowRecordCache.load',\n async span => {\n const cache: CustomKeyMap<RowID, RowRecord> = new CustomKeyMap(\n rowIDString,\n );\n for await (const rows of this.#db<RowsRow[]>`\n SELECT * FROM ${this.#cvr(`rows`)}\n WHERE \"clientGroupID\" = ${this.#cvrID} AND \"refCounts\" IS NOT NULL`\n // TODO(arv): Arbitrary page size\n .cursor(5000)) {\n for (const row of rows) {\n const rowRecord = rowsRowToRowRecord(row);\n cache.set(rowRecord.id, rowRecord);\n }\n }\n span.setAttribute('rows', cache.size);\n return cache;\n },\n );\n this.#lc.info?.(\n `Loaded ${cache.size} row records in ${Date.now() - start} ms`,\n );\n r.resolve(cache);\n return this.#cache;\n } catch (e) {\n r.reject(e); // Make sure the error is reflected in the cached promise\n throw e;\n }\n }\n\n getRowRecords(): Promise<ReadonlyMap<RowID, RowRecord>> {\n return this.#ensureLoaded();\n }\n\n /**\n * Applies the `rowRecords` corresponding to the `rowsVersion`\n * to the cache, indicating whether the corresponding updates\n * (generated by {@link executeRowUpdates}) were `flushed`.\n *\n * If `flushed` is false, the RowRecordCache will flush the records\n * asynchronously.\n *\n * Note that `apply()` indicates that the CVR metadata associated with\n * the `rowRecords` was successfully committed, which essentially means\n * that this process has the unconditional right (and responsibility) of\n * following up with a flush of the `rowRecords`. 
In particular, the\n * commit of row records are not conditioned on the version or ownership\n * columns of the `cvr.instances` row.\n */\n async apply(\n rowRecords: Map<RowID, RowRecord | null>,\n rowsVersion: CVRVersion,\n flushed: boolean,\n ): Promise<number> {\n const cache = await this.#ensureLoaded();\n for (const [id, row] of rowRecords.entries()) {\n if (row === null || row.refCounts === null) {\n cache.delete(id);\n } else {\n cache.set(id, row);\n }\n if (!flushed) {\n this.#pending.set(id, row);\n }\n }\n this.#pendingRowsVersion = rowsVersion;\n // Initiate a flush if not already flushing.\n if (!flushed && this.#flushing === null) {\n this.#flushing = resolver();\n // The #flush() method handles propagating errors to #failService.\n // Attach a rejection handler to this promise to avoid unhandled\n // rejections.\n this.#flushing.promise.catch(() => {});\n this.#setTimeout(() => this.#flush(), 0);\n }\n return cache.size;\n }\n\n async #flush() {\n const flushing = must(this.#flushing);\n try {\n while (this.#pendingRowsVersion !== this.#flushedRowsVersion) {\n const start = performance.now();\n\n const {rows, rowsVersion} = await runTx(\n this.#db,\n tx => {\n // Note: This code block is synchronous, guaranteeing that the\n // #pendingRowsVersion is consistent with the #pending rows.\n const rows = this.#pending.size;\n const rowsVersion = must(this.#pendingRowsVersion);\n // Awaiting all of the individual statements incurs too much\n // overhead. Instead, just catch and log exception(s); the outer\n // transaction will properly fail.\n void Promise.all(\n this.executeRowUpdates(tx, rowsVersion, this.#pending, 'force'),\n ).catch(e => this.#lc.error?.(`error flushing cvr rows`, e));\n\n this.#pending.clear();\n return {rows, rowsVersion};\n },\n {mode: Mode.READ_COMMITTED},\n );\n const elapsed = performance.now() - start;\n this.#lc.info?.(\n `flushed ${rows} rows@${versionString(rowsVersion)} (${elapsed} ms)`,\n );\n this.#recordAsyncFlushStats(rows, elapsed);\n this.#flushedRowsVersion = rowsVersion;\n // Note: apply() may have called while the transaction was committing,\n // which will result in looping to commit the next #pendingRowsVersion.\n }\n this.#lc.info?.(\n `up to date rows@${versionToNullableCookie(this.#flushedRowsVersion)}`,\n );\n flushing.resolve();\n this.#flushing = null;\n } catch (e) {\n this.#lc.info?.(`row record flush failed`, e);\n flushing.reject(e);\n this.#failService(e);\n }\n }\n\n hasPendingUpdates() {\n return this.#flushing !== null;\n }\n\n /**\n * Returns a promise that resolves when all outstanding row-records\n * have been committed.\n */\n flushed(lc: LogContext): Promise<void> {\n if (this.#flushing) {\n lc.debug?.('awaiting pending row flush');\n return this.#flushing.promise;\n }\n return promiseVoid;\n }\n\n clear() {\n // Note: Only the #cache is cleared. #pending updates, on the other hand,\n // comprise canonical (i.e. already flushed) data and must be flushed\n // even if the snapshot of the present state (the #cache) is cleared.\n this.#cache = undefined;\n }\n\n async *catchupRowPatches(\n lc: LogContext,\n afterVersion: NullableCVRVersion,\n upToCVR: CVRSnapshot,\n current: CVRVersion,\n excludeQueryHashes: string[] = [],\n ): AsyncGenerator<RowsRow[], void, undefined> {\n if (cmpVersions(afterVersion, upToCVR.version) >= 0) {\n return;\n }\n\n const startMs = Date.now();\n const start = afterVersion ? 
versionString(afterVersion) : '';\n const end = versionString(upToCVR.version);\n lc.debug?.(`scanning row patches for clients from ${start}`);\n\n // Before accessing the CVR db, pending row records must be flushed.\n // Note that because catchupRowPatches() is called from within the\n // view syncer lock, this flush is guaranteed to complete since no\n // new CVR updates can happen while the lock is held.\n await this.flushed(lc);\n const flushMs = Date.now() - startMs;\n\n const reader = new TransactionPool(lc, {mode: Mode.READONLY}).run(this.#db);\n try {\n // Verify that we are reading the right version of the CVR.\n await reader.processReadTask(tx =>\n checkVersion(tx, this.#schema, this.#cvrID, current),\n );\n\n const {query} = await reader.processReadTask(tx => {\n const query =\n excludeQueryHashes.length === 0\n ? tx<RowsRow[]>`SELECT * FROM ${this.#cvr('rows')}\n WHERE \"clientGroupID\" = ${this.#cvrID}\n AND \"patchVersion\" > ${start}\n AND \"patchVersion\" <= ${end}`\n : // Exclude rows that were already sent as part of query hydration.\n tx<RowsRow[]>`SELECT * FROM ${this.#cvr('rows')}\n WHERE \"clientGroupID\" = ${this.#cvrID}\n AND \"patchVersion\" > ${start}\n AND \"patchVersion\" <= ${end}\n AND (\"refCounts\" IS NULL OR NOT \"refCounts\" ?| ${excludeQueryHashes})`;\n return {query};\n });\n\n yield* query.cursor(10000);\n } finally {\n reader.setDone();\n }\n\n const totalMs = Date.now() - startMs;\n lc.info?.(\n `finished row catchup (flush: ${flushMs} ms, total: ${totalMs} ms)`,\n );\n }\n\n executeRowUpdates(\n tx: PostgresTransaction,\n version: CVRVersion,\n rowUpdates: Map<RowID, RowRecord | null>,\n mode: 'allow-defer' | 'force',\n lc = this.#lc,\n ): PendingQuery<Row[]>[] {\n if (\n mode === 'allow-defer' &&\n // defer if pending rows are being flushed\n (this.#flushing !== null ||\n // or if the new batch is above the limit.\n rowUpdates.size > this.#deferredRowFlushThreshold)\n ) {\n return [];\n }\n const rowsVersion = {\n clientGroupID: this.#cvrID,\n version: versionString(version),\n };\n const pending: PendingQuery<Row[]>[] = [\n tx`INSERT INTO ${this.#cvr('rowsVersion')} ${tx(rowsVersion)}\n ON CONFLICT (\"clientGroupID\") \n DO UPDATE SET ${tx(rowsVersion)}`,\n ];\n\n const rowRecordRows: RowsRow[] = [];\n for (const [id, row] of rowUpdates.entries()) {\n if (row === null) {\n pending.push(\n tx`\n DELETE FROM ${this.#cvr('rows')}\n WHERE \"clientGroupID\" = ${this.#cvrID}\n AND \"schema\" = ${id.schema}\n AND \"table\" = ${id.table}\n AND \"rowKey\" = ${id.rowKey}\n `,\n );\n } else {\n rowRecordRows.push(rowRecordToRowsRow(this.#cvrID, row));\n }\n }\n if (rowRecordRows.length) {\n pending.push(\n tx`\n INSERT INTO ${this.#cvr('rows')}(\n \"clientGroupID\", \"schema\", \"table\", \"rowKey\", \"rowVersion\", \"patchVersion\", \"refCounts\"\n ) SELECT\n \"clientGroupID\", \"schema\", \"table\", \"rowKey\", \"rowVersion\", \"patchVersion\", \"refCounts\"\n FROM json_to_recordset(${rowRecordRows}) AS x(\n \"clientGroupID\" TEXT,\n \"schema\" TEXT,\n \"table\" TEXT,\n \"rowKey\" JSONB,\n \"rowVersion\" TEXT,\n \"patchVersion\" TEXT,\n \"refCounts\" JSONB\n ) ON CONFLICT (\"clientGroupID\", \"schema\", \"table\", \"rowKey\")\n DO UPDATE SET \"rowVersion\" = excluded.\"rowVersion\",\n \"patchVersion\" = excluded.\"patchVersion\",\n \"refCounts\" = excluded.\"refCounts\"\n `,\n );\n lc.info?.(\n `flushing ${rowUpdates.size} rows (${rowRecordRows.length} inserts, ${\n rowUpdates.size - rowRecordRows.length\n } deletes)`,\n );\n }\n return pending;\n 
}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;AAmCA,IAAM,uBAAuB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAoD7B,IAAa,iBAAb,MAA4B;CAI1B;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CAGA,WAAoB,IAAI,aAAsC,YAAY;CAC1E,sBAAyC;CACzC,sBAAyC;CACzC,YAAmC;CAEnC,gBAAyB,4BACvB,QACA,kBACA,uIAED;CACD,kBAA2B,mBACzB,QACA,oBACA,4CACD;CAED,YACE,IACA,IACA,OACA,OACA,aACA,4BAA4B,KAC5B,eAAe,YACf;AACA,QAAA,KAAW;AACX,QAAA,KAAW;AACX,QAAA,SAAe,UAAU,MAAM;AAC/B,QAAA,QAAc;AACd,QAAA,cAAoB;AACpB,QAAA,4BAAkC;AAClC,QAAA,aAAmB;;CAGrB,qBAAqB,OAAsB,WAAmB;AAC5D,QAAA,aAAmB,SAAS,WAAW,GACpC,uBAAuB,QACzB,CAAC;AACF,MAAI,MAAM,iBAAiB,EACzB,OAAA,eAAqB,IAAI,MAAM,KAAK;;CAIxC,uBAAuB,MAAc,WAAmB;AACtD,QAAA,aAAmB,SAAS,WAAW,GACpC,uBAAuB,SACzB,CAAC;AACF,QAAA,eAAqB,IAAI,KAAK;;CAGhC,KAAK,OAAe;AAClB,SAAO,MAAA,GAAS,GAAG,MAAA,OAAa,GAAG,QAAQ;;CAG7C,OAAA,eAA+D;AAC7D,MAAI,MAAA,MACF,QAAO,MAAA;EAET,MAAM,QAAQ,KAAK,KAAK;EACxB,MAAM,IAAI,UAA0C;AACpD,IAAE,QAAQ,YAAY,GAAG;AAGzB,QAAA,QAAc,EAAE;AAChB,MAAI;GACF,MAAM,QAAQ,MAAM,eAClB,QACA,uBACA,OAAM,SAAQ;IACZ,MAAM,QAAwC,IAAI,aAChD,YACD;AACD,eAAW,MAAM,QAAQ,MAAA,EAAmB;4BAC1B,MAAA,IAAU,OAAO,CAAC;wCACN,MAAA,MAAY,8BAEvC,OAAO,IAAK,CACb,MAAK,MAAM,OAAO,MAAM;KACtB,MAAM,YAAY,mBAAmB,IAAI;AACzC,WAAM,IAAI,UAAU,IAAI,UAAU;;AAGtC,SAAK,aAAa,QAAQ,MAAM,KAAK;AACrC,WAAO;KAEV;AACD,SAAA,GAAS,OACP,UAAU,MAAM,KAAK,kBAAkB,KAAK,KAAK,GAAG,MAAM,KAC3D;AACD,KAAE,QAAQ,MAAM;AAChB,UAAO,MAAA;WACA,GAAG;AACV,KAAE,OAAO,EAAE;AACX,SAAM;;;CAIV,gBAAwD;AACtD,SAAO,MAAA,cAAoB;;;;;;;;;;;;;;;;;CAkB7B,MAAM,MACJ,YACA,aACA,SACiB;EACjB,MAAM,QAAQ,MAAM,MAAA,cAAoB;AACxC,OAAK,MAAM,CAAC,IAAI,QAAQ,WAAW,SAAS,EAAE;AAC5C,OAAI,QAAQ,QAAQ,IAAI,cAAc,KACpC,OAAM,OAAO,GAAG;OAEhB,OAAM,IAAI,IAAI,IAAI;AAEpB,OAAI,CAAC,QACH,OAAA,QAAc,IAAI,IAAI,IAAI;;AAG9B,QAAA,qBAA2B;AAE3B,MAAI,CAAC,WAAW,MAAA,aAAmB,MAAM;AACvC,SAAA,WAAiB,UAAU;AAI3B,SAAA,SAAe,QAAQ,YAAY,GAAG;AACtC,SAAA,iBAAuB,MAAA,OAAa,EAAE,EAAE;;AAE1C,SAAO,MAAM;;CAGf,OAAA,QAAe;EACb,MAAM,WAAW,KAAK,MAAA,SAAe;AACrC,MAAI;AACF,UAAO,MAAA,uBAA6B,MAAA,oBAA0B;IAC5D,MAAM,QAAQ,YAAY,KAAK;IAE/B,MAAM,EAAC,MAAM,gBAAe,MAAM,MAChC,MAAA,KACA,OAAM;KAGJ,MAAM,OAAO,MAAA,QAAc;KAC3B,MAAM,cAAc,KAAK,MAAA,mBAAyB;AAI7C,aAAQ,IACX,KAAK,kBAAkB,IAAI,aAAa,MAAA,SAAe,QAAQ,CAChE,CAAC,OAAM,MAAK,MAAA,GAAS,QAAQ,2BAA2B,EAAE,CAAC;AAE5D,WAAA,QAAc,OAAO;AACrB,YAAO;MAAC;MAAM;MAAY;OAE5B,EAAC,MAAM,gBAAoB,CAC5B;IACD,MAAM,UAAU,YAAY,KAAK,GAAG;AACpC,UAAA,GAAS,OACP,WAAW,KAAK,QAAQ,cAAc,YAAY,CAAC,IAAI,QAAQ,MAChE;AACD,UAAA,sBAA4B,MAAM,QAAQ;AAC1C,UAAA,qBAA2B;;AAI7B,SAAA,GAAS,OACP,mBAAmB,wBAAwB,MAAA,mBAAyB,GACrE;AACD,YAAS,SAAS;AAClB,SAAA,WAAiB;WACV,GAAG;AACV,SAAA,GAAS,OAAO,2BAA2B,EAAE;AAC7C,YAAS,OAAO,EAAE;AAClB,SAAA,YAAkB,EAAE;;;CAIxB,oBAAoB;AAClB,SAAO,MAAA,aAAmB;;;;;;CAO5B,QAAQ,IAA+B;AACrC,MAAI,MAAA,UAAgB;AAClB,MAAG,QAAQ,6BAA6B;AACxC,UAAO,MAAA,SAAe;;AAExB,SAAO;;CAGT,QAAQ;AAIN,QAAA,QAAc,KAAA;;CAGhB,OAAO,kBACL,IACA,cACA,SACA,SACA,qBAA+B,EAAE,EACW;AAC5C,MAAI,YAAY,cAAc,QAAQ,QAAQ,IAAI,EAChD;EAGF,MAAM,UAAU,KAAK,KAAK;EAC1B,MAAM,QAAQ,eAAe,cAAc,aAAa,GAAG;EAC3D,MAAM,MAAM,cAAc,QAAQ,QAAQ;AAC1C,KAAG,QAAQ,yCAAyC,QAAQ;AAM5D,QAAM,KAAK,QAAQ,GAAG;EACtB,MAAM,UAAU,KAAK,KAAK,GAAG;EAE7B,MAAM,SAAS,IAAI,gBAAgB,IAAI,EAAC,MAAM,UAAc,CAAC,CAAC,IAAI,MAAA,GAAS;AAC3E,MAAI;AAEF,SAAM,OAAO,iBAAgB,OAC3B,aAAa,IAAI,MAAA,QAAc,MAAA,OAAa,QAAQ,CACrD;GAED,MAAM,EAAC,UAAS,MAAM,OAAO,iBAAgB,OAAM;AAajD,WAAO,EAAC,OAXN,mBAAmB,WAAW,IAC1B,EAAa,iBAAiB,MAAA,IAAU,OAAO,CAAC;kCAC5B,MAAA,MAAY;iCACb,MAAM;kCACL,QAEpB,EAAa,iBAAiB,MAAA,IAAU,OAAO,CAAC;kCAC5B,MAAA,MAAY;iCACb,MAAM;kCACL,IAAI;2DACqB,mBAAmB,IACxD;KACd;AAEF,UAAO,MAAM,OAAO,IAAM;YAClB;AACR,UAAO,SAAS;;EAGlB,MAAM,UAAU,KAAK,KAAK,GAAG;AAC7B,KAAG,OACD,gCAAgC,QAAQ,cAAc,QAAQ,MAC/D;;CAGH,kBACE,IACA,SACA,YACA,MAC
A,KAAK,MAAA,IACkB;AACvB,MACE,SAAS,kBAER,MAAA,aAAmB,QAElB,WAAW,OAAO,MAAA,2BAEpB,QAAO,EAAE;EAEX,MAAM,cAAc;GAClB,eAAe,MAAA;GACf,SAAS,cAAc,QAAQ;GAChC;EACD,MAAM,UAAiC,CACrC,EAAE,eAAe,MAAA,IAAU,cAAc,CAAC,GAAG,GAAG,YAAY,CAAC;;2BAExC,GAAG,YAAY,GACrC;EAED,MAAM,gBAA2B,EAAE;AACnC,OAAK,MAAM,CAAC,IAAI,QAAQ,WAAW,SAAS,CAC1C,KAAI,QAAQ,KACV,SAAQ,KACN,EAAE;wBACY,MAAA,IAAU,OAAO,CAAC;sCACJ,MAAA,MAAY;+BACnB,GAAG,OAAO;8BACX,GAAG,MAAM;+BACR,GAAG,OAAO;SAEhC;MAED,eAAc,KAAK,mBAAmB,MAAA,OAAa,IAAI,CAAC;AAG5D,MAAI,cAAc,QAAQ;AACxB,WAAQ,KACN,EAAE;gBACM,MAAA,IAAU,OAAO,CAAC;;;;6BAIL,cAAc;;;;;;;;;;;;MAapC;AACD,MAAG,OACD,YAAY,WAAW,KAAK,SAAS,cAAc,OAAO,YACxD,WAAW,OAAO,cAAc,OACjC,WACF;;AAEH,SAAO"}
1
+ {"version":3,"file":"row-record-cache.js","names":["#lc","#db","#schema","#cvrID","#failService","#deferredRowFlushThreshold","#setTimeout","#pending","#cvrFlushTime","#cvrRowsFlushed","#ensureLoaded","#cache","#cvr","#pendingRowsVersion","#flushing","#flush","#flushedRowsVersion","#recordAsyncFlushStats"],"sources":["../../../../../../zero-cache/src/services/view-syncer/row-record-cache.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {type Resolver, resolver} from '@rocicorp/resolver';\nimport type {PendingQuery, Row} from 'postgres';\nimport {startAsyncSpan} from '../../../../otel/src/span.ts';\nimport {CustomKeyMap} from '../../../../shared/src/custom-key-map.ts';\nimport {must} from '../../../../shared/src/must.ts';\nimport {promiseVoid} from '../../../../shared/src/resolved-promises.ts';\nimport * as Mode from '../../db/mode-enum.ts';\nimport {runTx} from '../../db/run-transaction.ts';\nimport {TransactionPool} from '../../db/transaction-pool.ts';\nimport {\n getOrCreateCounter,\n getOrCreateLatencyHistogram,\n} from '../../observability/metrics.ts';\nimport {type PostgresDB, type PostgresTransaction} from '../../types/pg.ts';\nimport {rowIDString} from '../../types/row-key.ts';\nimport {cvrSchema, type ShardID} from '../../types/shards.ts';\nimport {checkVersion, type CVRFlushStats} from './cvr-store.ts';\nimport type {CVRSnapshot} from './cvr.ts';\nimport {\n rowRecordToRowsRow,\n type RowsRow,\n rowsRowToRowRecord,\n} from './schema/cvr.ts';\nimport {\n cmpVersions,\n type CVRVersion,\n type NullableCVRVersion,\n type RowID,\n type RowRecord,\n versionString,\n versionToNullableCookie,\n} from './schema/types.ts';\nimport {tracer} from './tracer.ts';\n\nconst FLUSH_TYPE_ATTRIBUTE = 'flush.type';\n\n/**\n * The RowRecordCache is an in-memory cache of the `cvr.rows` tables that\n * operates as both a write-through and write-back cache.\n *\n * For \"small\" CVR updates (i.e. zero or small numbers of rows) the\n * RowRecordCache operates as write-through, executing commits in\n * {@link executeRowUpdates()} before they are {@link apply}-ed to the\n * in-memory state.\n *\n * For \"large\" CVR updates (i.e. with many rows), the cache switches to a\n * write-back mode of operation, in which {@link executeRowUpdates()} is a\n * no-op, and {@link apply()} initiates a background task to flush the pending\n * row changes to the store. This allows the client poke to be completed and\n * committed on the client without waiting for the heavyweight operation of\n * committing the row records to the CVR store.\n *\n * Note that when the cache is in write-back mode, all updates become\n * write-back (i.e. asynchronously flushed) until the pending update queue is\n * fully flushed. This is required because updates must be applied in version\n * order. As with all pending work systems in zero-cache, multiple pending\n * updates are coalesced to reduce buildup of work.\n *\n * ### High level consistency\n *\n * Note that the above caching scheme only applies to the row data in `cvr.rows`\n * and corresponding `cvr.rowsVersion` tables. CVR metadata and query\n * information, on the other hand, are always committed before completing the\n * client poke. 
In this manner, the difference between the `version` column in\n * `cvr.instances` and the analogous column in `cvr.rowsVersion` determines\n * whether the data in the store is consistent, or whether it is awaiting a\n * pending update.\n *\n * The logic in {@link CVRStore#load()} takes this into account by loading both\n * the `cvr.instances` version and the `cvr.rowsVersion` version and checking\n * if they are in sync, waiting for a configurable delay until they are.\n *\n * ### Eventual conversion\n *\n * In the event of a continual stream of mutations (e.g. an animation-style\n * app), it is conceivable that the row record data be continually behind\n * the CVR metadata. In order to effect eventual convergence, a new view-syncer\n * signals the current view-syncer to stop updating by writing new `owner`\n * information to the `cvr.instances` row. This effectively stops the mutation\n * processing (in {@link CVRStore.#checkVersionAndOwnership}) so that the row\n * data can eventually catch up, allowing the new view-syncer to take over.\n *\n * Of course, there is the pathological situation in which a view-syncer\n * process crashes before the pending row updates are flushed. In this case,\n * the wait timeout will elapse and the CVR considered invalid.\n */\nexport class RowRecordCache {\n // The state in the #cache is always in sync with the CVR metadata\n // (i.e. cvr.instances). It may contain information that has not yet\n // been flushed to cvr.rows.\n #cache: Promise<CustomKeyMap<RowID, RowRecord>> | undefined;\n readonly #lc: LogContext;\n readonly #db: PostgresDB;\n readonly #schema: string;\n readonly #cvrID: string;\n readonly #failService: (e: unknown) => void;\n readonly #deferredRowFlushThreshold: number;\n readonly #setTimeout: typeof setTimeout;\n\n // Write-back cache state.\n readonly #pending = new CustomKeyMap<RowID, RowRecord | null>(rowIDString);\n #pendingRowsVersion: CVRVersion | null = null;\n #flushedRowsVersion: CVRVersion | null = null;\n #flushing: Resolver<void> | null = null;\n\n readonly #cvrFlushTime = getOrCreateLatencyHistogram(\n 'sync',\n 'cvr.flush-time',\n 'Time to flush a CVR transaction. 
This includes both synchronous ' +\n 'and asynchronous flushes, distinguished by the flush.type attribute.',\n );\n readonly #cvrRowsFlushed = getOrCreateCounter(\n 'sync',\n 'cvr.rows-flushed',\n 'Number of (changed) rows flushed to a CVR',\n );\n\n constructor(\n lc: LogContext,\n db: PostgresDB,\n shard: ShardID,\n cvrID: string,\n failService: (e: unknown) => void,\n deferredRowFlushThreshold = 100,\n setTimeoutFn = setTimeout,\n ) {\n this.#lc = lc;\n this.#db = db;\n this.#schema = cvrSchema(shard);\n this.#cvrID = cvrID;\n this.#failService = failService;\n this.#deferredRowFlushThreshold = deferredRowFlushThreshold;\n this.#setTimeout = setTimeoutFn;\n }\n\n recordSyncFlushStats(stats: CVRFlushStats, elapsedMs: number) {\n this.#cvrFlushTime.recordMs(elapsedMs, {\n [FLUSH_TYPE_ATTRIBUTE]: 'sync',\n });\n if (stats.rowsDeferred === 0) {\n this.#cvrRowsFlushed.add(stats.rows);\n }\n }\n\n #recordAsyncFlushStats(rows: number, elapsedMs: number) {\n this.#cvrFlushTime.recordMs(elapsedMs, {\n [FLUSH_TYPE_ATTRIBUTE]: 'async',\n });\n this.#cvrRowsFlushed.add(rows);\n }\n\n #cvr(table: string) {\n return this.#db(`${this.#schema}.${table}`);\n }\n\n async #ensureLoaded(): Promise<CustomKeyMap<RowID, RowRecord>> {\n if (this.#cache) {\n return this.#cache;\n }\n const start = Date.now();\n const r = resolver<CustomKeyMap<RowID, RowRecord>>();\n r.promise.catch(() => {});\n // Set this.#cache immediately (before await) so that only one db\n // query is made even if there are multiple callers.\n this.#cache = r.promise;\n try {\n const cache = await startAsyncSpan(\n tracer,\n 'RowRecordCache.load',\n async span => {\n const cache: CustomKeyMap<RowID, RowRecord> = new CustomKeyMap(\n rowIDString,\n );\n for await (const rows of this.#db<RowsRow[]>`\n SELECT\n \"clientGroupID\",\n \"schema\",\n \"table\",\n \"rowKey\",\n \"rowVersion\",\n \"patchVersion\",\n \"refCounts\"\n FROM ${this.#cvr(`rows`)}\n WHERE \"clientGroupID\" = ${this.#cvrID} AND \"refCounts\" IS NOT NULL`\n // TODO(arv): Arbitrary page size\n .cursor(5000)) {\n for (const row of rows) {\n const rowRecord = rowsRowToRowRecord(row);\n cache.set(rowRecord.id, rowRecord);\n }\n }\n span.setAttribute('rows', cache.size);\n return cache;\n },\n );\n this.#lc.info?.(\n `Loaded ${cache.size} row records in ${Date.now() - start} ms`,\n );\n r.resolve(cache);\n return this.#cache;\n } catch (e) {\n r.reject(e); // Make sure the error is reflected in the cached promise\n throw e;\n }\n }\n\n getRowRecords(): Promise<ReadonlyMap<RowID, RowRecord>> {\n return this.#ensureLoaded();\n }\n\n /**\n * Applies the `rowRecords` corresponding to the `rowsVersion`\n * to the cache, indicating whether the corresponding updates\n * (generated by {@link executeRowUpdates}) were `flushed`.\n *\n * If `flushed` is false, the RowRecordCache will flush the records\n * asynchronously.\n *\n * Note that `apply()` indicates that the CVR metadata associated with\n * the `rowRecords` was successfully committed, which essentially means\n * that this process has the unconditional right (and responsibility) of\n * following up with a flush of the `rowRecords`. 
In particular, the\n * commit of row records are not conditioned on the version or ownership\n * columns of the `cvr.instances` row.\n */\n async apply(\n rowRecords: Map<RowID, RowRecord | null>,\n rowsVersion: CVRVersion,\n flushed: boolean,\n ): Promise<number> {\n const cache = await this.#ensureLoaded();\n for (const [id, row] of rowRecords.entries()) {\n if (row === null || row.refCounts === null) {\n cache.delete(id);\n } else {\n cache.set(id, row);\n }\n if (!flushed) {\n this.#pending.set(id, row);\n }\n }\n this.#pendingRowsVersion = rowsVersion;\n // Initiate a flush if not already flushing.\n if (!flushed && this.#flushing === null) {\n this.#flushing = resolver();\n // The #flush() method handles propagating errors to #failService.\n // Attach a rejection handler to this promise to avoid unhandled\n // rejections.\n this.#flushing.promise.catch(() => {});\n this.#setTimeout(() => this.#flush(), 0);\n }\n return cache.size;\n }\n\n async #flush() {\n const flushing = must(this.#flushing);\n try {\n while (this.#pendingRowsVersion !== this.#flushedRowsVersion) {\n const start = performance.now();\n\n const {rows, rowsVersion} = await runTx(\n this.#db,\n tx => {\n // Note: This code block is synchronous, guaranteeing that the\n // #pendingRowsVersion is consistent with the #pending rows.\n const rows = this.#pending.size;\n const rowsVersion = must(this.#pendingRowsVersion);\n // Awaiting all of the individual statements incurs too much\n // overhead. Instead, just catch and log exception(s); the outer\n // transaction will properly fail.\n void Promise.all(\n this.executeRowUpdates(tx, rowsVersion, this.#pending, 'force'),\n ).catch(e => this.#lc.error?.(`error flushing cvr rows`, e));\n\n this.#pending.clear();\n return {rows, rowsVersion};\n },\n {mode: Mode.READ_COMMITTED},\n );\n const elapsed = performance.now() - start;\n this.#lc.info?.(\n `flushed ${rows} rows@${versionString(rowsVersion)} (${elapsed} ms)`,\n );\n this.#recordAsyncFlushStats(rows, elapsed);\n this.#flushedRowsVersion = rowsVersion;\n // Note: apply() may have called while the transaction was committing,\n // which will result in looping to commit the next #pendingRowsVersion.\n }\n this.#lc.info?.(\n `up to date rows@${versionToNullableCookie(this.#flushedRowsVersion)}`,\n );\n flushing.resolve();\n this.#flushing = null;\n } catch (e) {\n this.#lc.info?.(`row record flush failed`, e);\n flushing.reject(e);\n this.#failService(e);\n }\n }\n\n hasPendingUpdates() {\n return this.#flushing !== null;\n }\n\n /**\n * Returns a promise that resolves when all outstanding row-records\n * have been committed.\n */\n flushed(lc: LogContext): Promise<void> {\n if (this.#flushing) {\n lc.debug?.('awaiting pending row flush');\n return this.#flushing.promise;\n }\n return promiseVoid;\n }\n\n clear() {\n // Note: Only the #cache is cleared. #pending updates, on the other hand,\n // comprise canonical (i.e. already flushed) data and must be flushed\n // even if the snapshot of the present state (the #cache) is cleared.\n this.#cache = undefined;\n }\n\n async *catchupRowPatches(\n lc: LogContext,\n afterVersion: NullableCVRVersion,\n upToCVR: CVRSnapshot,\n current: CVRVersion,\n excludeQueryHashes: string[] = [],\n ): AsyncGenerator<RowsRow[], void, undefined> {\n if (cmpVersions(afterVersion, upToCVR.version) >= 0) {\n return;\n }\n\n const startMs = Date.now();\n const start = afterVersion ? 
versionString(afterVersion) : '';\n const end = versionString(upToCVR.version);\n lc.debug?.(`scanning row patches for clients from ${start}`);\n\n // Before accessing the CVR db, pending row records must be flushed.\n // Note that because catchupRowPatches() is called from within the\n // view syncer lock, this flush is guaranteed to complete since no\n // new CVR updates can happen while the lock is held.\n await this.flushed(lc);\n const flushMs = Date.now() - startMs;\n\n const reader = new TransactionPool(lc, {mode: Mode.READONLY}).run(this.#db);\n try {\n // Verify that we are reading the right version of the CVR.\n await reader.processReadTask(tx =>\n checkVersion(tx, this.#schema, this.#cvrID, current),\n );\n\n const {query} = await reader.processReadTask(tx => {\n const query =\n excludeQueryHashes.length === 0\n ? tx<RowsRow[]>`SELECT\n \"clientGroupID\",\n \"schema\",\n \"table\",\n \"rowKey\",\n \"rowVersion\",\n \"patchVersion\",\n \"refCounts\"\n FROM ${this.#cvr('rows')}\n WHERE \"clientGroupID\" = ${this.#cvrID}\n AND \"patchVersion\" > ${start}\n AND \"patchVersion\" <= ${end}`\n : // Exclude rows that were already sent as part of query hydration.\n tx<RowsRow[]>`SELECT\n \"clientGroupID\",\n \"schema\",\n \"table\",\n \"rowKey\",\n \"rowVersion\",\n \"patchVersion\",\n \"refCounts\"\n FROM ${this.#cvr('rows')}\n WHERE \"clientGroupID\" = ${this.#cvrID}\n AND \"patchVersion\" > ${start}\n AND \"patchVersion\" <= ${end}\n AND (\"refCounts\" IS NULL OR NOT \"refCounts\" ?| ${excludeQueryHashes})`;\n return {query};\n });\n\n yield* query.cursor(10000);\n } finally {\n reader.setDone();\n }\n\n const totalMs = Date.now() - startMs;\n lc.info?.(\n `finished row catchup (flush: ${flushMs} ms, total: ${totalMs} ms)`,\n );\n }\n\n executeRowUpdates(\n tx: PostgresTransaction,\n version: CVRVersion,\n rowUpdates: Map<RowID, RowRecord | null>,\n mode: 'allow-defer' | 'force',\n lc = this.#lc,\n ): PendingQuery<Row[]>[] {\n if (\n mode === 'allow-defer' &&\n // defer if pending rows are being flushed\n (this.#flushing !== null ||\n // or if the new batch is above the limit.\n rowUpdates.size > this.#deferredRowFlushThreshold)\n ) {\n return [];\n }\n const rowsVersion = {\n clientGroupID: this.#cvrID,\n version: versionString(version),\n };\n const pending: PendingQuery<Row[]>[] = [\n tx`INSERT INTO ${this.#cvr('rowsVersion')} ${tx(rowsVersion)}\n ON CONFLICT (\"clientGroupID\") \n DO UPDATE SET ${tx(rowsVersion)}`,\n ];\n\n const rowRecordRows: RowsRow[] = [];\n for (const [id, row] of rowUpdates.entries()) {\n if (row === null) {\n pending.push(\n tx`\n DELETE FROM ${this.#cvr('rows')}\n WHERE \"clientGroupID\" = ${this.#cvrID}\n AND \"schema\" = ${id.schema}\n AND \"table\" = ${id.table}\n AND \"rowKey\" = ${id.rowKey}\n `,\n );\n } else {\n rowRecordRows.push(rowRecordToRowsRow(this.#cvrID, row));\n }\n }\n if (rowRecordRows.length) {\n pending.push(\n tx`\n INSERT INTO ${this.#cvr('rows')}(\n \"clientGroupID\", \"schema\", \"table\", \"rowKey\", \"rowVersion\", \"patchVersion\", \"refCounts\"\n ) SELECT\n \"clientGroupID\", \"schema\", \"table\", \"rowKey\", \"rowVersion\", \"patchVersion\", \"refCounts\"\n FROM json_to_recordset(${rowRecordRows}) AS x(\n \"clientGroupID\" TEXT,\n \"schema\" TEXT,\n \"table\" TEXT,\n \"rowKey\" JSONB,\n \"rowVersion\" TEXT,\n \"patchVersion\" TEXT,\n \"refCounts\" JSONB\n ) ON CONFLICT (\"clientGroupID\", \"schema\", \"table\", \"rowKey\")\n DO UPDATE SET \"rowVersion\" = excluded.\"rowVersion\",\n \"patchVersion\" = 
excluded.\"patchVersion\",\n \"refCounts\" = excluded.\"refCounts\"\n `,\n );\n lc.info?.(\n `flushing ${rowUpdates.size} rows (${rowRecordRows.length} inserts, ${\n rowUpdates.size - rowRecordRows.length\n } deletes)`,\n );\n }\n return pending;\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;AAmCA,IAAM,uBAAuB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAoD7B,IAAa,iBAAb,MAA4B;CAI1B;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CAGA,WAAoB,IAAI,aAAsC,YAAY;CAC1E,sBAAyC;CACzC,sBAAyC;CACzC,YAAmC;CAEnC,gBAAyB,4BACvB,QACA,kBACA,uIAED;CACD,kBAA2B,mBACzB,QACA,oBACA,4CACD;CAED,YACE,IACA,IACA,OACA,OACA,aACA,4BAA4B,KAC5B,eAAe,YACf;AACA,QAAA,KAAW;AACX,QAAA,KAAW;AACX,QAAA,SAAe,UAAU,MAAM;AAC/B,QAAA,QAAc;AACd,QAAA,cAAoB;AACpB,QAAA,4BAAkC;AAClC,QAAA,aAAmB;;CAGrB,qBAAqB,OAAsB,WAAmB;AAC5D,QAAA,aAAmB,SAAS,WAAW,GACpC,uBAAuB,QACzB,CAAC;AACF,MAAI,MAAM,iBAAiB,EACzB,OAAA,eAAqB,IAAI,MAAM,KAAK;;CAIxC,uBAAuB,MAAc,WAAmB;AACtD,QAAA,aAAmB,SAAS,WAAW,GACpC,uBAAuB,SACzB,CAAC;AACF,QAAA,eAAqB,IAAI,KAAK;;CAGhC,KAAK,OAAe;AAClB,SAAO,MAAA,GAAS,GAAG,MAAA,OAAa,GAAG,QAAQ;;CAG7C,OAAA,eAA+D;AAC7D,MAAI,MAAA,MACF,QAAO,MAAA;EAET,MAAM,QAAQ,KAAK,KAAK;EACxB,MAAM,IAAI,UAA0C;AACpD,IAAE,QAAQ,YAAY,GAAG;AAGzB,QAAA,QAAc,EAAE;AAChB,MAAI;GACF,MAAM,QAAQ,MAAM,eAClB,QACA,uBACA,OAAM,SAAQ;IACZ,MAAM,QAAwC,IAAI,aAChD,YACD;AACD,eAAW,MAAM,QAAQ,MAAA,EAAmB;;;;;;;;;mBASnC,MAAA,IAAU,OAAO,CAAC;wCACG,MAAA,MAAY,8BAEvC,OAAO,IAAK,CACb,MAAK,MAAM,OAAO,MAAM;KACtB,MAAM,YAAY,mBAAmB,IAAI;AACzC,WAAM,IAAI,UAAU,IAAI,UAAU;;AAGtC,SAAK,aAAa,QAAQ,MAAM,KAAK;AACrC,WAAO;KAEV;AACD,SAAA,GAAS,OACP,UAAU,MAAM,KAAK,kBAAkB,KAAK,KAAK,GAAG,MAAM,KAC3D;AACD,KAAE,QAAQ,MAAM;AAChB,UAAO,MAAA;WACA,GAAG;AACV,KAAE,OAAO,EAAE;AACX,SAAM;;;CAIV,gBAAwD;AACtD,SAAO,MAAA,cAAoB;;;;;;;;;;;;;;;;;CAkB7B,MAAM,MACJ,YACA,aACA,SACiB;EACjB,MAAM,QAAQ,MAAM,MAAA,cAAoB;AACxC,OAAK,MAAM,CAAC,IAAI,QAAQ,WAAW,SAAS,EAAE;AAC5C,OAAI,QAAQ,QAAQ,IAAI,cAAc,KACpC,OAAM,OAAO,GAAG;OAEhB,OAAM,IAAI,IAAI,IAAI;AAEpB,OAAI,CAAC,QACH,OAAA,QAAc,IAAI,IAAI,IAAI;;AAG9B,QAAA,qBAA2B;AAE3B,MAAI,CAAC,WAAW,MAAA,aAAmB,MAAM;AACvC,SAAA,WAAiB,UAAU;AAI3B,SAAA,SAAe,QAAQ,YAAY,GAAG;AACtC,SAAA,iBAAuB,MAAA,OAAa,EAAE,EAAE;;AAE1C,SAAO,MAAM;;CAGf,OAAA,QAAe;EACb,MAAM,WAAW,KAAK,MAAA,SAAe;AACrC,MAAI;AACF,UAAO,MAAA,uBAA6B,MAAA,oBAA0B;IAC5D,MAAM,QAAQ,YAAY,KAAK;IAE/B,MAAM,EAAC,MAAM,gBAAe,MAAM,MAChC,MAAA,KACA,OAAM;KAGJ,MAAM,OAAO,MAAA,QAAc;KAC3B,MAAM,cAAc,KAAK,MAAA,mBAAyB;AAI7C,aAAQ,IACX,KAAK,kBAAkB,IAAI,aAAa,MAAA,SAAe,QAAQ,CAChE,CAAC,OAAM,MAAK,MAAA,GAAS,QAAQ,2BAA2B,EAAE,CAAC;AAE5D,WAAA,QAAc,OAAO;AACrB,YAAO;MAAC;MAAM;MAAY;OAE5B,EAAC,MAAM,gBAAoB,CAC5B;IACD,MAAM,UAAU,YAAY,KAAK,GAAG;AACpC,UAAA,GAAS,OACP,WAAW,KAAK,QAAQ,cAAc,YAAY,CAAC,IAAI,QAAQ,MAChE;AACD,UAAA,sBAA4B,MAAM,QAAQ;AAC1C,UAAA,qBAA2B;;AAI7B,SAAA,GAAS,OACP,mBAAmB,wBAAwB,MAAA,mBAAyB,GACrE;AACD,YAAS,SAAS;AAClB,SAAA,WAAiB;WACV,GAAG;AACV,SAAA,GAAS,OAAO,2BAA2B,EAAE;AAC7C,YAAS,OAAO,EAAE;AAClB,SAAA,YAAkB,EAAE;;;CAIxB,oBAAoB;AAClB,SAAO,MAAA,aAAmB;;;;;;CAO5B,QAAQ,IAA+B;AACrC,MAAI,MAAA,UAAgB;AAClB,MAAG,QAAQ,6BAA6B;AACxC,UAAO,MAAA,SAAe;;AAExB,SAAO;;CAGT,QAAQ;AAIN,QAAA,QAAc,KAAA;;CAGhB,OAAO,kBACL,IACA,cACA,SACA,SACA,qBAA+B,EAAE,EACW;AAC5C,MAAI,YAAY,cAAc,QAAQ,QAAQ,IAAI,EAChD;EAGF,MAAM,UAAU,KAAK,KAAK;EAC1B,MAAM,QAAQ,eAAe,cAAc,aAAa,GAAG;EAC3D,MAAM,MAAM,cAAc,QAAQ,QAAQ;AAC1C,KAAG,QAAQ,yCAAyC,QAAQ;AAM5D,QAAM,KAAK,QAAQ,GAAG;EACtB,MAAM,UAAU,KAAK,KAAK,GAAG;EAE7B,MAAM,SAAS,IAAI,gBAAgB,IAAI,EAAC,MAAM,UAAc,CAAC,CAAC,IAAI,MAAA,GAAS;AAC3E,MAAI;AAEF,SAAM,OAAO,iBAAgB,OAC3B,aAAa,IAAI,MAAA,QAAc,MAAA,OAAa,QAAQ,CACrD;GAED,MAAM,EAAC,UAAS,MAAM,OAAO,iBAAgB,OAAM;AA6BjD,WAAO,EAAC,OA3BN,mBAAmB,WAAW,IAC1B,EAAa;;;;;;;;mBAQR,MAAA,IAAU,OAAO,CAAC;kCACH,MAAA,MAAY;iCACb,M
AAM;kCACL,QAEpB,EAAa;;;;;;;;mBAQR,MAAA,IAAU,OAAO,CAAC;kCACH,MAAA,MAAY;iCACb,MAAM;kCACL,IAAI;2DACqB,mBAAmB,IACxD;KACd;AAEF,UAAO,MAAM,OAAO,IAAM;YAClB;AACR,UAAO,SAAS;;EAGlB,MAAM,UAAU,KAAK,KAAK,GAAG;AAC7B,KAAG,OACD,gCAAgC,QAAQ,cAAc,QAAQ,MAC/D;;CAGH,kBACE,IACA,SACA,YACA,MACA,KAAK,MAAA,IACkB;AACvB,MACE,SAAS,kBAER,MAAA,aAAmB,QAElB,WAAW,OAAO,MAAA,2BAEpB,QAAO,EAAE;EAEX,MAAM,cAAc;GAClB,eAAe,MAAA;GACf,SAAS,cAAc,QAAQ;GAChC;EACD,MAAM,UAAiC,CACrC,EAAE,eAAe,MAAA,IAAU,cAAc,CAAC,GAAG,GAAG,YAAY,CAAC;;2BAExC,GAAG,YAAY,GACrC;EAED,MAAM,gBAA2B,EAAE;AACnC,OAAK,MAAM,CAAC,IAAI,QAAQ,WAAW,SAAS,CAC1C,KAAI,QAAQ,KACV,SAAQ,KACN,EAAE;wBACY,MAAA,IAAU,OAAO,CAAC;sCACJ,MAAA,MAAY;+BACnB,GAAG,OAAO;8BACX,GAAG,MAAM;+BACR,GAAG,OAAO;SAEhC;MAED,eAAc,KAAK,mBAAmB,MAAA,OAAa,IAAI,CAAC;AAG5D,MAAI,cAAc,QAAQ;AACxB,WAAQ,KACN,EAAE;gBACM,MAAA,IAAU,OAAO,CAAC;;;;6BAIL,cAAc;;;;;;;;;;;;MAapC;AACD,MAAG,OACD,YAAY,WAAW,KAAK,SAAS,cAAc,OAAO,YACxD,WAAW,OAAO,cAAc,OACjC,WACF;;AAEH,SAAO"}
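The source-map diff above embeds the full `row-record-cache.ts` source, whose doc comment describes a cache that is write-through for small CVR updates and write-back for large ones, with pending row updates coalesced and flushed in the background in version order. A minimal, illustrative sketch of that coalescing write-back idea — not the package's implementation; every name below is invented:

```ts
// Updates for the same key collapse into one pending entry; a single background
// drain flushes batches until nothing is pending.
type Key = string;
type Value = unknown;

class WriteBackQueue {
  readonly #pending = new Map<Key, Value | null>();
  #flushing: Promise<void> | null = null;

  constructor(
    readonly flush: (batch: ReadonlyMap<Key, Value | null>) => Promise<void>,
  ) {}

  /** Record updates and make sure a background flush is scheduled. */
  apply(updates: ReadonlyMap<Key, Value | null>): Promise<void> {
    for (const [key, value] of updates) {
      this.#pending.set(key, value); // later writes to the same key coalesce
    }
    this.#flushing ??= Promise.resolve().then(() => this.#drain());
    return this.#flushing;
  }

  async #drain(): Promise<void> {
    try {
      // apply() may add entries while a batch commits; keep looping until drained.
      while (this.#pending.size > 0) {
        const batch = new Map(this.#pending);
        this.#pending.clear();
        await this.flush(batch);
      }
    } finally {
      this.#flushing = null;
    }
  }
}
```

Because later updates to a key overwrite earlier ones, a sustained burst of mutations grows the pending set only to the number of distinct rows, which mirrors the coalescing behavior the embedded doc comment calls out.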
@@ -2,7 +2,7 @@
2
2
  /**
3
3
  * The current version of Zero.
4
4
  */
5
- var version = "1.4.0-canary.5";
5
+ var version = "1.4.0";
6
6
  //#endregion
7
7
  export { version };
8
8
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@rocicorp/zero",
3
- "version": "1.4.0-canary.5",
3
+ "version": "1.4.0",
4
4
  "description": "Zero is a web framework for serverless web development.",
5
5
  "homepage": "https://zero.rocicorp.dev",
6
6
  "bugs": {