@rocicorp/zero 1.0.1-canary.0 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/out/_virtual/{_@oxc-project_runtime@0.115.0 → _@oxc-project_runtime@0.122.0}/helpers/usingCtx.js +1 -1
- package/out/replicache/src/mutation-recovery.js +0 -3
- package/out/zero/package.js +7 -6
- package/out/zero/package.js.map +1 -1
- package/out/zero-cache/src/services/analyze.js +1 -1
- package/out/zero-cache/src/services/change-source/pg/change-source.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/change-source.js +37 -16
- package/out/zero-cache/src/services/change-source/pg/change-source.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/lsn.js +1 -1
- package/out/zero-cache/src/services/life-cycle.d.ts.map +1 -1
- package/out/zero-cache/src/services/life-cycle.js +6 -2
- package/out/zero-cache/src/services/life-cycle.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/inspect-handler.js +1 -1
- package/out/zero-cache/src/workers/replicator.d.ts.map +1 -1
- package/out/zero-cache/src/workers/replicator.js +1 -0
- package/out/zero-cache/src/workers/replicator.js.map +1 -1
- package/out/zero-client/src/client/version.js +1 -1
- package/out/zql/src/builder/builder.d.ts.map +1 -1
- package/out/zql/src/builder/builder.js +15 -5
- package/out/zql/src/builder/builder.js.map +1 -1
- package/out/zql/src/ivm/cap.d.ts +32 -0
- package/out/zql/src/ivm/cap.d.ts.map +1 -0
- package/out/zql/src/ivm/cap.js +226 -0
- package/out/zql/src/ivm/cap.js.map +1 -0
- package/out/zql/src/ivm/join-utils.d.ts +2 -0
- package/out/zql/src/ivm/join-utils.d.ts.map +1 -1
- package/out/zql/src/ivm/join-utils.js +35 -1
- package/out/zql/src/ivm/join-utils.js.map +1 -1
- package/out/zql/src/ivm/join.d.ts.map +1 -1
- package/out/zql/src/ivm/join.js +6 -2
- package/out/zql/src/ivm/join.js.map +1 -1
- package/out/zql/src/ivm/memory-source.d.ts +15 -2
- package/out/zql/src/ivm/memory-source.d.ts.map +1 -1
- package/out/zql/src/ivm/memory-source.js +69 -8
- package/out/zql/src/ivm/memory-source.js.map +1 -1
- package/out/zql/src/ivm/schema.d.ts +1 -1
- package/out/zql/src/ivm/schema.d.ts.map +1 -1
- package/out/zql/src/ivm/skip.d.ts.map +1 -1
- package/out/zql/src/ivm/skip.js +3 -0
- package/out/zql/src/ivm/skip.js.map +1 -1
- package/out/zql/src/ivm/source.d.ts +1 -1
- package/out/zql/src/ivm/source.d.ts.map +1 -1
- package/out/zql/src/ivm/take.d.ts +4 -1
- package/out/zql/src/ivm/take.d.ts.map +1 -1
- package/out/zql/src/ivm/take.js +4 -2
- package/out/zql/src/ivm/take.js.map +1 -1
- package/out/zql/src/ivm/union-fan-in.d.ts.map +1 -1
- package/out/zql/src/ivm/union-fan-in.js +1 -0
- package/out/zql/src/ivm/union-fan-in.js.map +1 -1
- package/out/zqlite/src/query-builder.d.ts +1 -1
- package/out/zqlite/src/query-builder.d.ts.map +1 -1
- package/out/zqlite/src/query-builder.js +7 -2
- package/out/zqlite/src/query-builder.js.map +1 -1
- package/out/zqlite/src/table-source.d.ts +1 -1
- package/out/zqlite/src/table-source.d.ts.map +1 -1
- package/out/zqlite/src/table-source.js +15 -10
- package/out/zqlite/src/table-source.js.map +1 -1
- package/package.json +7 -6
- package/out/replicache/src/mutation-recovery.js.map +0 -1
package/out/zero/package.js
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
var package_default = {
|
|
2
2
|
name: "@rocicorp/zero",
|
|
3
|
-
version: "1.
|
|
3
|
+
version: "1.1.0",
|
|
4
4
|
description: "Zero is a web framework for serverless web development.",
|
|
5
5
|
author: "Rocicorp, Inc.",
|
|
6
6
|
repository: {
|
|
@@ -77,17 +77,18 @@ var package_default = {
|
|
|
77
77
|
},
|
|
78
78
|
devDependencies: {
|
|
79
79
|
"@op-engineering/op-sqlite": ">=15",
|
|
80
|
-
"@vitest/runner": "4.1.
|
|
80
|
+
"@vitest/runner": "4.1.2",
|
|
81
81
|
"analyze-query": "0.0.0",
|
|
82
82
|
"ast-to-zql": "0.0.0",
|
|
83
83
|
"expo-sqlite": ">=15",
|
|
84
84
|
"replicache": "15.2.1",
|
|
85
85
|
"shared": "0.0.0",
|
|
86
|
+
"syncpack": "^14.2.1",
|
|
86
87
|
"typedoc": "^0.28.17",
|
|
87
88
|
"typedoc-plugin-markdown": "^4.10.0",
|
|
88
89
|
"typescript": "~5.9.3",
|
|
89
|
-
"vite": "8.0.
|
|
90
|
-
"vitest": "4.1.
|
|
90
|
+
"vite": "8.0.3",
|
|
91
|
+
"vitest": "4.1.2",
|
|
91
92
|
"zero-cache": "0.0.0",
|
|
92
93
|
"zero-client": "0.0.0",
|
|
93
94
|
"zero-pg": "0.0.0",
|
|
@@ -98,8 +99,8 @@ var package_default = {
|
|
|
98
99
|
"zqlite": "0.0.0"
|
|
99
100
|
},
|
|
100
101
|
peerDependencies: {
|
|
101
|
-
"
|
|
102
|
-
"
|
|
102
|
+
"@op-engineering/op-sqlite": ">=15",
|
|
103
|
+
"expo-sqlite": ">=15"
|
|
103
104
|
},
|
|
104
105
|
peerDependenciesMeta: {
|
|
105
106
|
"expo-sqlite": { "optional": true },
|
package/out/zero/package.js.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"package.js","names":[],"sources":["../../package.json"],"sourcesContent":["{\n \"name\": \"@rocicorp/zero\",\n \"version\": \"1.
|
|
1
|
+
{"version":3,"file":"package.js","names":[],"sources":["../../package.json"],"sourcesContent":["{\n \"name\": \"@rocicorp/zero\",\n \"version\": \"1.1.0\",\n \"description\": \"Zero is a web framework for serverless web development.\",\n \"author\": \"Rocicorp, Inc.\",\n \"repository\": {\n \"type\": \"git\",\n \"url\": \"git+https://github.com/rocicorp/mono.git\",\n \"directory\": \"packages/zero\"\n },\n \"license\": \"Apache-2.0\",\n \"homepage\": \"https://zero.rocicorp.dev\",\n \"bugs\": {\n \"url\": \"https://bugs.rocicorp.dev\"\n },\n \"scripts\": {\n \"build\": \"node --experimental-strip-types --no-warnings tool/build.ts\",\n \"build:watch\": \"node --experimental-strip-types --no-warnings tool/build.ts --watch\",\n \"check-types\": \"tsc -p tsconfig.client.json && tsc -p tsconfig.server.json\",\n \"check-types:client:watch\": \"tsc -p tsconfig.client.json --watch\",\n \"check-types:server:watch\": \"tsc -p tsconfig.server.json --watch\",\n \"format\": \"prettier --write .\",\n \"check-format\": \"prettier --check .\",\n \"lint\": \"oxlint --type-aware src/\",\n \"docs\": \"node --experimental-strip-types --no-warnings tool/generate-docs.ts\",\n \"docs:server\": \"node --watch --experimental-strip-types --no-warnings tool/generate-docs.ts --server\"\n },\n \"dependencies\": {\n \"@badrap/valita\": \"0.3.11\",\n \"@databases/escape-identifier\": \"^1.0.3\",\n \"@databases/sql\": \"^3.3.0\",\n \"@dotenvx/dotenvx\": \"^1.39.0\",\n \"@drdgvhbh/postgres-error-codes\": \"^0.0.6\",\n \"@fastify/cors\": \"^10.0.0\",\n \"@fastify/websocket\": \"^11.0.0\",\n \"@google-cloud/precise-date\": \"^4.0.0\",\n \"@opentelemetry/api\": \"^1.9.0\",\n \"@opentelemetry/api-logs\": \"^0.203.0\",\n \"@opentelemetry/auto-instrumentations-node\": \"^0.62.0\",\n \"@opentelemetry/exporter-metrics-otlp-http\": \"^0.203.0\",\n \"@opentelemetry/resources\": \"^2.0.1\",\n \"@opentelemetry/sdk-metrics\": \"^2.0.1\",\n \"@opentelemetry/sdk-node\": \"^0.203.0\",\n 
\"@opentelemetry/sdk-trace-node\": \"^2.0.1\",\n \"@postgresql-typed/oids\": \"^0.2.0\",\n \"@rocicorp/lock\": \"^1.0.4\",\n \"@rocicorp/logger\": \"^5.4.0\",\n \"@rocicorp/resolver\": \"^1.0.2\",\n \"@rocicorp/zero-sqlite3\": \"^1.0.15\",\n \"@standard-schema/spec\": \"^1.0.0\",\n \"@types/basic-auth\": \"^1.1.8\",\n \"@types/ws\": \"^8.5.12\",\n \"basic-auth\": \"^2.0.1\",\n \"chalk\": \"^5.3.0\",\n \"chalk-template\": \"^1.1.0\",\n \"chokidar\": \"^4.0.1\",\n \"cloudevents\": \"^10.0.0\",\n \"command-line-args\": \"^6.0.1\",\n \"command-line-usage\": \"^7.0.3\",\n \"compare-utf8\": \"^0.1.1\",\n \"defu\": \"^6.1.4\",\n \"eventemitter3\": \"^5.0.1\",\n \"fastify\": \"^5.0.0\",\n \"is-in-subnet\": \"^4.0.1\",\n \"jose\": \"^5.9.3\",\n \"js-xxhash\": \"^4.0.0\",\n \"json-custom-numbers\": \"^3.1.1\",\n \"kasi\": \"^1.1.0\",\n \"nanoid\": \"^5.1.2\",\n \"parse-prometheus-text-format\": \"^1.1.1\",\n \"pg-format\": \"npm:pg-format-fix@^1.0.5\",\n \"postgres\": \"3.4.7\",\n \"prettier\": \"^3.8.1\",\n \"semver\": \"^7.5.4\",\n \"tsx\": \"^4.21.0\",\n \"url-pattern\": \"^1.0.3\",\n \"urlpattern-polyfill\": \"^10.1.0\",\n \"ws\": \"^8.18.1\"\n },\n \"devDependencies\": {\n \"@op-engineering/op-sqlite\": \">=15\",\n \"@vitest/runner\": \"4.1.2\",\n \"analyze-query\": \"0.0.0\",\n \"ast-to-zql\": \"0.0.0\",\n \"expo-sqlite\": \">=15\",\n \"replicache\": \"15.2.1\",\n \"shared\": \"0.0.0\",\n \"syncpack\": \"^14.2.1\",\n \"typedoc\": \"^0.28.17\",\n \"typedoc-plugin-markdown\": \"^4.10.0\",\n \"typescript\": \"~5.9.3\",\n \"vite\": \"8.0.3\",\n \"vitest\": \"4.1.2\",\n \"zero-cache\": \"0.0.0\",\n \"zero-client\": \"0.0.0\",\n \"zero-pg\": \"0.0.0\",\n \"zero-protocol\": \"0.0.0\",\n \"zero-react\": \"0.0.0\",\n \"zero-server\": \"0.0.0\",\n \"zero-solid\": \"0.0.0\",\n \"zqlite\": \"0.0.0\"\n },\n \"peerDependencies\": {\n \"@op-engineering/op-sqlite\": \">=15\",\n \"expo-sqlite\": \">=15\"\n },\n \"peerDependenciesMeta\": {\n \"expo-sqlite\": {\n \"optional\": true\n 
},\n \"@op-engineering/op-sqlite\": {\n \"optional\": true\n }\n },\n \"type\": \"module\",\n \"main\": \"out/zero/src/zero.js\",\n \"module\": \"out/zero/src/zero.js\",\n \"types\": \"out/zero/src/zero.d.ts\",\n \"exports\": {\n \".\": {\n \"types\": \"./out/zero/src/zero.d.ts\",\n \"default\": \"./out/zero/src/zero.js\"\n },\n \"./bindings\": {\n \"types\": \"./out/zero/src/bindings.d.ts\",\n \"default\": \"./out/zero/src/bindings.js\"\n },\n \"./change-protocol/v0\": {\n \"types\": \"./out/zero/src/change-protocol/v0.d.ts\",\n \"default\": \"./out/zero/src/change-protocol/v0.js\"\n },\n \"./expo-sqlite\": {\n \"types\": \"./out/zero/src/expo-sqlite.d.ts\",\n \"default\": \"./out/zero/src/expo-sqlite.js\"\n },\n \"./op-sqlite\": {\n \"types\": \"./out/zero/src/op-sqlite.d.ts\",\n \"default\": \"./out/zero/src/op-sqlite.js\"\n },\n \"./pg\": {\n \"types\": \"./out/zero/src/pg.d.ts\",\n \"default\": \"./out/zero/src/pg.js\"\n },\n \"./react\": {\n \"types\": \"./out/zero/src/react.d.ts\",\n \"default\": \"./out/zero/src/react.js\"\n },\n \"./react-native\": {\n \"types\": \"./out/zero/src/react-native.d.ts\",\n \"default\": \"./out/zero/src/react-native.js\"\n },\n \"./server\": {\n \"types\": \"./out/zero/src/server.d.ts\",\n \"default\": \"./out/zero/src/server.js\"\n },\n \"./server/adapters/drizzle\": {\n \"types\": \"./out/zero/src/adapters/drizzle.d.ts\",\n \"default\": \"./out/zero/src/adapters/drizzle.js\"\n },\n \"./server/adapters/prisma\": {\n \"types\": \"./out/zero/src/adapters/prisma.d.ts\",\n \"default\": \"./out/zero/src/adapters/prisma.js\"\n },\n \"./server/adapters/pg\": {\n \"types\": \"./out/zero/src/adapters/pg.d.ts\",\n \"default\": \"./out/zero/src/adapters/pg.js\"\n },\n \"./server/adapters/postgresjs\": {\n \"types\": \"./out/zero/src/adapters/postgresjs.d.ts\",\n \"default\": \"./out/zero/src/adapters/postgresjs.js\"\n },\n \"./solid\": {\n \"types\": \"./out/zero/src/solid.d.ts\",\n \"default\": \"./out/zero/src/solid.js\"\n },\n 
\"./sqlite\": {\n \"types\": \"./out/zero/src/sqlite.d.ts\",\n \"default\": \"./out/zero/src/sqlite.js\"\n },\n \"./zqlite\": {\n \"types\": \"./out/zero/src/zqlite.d.ts\",\n \"default\": \"./out/zero/src/zqlite.js\"\n }\n },\n \"bin\": {\n \"zero-build-schema\": \"./out/zero/src/build-schema.js\",\n \"zero-cache\": \"./out/zero/src/cli.js\",\n \"zero-cache-dev\": \"./out/zero/src/zero-cache-dev.js\",\n \"zero-deploy-permissions\": \"./out/zero/src/deploy-permissions.js\",\n \"zero-out\": \"./out/zero/src/zero-out.js\",\n \"ast-to-zql\": \"./out/zero/src/ast-to-zql.js\",\n \"analyze-query\": \"./out/zero/src/analyze-query.js\",\n \"transform-query\": \"./out/zero/src/transform-query.js\"\n },\n \"engines\": {\n \"node\": \">=22\"\n },\n \"files\": [\n \"out\",\n \"!*.tsbuildinfo\"\n ]\n}"],"mappings":""}
|
|
@@ -7,7 +7,7 @@ import { computeZqlSpecs, mustGetTableSpec } from "../db/lite-tables.js";
|
|
|
7
7
|
import { createSQLiteCostModel } from "../../../zqlite/src/sqlite-cost-model.js";
|
|
8
8
|
import { runAst } from "./run-ast.js";
|
|
9
9
|
import { explainQueries } from "../../../zqlite/src/explain-queries.js";
|
|
10
|
-
import { _usingCtx } from "../../../_virtual/_@oxc-project_runtime@0.
|
|
10
|
+
import { _usingCtx } from "../../../_virtual/_@oxc-project_runtime@0.122.0/helpers/usingCtx.js";
|
|
11
11
|
import { TimeSliceTimer } from "./view-syncer/view-syncer.js";
|
|
12
12
|
//#region ../zero-cache/src/services/analyze.ts
|
|
13
13
|
var TIME_SLICE_LAP_THRESHOLD_MS = 200;
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"change-source.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/change-source/pg/change-source.ts"],"names":[],"mappings":"AAIA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAejD,OAAO,KAAK,CAAC,MAAM,qCAAqC,CAAC;AAOzD,OAAO,KAAK,EAGV,kBAAkB,EACnB,MAAM,sBAAsB,CAAC;AAE9B,OAAO,EAAC,KAAK,WAAW,EAAC,MAAM,gCAAgC,CAAC;AAEhE,OAAO,EAEL,KAAK,WAAW,EAEjB,MAAM,0BAA0B,CAAC;AAKlC,OAAO,KAAK,EAAC,IAAI,EAAC,MAAM,2BAA2B,CAAC;AAEpD,OAAO,EAEL,KAAK,iBAAiB,EAEvB,MAAM,8CAA8C,CAAC;AACtD,OAAO,KAAK,EAAC,YAAY,EAAe,MAAM,qBAAqB,CAAC;AAEpE,OAAO,EAEL,KAAK,QAAQ,EACd,MAAM,wCAAwC,CAAC;AAchD,OAAO,KAAK,EAEV,mBAAmB,EAEpB,MAAM,mCAAmC,CAAC;AAG3C,OAAO,EAEL,KAAK,kBAAkB,EACvB,KAAK,aAAa,EACnB,MAAM,mBAAmB,CAAC;AAC3B,OAAO,KAAK,EAGV,eAAe,IAAI,gBAAgB,EACpC,MAAM,yCAAyC,CAAC;AA2BjD;;;;GAIG;AACH,wBAAsB,8BAA8B,CAClD,EAAE,EAAE,UAAU,EACd,WAAW,EAAE,MAAM,EACnB,KAAK,EAAE,WAAW,EAClB,aAAa,EAAE,MAAM,EACrB,WAAW,EAAE,kBAAkB,EAC/B,OAAO,EAAE,aAAa,EACtB,mBAAmB,CAAC,EAAE,MAAM,GAC3B,OAAO,CAAC;IAAC,iBAAiB,EAAE,iBAAiB,CAAC;IAAC,YAAY,EAAE,YAAY,CAAA;CAAC,CAAC,CAsC7E;
|
|
1
|
+
{"version":3,"file":"change-source.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/change-source/pg/change-source.ts"],"names":[],"mappings":"AAIA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAejD,OAAO,KAAK,CAAC,MAAM,qCAAqC,CAAC;AAOzD,OAAO,KAAK,EAGV,kBAAkB,EACnB,MAAM,sBAAsB,CAAC;AAE9B,OAAO,EAAC,KAAK,WAAW,EAAC,MAAM,gCAAgC,CAAC;AAEhE,OAAO,EAEL,KAAK,WAAW,EAEjB,MAAM,0BAA0B,CAAC;AAKlC,OAAO,KAAK,EAAC,IAAI,EAAC,MAAM,2BAA2B,CAAC;AAEpD,OAAO,EAEL,KAAK,iBAAiB,EAEvB,MAAM,8CAA8C,CAAC;AACtD,OAAO,KAAK,EAAC,YAAY,EAAe,MAAM,qBAAqB,CAAC;AAEpE,OAAO,EAEL,KAAK,QAAQ,EACd,MAAM,wCAAwC,CAAC;AAchD,OAAO,KAAK,EAEV,mBAAmB,EAEpB,MAAM,mCAAmC,CAAC;AAG3C,OAAO,EAEL,KAAK,kBAAkB,EACvB,KAAK,aAAa,EACnB,MAAM,mBAAmB,CAAC;AAC3B,OAAO,KAAK,EAGV,eAAe,IAAI,gBAAgB,EACpC,MAAM,yCAAyC,CAAC;AA2BjD;;;;GAIG;AACH,wBAAsB,8BAA8B,CAClD,EAAE,EAAE,UAAU,EACd,WAAW,EAAE,MAAM,EACnB,KAAK,EAAE,WAAW,EAClB,aAAa,EAAE,MAAM,EACrB,WAAW,EAAE,kBAAkB,EAC/B,OAAO,EAAE,aAAa,EACtB,mBAAmB,CAAC,EAAE,MAAM,GAC3B,OAAO,CAAC;IAAC,iBAAiB,EAAE,iBAAiB,CAAC;IAAC,YAAY,EAAE,YAAY,CAAA;CAAC,CAAC,CAsC7E;AAqaD,qBAAa,KAAM,YAAW,QAAQ;;gBAIxB,IAAI,EAAE,IAAI,CAAC,MAAM,CAAC;IAI9B,QAAQ,CAAC,MAAM,EAAE,mBAAmB,GAAG,IAAI;IAgC3C,GAAG,CAAC,SAAS,EAAE,WAAW;CAoB3B;AAED,QAAA,MAAM,eAAe;;;;aAInB,CAAC;AAEH,MAAM,MAAM,SAAS,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,eAAe,CAAC,CAAC;AAitBxD,wBAAgB,iBAAiB,CAAC,CAAC,EAAE,kBAAkB,EAAE,CAAC,EAAE,gBAAgB,WAwB3E"}
|
|
@@ -12,7 +12,7 @@ import { upstreamSchema } from "../../../types/shards.js";
|
|
|
12
12
|
import { StatementRunner } from "../../../db/statements.js";
|
|
13
13
|
import { pgClient } from "../../../types/pg.js";
|
|
14
14
|
import { majorVersionFromString, majorVersionToString } from "../../../types/state-version.js";
|
|
15
|
-
import { fromBigInt, toStateVersionString } from "./lsn.js";
|
|
15
|
+
import { fromBigInt, toBigInt, toStateVersionString } from "./lsn.js";
|
|
16
16
|
import { UnsupportedColumnDefaultError, mapPostgresToLiteColumn } from "../../../db/pg-to-lite.js";
|
|
17
17
|
import { getSubscriptionStateAndContext } from "../../replicator/schema/replication-state.js";
|
|
18
18
|
import { runTx } from "../../../db/run-transaction.js";
|
|
@@ -103,7 +103,7 @@ var PostgresChangeSource = class {
|
|
|
103
103
|
this.#lagReporter = lagReportIntervalMs ? new LagReporter(lc.withContext("component", "lag-reporter"), shard, this.#db, lagReportIntervalMs) : null;
|
|
104
104
|
}
|
|
105
105
|
startLagReporter() {
|
|
106
|
-
return this.#lagReporter ? this.#lagReporter.initiateLagReport() : null;
|
|
106
|
+
return this.#lagReporter ? this.#lagReporter.initiateLagReport(true) : null;
|
|
107
107
|
}
|
|
108
108
|
async startStream(clientWatermark, backfillRequests = []) {
|
|
109
109
|
const { slot } = this.#replica;
|
|
@@ -126,6 +126,11 @@ var PostgresChangeSource = class {
|
|
|
126
126
|
* non-transactional messages with a downstream status message.
|
|
127
127
|
*/
|
|
128
128
|
const isTransactionalMessage = (lsn, msg) => {
|
|
129
|
+
if (msg.tag === "message" && msg.prefix === this.#lagReporter?.messagePrefix) {
|
|
130
|
+
changes.pushStatus(this.#lagReporter.processLagReport(msg));
|
|
131
|
+
return false;
|
|
132
|
+
}
|
|
133
|
+
this.#lagReporter?.checkCurrentLSN(lsn);
|
|
129
134
|
if (msg.tag === "keepalive") {
|
|
130
135
|
changes.pushStatus([
|
|
131
136
|
"status",
|
|
@@ -134,10 +139,6 @@ var PostgresChangeSource = class {
|
|
|
134
139
|
]);
|
|
135
140
|
return false;
|
|
136
141
|
}
|
|
137
|
-
if (msg.tag === "message" && msg.prefix === this.#lagReporter?.messagePrefix) {
|
|
138
|
-
changes.pushStatus(this.#lagReporter.processLagReport(msg));
|
|
139
|
-
return false;
|
|
140
|
-
}
|
|
141
142
|
return true;
|
|
142
143
|
};
|
|
143
144
|
(async () => {
|
|
@@ -311,7 +312,7 @@ var LagReporter = class LagReporter {
|
|
|
311
312
|
#db;
|
|
312
313
|
#lagIntervalMs;
|
|
313
314
|
#pgVersion;
|
|
314
|
-
#
|
|
315
|
+
#expectingLagReport = null;
|
|
315
316
|
#timer;
|
|
316
317
|
constructor(lc, shard, db, lagIntervalMs) {
|
|
317
318
|
this.#lc = lc;
|
|
@@ -327,35 +328,54 @@ var LagReporter = class LagReporter {
|
|
|
327
328
|
}
|
|
328
329
|
return this.#pgVersion;
|
|
329
330
|
}
|
|
330
|
-
async initiateLagReport(
|
|
331
|
+
async initiateLagReport(log = false) {
|
|
331
332
|
const pgVersion = this.#pgVersion ?? await this.#getPgVersion();
|
|
332
|
-
|
|
333
|
-
|
|
333
|
+
const now = Date.now();
|
|
334
|
+
const id = nanoid();
|
|
335
|
+
const lagReport = {
|
|
336
|
+
id,
|
|
337
|
+
lsn: 0n
|
|
338
|
+
};
|
|
339
|
+
this.#expectingLagReport = lagReport;
|
|
340
|
+
let lsn;
|
|
341
|
+
if (pgVersion >= 17e4) [{lsn}] = await this.#db`
|
|
334
342
|
SELECT pg_logical_emit_message(
|
|
335
343
|
false,
|
|
336
344
|
${this.messagePrefix},
|
|
337
345
|
json_build_object(
|
|
338
|
-
'id', ${
|
|
346
|
+
'id', ${id}::text,
|
|
339
347
|
'sendTimeMs', ${now}::int8,
|
|
340
348
|
'commitTimeMs', extract(epoch from now()) * 1000
|
|
341
349
|
)::text,
|
|
342
350
|
true
|
|
343
|
-
);
|
|
351
|
+
) as lsn;
|
|
344
352
|
`;
|
|
345
|
-
else await this.#db`
|
|
353
|
+
else [{lsn}] = await this.#db`
|
|
346
354
|
SELECT pg_logical_emit_message(
|
|
347
355
|
false,
|
|
348
356
|
${this.messagePrefix},
|
|
349
357
|
json_build_object(
|
|
350
|
-
'id', ${
|
|
358
|
+
'id', ${id}::text,
|
|
351
359
|
'sendTimeMs', ${now}::int8,
|
|
352
360
|
'commitTimeMs', extract(epoch from now()) * 1000
|
|
353
361
|
)::text
|
|
354
|
-
);
|
|
362
|
+
) as lsn;
|
|
355
363
|
`;
|
|
364
|
+
lagReport.lsn = toBigInt(lsn);
|
|
365
|
+
if (log) this.#lc.info?.(`initiated lag report at lsn ${lsn}`, {
|
|
366
|
+
id,
|
|
367
|
+
lsn
|
|
368
|
+
});
|
|
356
369
|
return { nextSendTimeMs: now };
|
|
357
370
|
}
|
|
371
|
+
checkCurrentLSN(lsn) {
|
|
372
|
+
if (this.#expectingLagReport?.lsn && lsn > this.#expectingLagReport.lsn) {
|
|
373
|
+
this.#lc.warn?.(`LSN ${fromBigInt(lsn)} is passed expected lag report ${fromBigInt(this.#expectingLagReport.lsn)}. Initiating new report.`);
|
|
374
|
+
this.#scheduleNextReport(0);
|
|
375
|
+
}
|
|
376
|
+
}
|
|
358
377
|
#scheduleNextReport(delayMs) {
|
|
378
|
+
this.#expectingLagReport = null;
|
|
359
379
|
clearTimeout(this.#timer);
|
|
360
380
|
this.#timer = setTimeout(async () => {
|
|
361
381
|
try {
|
|
@@ -371,7 +391,8 @@ var LagReporter = class LagReporter {
|
|
|
371
391
|
const report = parseLogicalMessageContent(msg, lagReportSchema);
|
|
372
392
|
const now = Date.now();
|
|
373
393
|
const nextSendTimeMs = Math.max(now, report.sendTimeMs + this.#lagIntervalMs);
|
|
374
|
-
if (report.id === this.#
|
|
394
|
+
if (report.id === this.#expectingLagReport?.id) this.#scheduleNextReport(nextSendTimeMs - now);
|
|
395
|
+
else this.#lc.debug?.(`received extraneous lag report`, { report });
|
|
375
396
|
const { sendTimeMs, commitTimeMs } = report;
|
|
376
397
|
return [
|
|
377
398
|
"status",
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"change-source.js","names":["#lc","#db","#upstreamUri","#shard","#replica","#context","#lagReporter","#stopExistingReplicationSlotSubscribers","#startStream","#logCurrentReplicaInfo","#dropReplicationSlots","#acks","#expectDownstreamAck","#ackIfDownstreamIsCaughtUp","#waitingForDownstreamAck","#sendAck","#lagIntervalMs","#getPgVersion","#pgVersion","#lastReportID","#timer","#scheduleNextReport","#shardPrefix","#shardConfig","#initialSchema","#error","#logError","#makeChanges","#handleDdlMessage","#lastSnapshotInTx","#handleRelation","#replicaIdentityTimer","#preSchema","#makeSchemaChanges","#getTableChanges"],"sources":["../../../../../../../zero-cache/src/services/change-source/pg/change-source.ts"],"sourcesContent":["import {\n PG_ADMIN_SHUTDOWN,\n PG_OBJECT_IN_USE,\n} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport {nanoid} from 'nanoid';\nimport postgres from 'postgres';\nimport {AbortError} from '../../../../../shared/src/abort-error.ts';\nimport {assert} from '../../../../../shared/src/asserts.ts';\nimport {stringify} from '../../../../../shared/src/bigint-json.ts';\nimport {deepEqual} from '../../../../../shared/src/json.ts';\nimport {must} from '../../../../../shared/src/must.ts';\nimport {mapValues} from '../../../../../shared/src/objects.ts';\nimport {\n equals,\n intersection,\n symmetricDifferences,\n} from '../../../../../shared/src/set-utils.ts';\nimport {sleep} from '../../../../../shared/src/sleep.ts';\nimport * as v from '../../../../../shared/src/valita.ts';\nimport {Database} from '../../../../../zqlite/src/db.ts';\nimport {\n mapPostgresToLiteColumn,\n UnsupportedColumnDefaultError,\n} from '../../../db/pg-to-lite.ts';\nimport {runTx} from '../../../db/run-transaction.ts';\nimport type {\n ColumnSpec,\n PublishedIndexSpec,\n PublishedTableSpec,\n} from '../../../db/specs.ts';\nimport {StatementRunner} from '../../../db/statements.ts';\nimport {type LexiVersion} from 
'../../../types/lexi-version.ts';\nimport {pgClient, type PostgresDB} from '../../../types/pg.ts';\nimport {\n upstreamSchema,\n type ShardConfig,\n type ShardID,\n} from '../../../types/shards.ts';\nimport {\n majorVersionFromString,\n majorVersionToString,\n} from '../../../types/state-version.ts';\nimport type {Sink} from '../../../types/streams.ts';\nimport {AutoResetSignal} from '../../change-streamer/schema/tables.ts';\nimport {\n getSubscriptionStateAndContext,\n type SubscriptionState,\n type SubscriptionStateAndContext,\n} from '../../replicator/schema/replication-state.ts';\nimport type {ChangeSource, ChangeStream} from '../change-source.ts';\nimport {BackfillManager} from '../common/backfill-manager.ts';\nimport {\n ChangeStreamMultiplexer,\n type Listener,\n} from '../common/change-stream-multiplexer.ts';\nimport {initReplica} from '../common/replica-schema.ts';\nimport type {\n BackfillRequest,\n DownstreamStatusMessage,\n JSONObject,\n} from '../protocol/current.ts';\nimport type {\n ColumnAdd,\n Identifier,\n MessageRelation,\n SchemaChange,\n TableCreate,\n} from '../protocol/current/data.ts';\nimport type {\n ChangeStreamData,\n ChangeStreamMessage,\n Data,\n} from '../protocol/current/downstream.ts';\nimport type {ColumnMetadata, TableMetadata} from './backfill-metadata.ts';\nimport {streamBackfill} from './backfill-stream.ts';\nimport {\n initialSync,\n type InitialSyncOptions,\n type ServerContext,\n} from './initial-sync.ts';\nimport type {\n Message,\n MessageMessage,\n MessageRelation as PostgresRelation,\n} from './logical-replication/pgoutput.types.ts';\nimport {subscribe, type StreamMessage} from './logical-replication/stream.ts';\nimport {fromBigInt, toStateVersionString, type LSN} from './lsn.ts';\nimport {\n replicationEventSchema,\n type DdlUpdateEvent,\n type SchemaSnapshotEvent,\n} from './schema/ddl.ts';\nimport {updateShardSchema} from './schema/init.ts';\nimport {\n getPublicationInfo,\n type PublishedSchema,\n type 
PublishedTableWithReplicaIdentity,\n} from './schema/published.ts';\nimport {\n dropShard,\n getInternalShardConfig,\n getReplicaAtVersion,\n internalPublicationPrefix,\n legacyReplicationSlot,\n replicaIdentitiesForTablesWithoutPrimaryKeys,\n replicationSlotExpression,\n type InternalShardConfig,\n type Replica,\n} from './schema/shard.ts';\nimport {validate} from './schema/validation.ts';\n\n/**\n * Initializes a Postgres change source, including the initial sync of the\n * replica, before streaming changes from the corresponding logical replication\n * stream.\n */\nexport async function initializePostgresChangeSource(\n lc: LogContext,\n upstreamURI: string,\n shard: ShardConfig,\n replicaDbFile: string,\n syncOptions: InitialSyncOptions,\n context: ServerContext,\n lagReportIntervalMs?: number,\n): Promise<{subscriptionState: SubscriptionState; changeSource: ChangeSource}> {\n await initReplica(\n lc,\n `replica-${shard.appID}-${shard.shardNum}`,\n replicaDbFile,\n (log, tx) => initialSync(log, shard, tx, upstreamURI, syncOptions, context),\n );\n\n const replica = new Database(lc, replicaDbFile);\n const subscriptionState = getSubscriptionStateAndContext(\n new StatementRunner(replica),\n );\n replica.close();\n\n // Check that upstream is properly setup, and throw an AutoReset to re-run\n // initial sync if not.\n const db = pgClient(lc, upstreamURI);\n try {\n const upstreamReplica = await checkAndUpdateUpstream(\n lc,\n db,\n shard,\n subscriptionState,\n );\n\n const changeSource = new PostgresChangeSource(\n lc,\n upstreamURI,\n shard,\n upstreamReplica,\n context,\n lagReportIntervalMs ?? 
null,\n );\n\n return {subscriptionState, changeSource};\n } finally {\n await db.end();\n }\n}\n\nasync function checkAndUpdateUpstream(\n lc: LogContext,\n sql: PostgresDB,\n shard: ShardConfig,\n {\n replicaVersion,\n publications: subscribed,\n initialSyncContext,\n }: SubscriptionStateAndContext,\n) {\n // Perform any shard schema updates\n await updateShardSchema(lc, sql, shard, replicaVersion);\n\n const upstreamReplica = await getReplicaAtVersion(\n lc,\n sql,\n shard,\n replicaVersion,\n initialSyncContext,\n );\n if (!upstreamReplica) {\n throw new AutoResetSignal(\n `No replication slot for replica at version ${replicaVersion}`,\n );\n }\n\n // Verify that the publications match what is being replicated.\n const requested = [...shard.publications].sort();\n const replicated = upstreamReplica.publications\n .filter(p => !p.startsWith(internalPublicationPrefix(shard)))\n .sort();\n if (!deepEqual(requested, replicated)) {\n lc.warn?.(`Dropping shard to change publications to: [${requested}]`);\n await sql.unsafe(dropShard(shard.appID, shard.shardNum));\n throw new AutoResetSignal(\n `Requested publications [${requested}] do not match configured ` +\n `publications: [${replicated}]`,\n );\n }\n\n // Sanity check: The subscription state on the replica should have the\n // same publications. 
This should be guaranteed by the equivalence of the\n // replicaVersion, but it doesn't hurt to verify.\n if (!deepEqual(upstreamReplica.publications, subscribed)) {\n throw new AutoResetSignal(\n `Upstream publications [${upstreamReplica.publications}] do not ` +\n `match subscribed publications [${subscribed}]`,\n );\n }\n\n // Verify that the publications exist.\n const exists = await sql`\n SELECT pubname FROM pg_publication WHERE pubname IN ${sql(subscribed)};\n `.values();\n if (exists.length !== subscribed.length) {\n throw new AutoResetSignal(\n `Upstream publications [${exists.flat()}] do not contain ` +\n `all subscribed publications [${subscribed}]`,\n );\n }\n\n const {slot} = upstreamReplica;\n const result = await sql<\n {restartLSN: LSN | null; walStatus: string | null}[]\n > /*sql*/ `\n SELECT restart_lsn as \"restartLSN\", wal_status as \"walStatus\" FROM pg_replication_slots\n WHERE slot_name = ${slot}`;\n if (result.length === 0) {\n throw new AutoResetSignal(`replication slot ${slot} is missing`);\n }\n const [{restartLSN, walStatus}] = result;\n if (restartLSN === null || walStatus === 'lost') {\n throw new AutoResetSignal(\n `replication slot ${slot} has been invalidated for exceeding the max_slot_wal_keep_size`,\n );\n }\n return upstreamReplica;\n}\n\n// Parameterize this if necessary. 
In practice starvation may never happen.\nconst MAX_LOW_PRIORITY_DELAY_MS = 1000;\n\ntype ReservationState = {\n lastWatermark?: string;\n};\n\n/**\n * Postgres implementation of a {@link ChangeSource} backed by a logical\n * replication stream.\n */\nclass PostgresChangeSource implements ChangeSource {\n readonly #lc: LogContext;\n readonly #db: PostgresDB;\n readonly #upstreamUri: string;\n readonly #shard: ShardID;\n readonly #replica: Replica;\n readonly #context: ServerContext;\n readonly #lagReporter: LagReporter | null;\n\n constructor(\n lc: LogContext,\n upstreamUri: string,\n shard: ShardID,\n replica: Replica,\n context: ServerContext,\n lagReportIntervalMs: number | null,\n ) {\n this.#lc = lc.withContext('component', 'change-source');\n this.#db = pgClient(lc, upstreamUri, {\n // used occasionally for schema changes, periodically for lag reporting\n ['idle_timeout']: 60,\n connection: {['application_name']: 'zero-replication-monitor'},\n });\n this.#upstreamUri = upstreamUri;\n this.#shard = shard;\n this.#replica = replica;\n this.#context = context;\n this.#lagReporter = lagReportIntervalMs\n ? new LagReporter(\n lc.withContext('component', 'lag-reporter'),\n shard,\n this.#db,\n lagReportIntervalMs,\n )\n : null;\n }\n\n startLagReporter(): Promise<{nextSendTimeMs: number}> | null {\n return this.#lagReporter ? 
this.#lagReporter.initiateLagReport() : null;
  }

  /**
   * Public entry point for starting replication for this shard:
   * stops any competing subscribers on this shard's replication slot,
   * loads the current internal shard configuration, and begins streaming
   * from the given client watermark.
   *
   * @param clientWatermark watermark (LexiVersion string) of the client's
   *   last committed change; streaming resumes just after it.
   * @param backfillRequests tables to backfill cooperatively alongside the
   *   main replication stream (defaults to none).
   */
  async startStream(
    clientWatermark: string,
    backfillRequests: BackfillRequest[] = [],
  ): Promise<ChangeStream> {
    const {slot} = this.#replica;

    await this.#stopExistingReplicationSlotSubscribers(slot);
    const config = await getInternalShardConfig(this.#db, this.#shard);
    this.#lc.info?.(`starting replication stream@${slot}`);
    return this.#startStream(slot, clientWatermark, config, backfillRequests);
  }

  /**
   * Subscribes to the logical replication slot and wires up:
   * - an {@link Acker} that pushes confirmed LSNs back to upstream,
   * - a ChangeStreamMultiplexer that merges the replication stream with
   *   backfill streams produced by the BackfillManager, and
   * - a detached async pump that converts raw replication messages into
   *   ChangeStreamMessages and pushes them downstream.
   *
   * Errors in the pump are funneled into `changes.fail()`; a ShutdownSignal
   * additionally logs the current replica row for diagnostics.
   */
  async #startStream(
    slot: string,
    clientWatermark: string,
    shardConfig: InternalShardConfig,
    backfillRequests: BackfillRequest[],
  ): Promise<ChangeStream> {
    // Resume from the (major) version immediately after the client's
    // last committed watermark.
    const clientStart = majorVersionFromString(clientWatermark) + 1n;
    const {messages, acks} = await subscribe(
      this.#lc,
      this.#db,
      slot,
      [...shardConfig.publications],
      clientStart,
    );
    const acker = new Acker(acks);

    // The ChangeStreamMultiplexer facilitates cooperative streaming from
    // the main replication stream and backfill streams initiated by the
    // BackfillManager.
    const changes = new ChangeStreamMultiplexer(this.#lc, clientWatermark);
    const backfillManager = new BackfillManager(this.#lc, changes, req =>
      streamBackfill(this.#lc, this.#upstreamUri, this.#replica, req),
    );
    changes
      .addProducers(messages, backfillManager)
      .addListeners(backfillManager, acker);
    backfillManager.run(clientWatermark, backfillRequests);

    const changeMaker = new ChangeMaker(
      this.#lc,
      this.#shard,
      shardConfig,
      this.#db,
      this.#replica.initialSchema,
    );

    /**
     * Determines if the incoming message is transactional, otherwise handling
     * non-transactional messages with a downstream status message.
     */
    const isTransactionalMessage = (
      lsn: bigint,
      msg: StreamMessage[1],
    ): msg is Message => {
      if (msg.tag === 'keepalive') {
        // Keepalives become downstream status messages carrying the
        // current watermark; they never open a transaction.
        changes.pushStatus([
          'status',
          {ack: msg.shouldRespond},
          {watermark: majorVersionToString(lsn)},
        ]);
        return false;
      }
      if (
        msg.tag === 'message' &&
        msg.prefix === this.#lagReporter?.messagePrefix
      ) {
        // Logical messages with our lag-report prefix are consumed here
        // and converted into a downstream status message.
        changes.pushStatus(this.#lagReporter.processLagReport(msg));
        return false;
      }
      return true;
    };

    // Detached message pump. Intentionally not awaited (`void`):
    // it runs for the lifetime of the stream and reports failures
    // via changes.fail() in the catch below.
    void (async () => {
      try {
        let reservation: ReservationState | null = null;
        let inTransaction = false;

        for await (const [lsn, msg] of messages) {
          if (!isTransactionalMessage(lsn, msg)) {
            // If we're not in a transaction but the last reservation was kept
            // because of pending keepalives or lag reports in the queue,
            // release the reservation.
            if (!inTransaction && reservation?.lastWatermark) {
              changes.release(reservation.lastWatermark);
              reservation = null;
            }
            continue;
          }

          if (!reservation) {
            const res = changes.reserve('replication');
            typeof res === 'string' || (await res); // awaits should be uncommon
            reservation = {};
          }

          let lastChange: ChangeStreamMessage | undefined;
          for (const change of await changeMaker.makeChanges(lsn, msg)) {
            await changes.push(change); // Allow the change-streamer to push back.
            lastChange = change;
          }

          // Track transaction boundaries via the last change emitted for
          // this message.
          switch (lastChange?.[0]) {
            case 'begin':
              inTransaction = true;
              break;
            case 'commit':
              inTransaction = false;
              reservation.lastWatermark = lastChange[2].watermark;
              if (
                messages.queued === 0 ||
                changes.waiterDelay() > MAX_LOW_PRIORITY_DELAY_MS
              ) {
                // After each transaction, release the reservation:
                // - if there are no pending upstream messages
                // - or if a low priority request has been waiting for longer
                //   than MAX_LOW_PRIORITY_DELAY_MS. This is to prevent
                //   (backfill) starvation on very active upstreams.
                changes.release(reservation.lastWatermark);
                reservation = null;
              }
              break;
          }
        }
      } catch (e) {
        // Note: no need to worry about reservations here since downstream
        // is being completely canceled.
        const err = translateError(e);
        if (err instanceof ShutdownSignal) {
          // Log the new state of the replica to surface information about the
          // server that sent the shutdown signal, if any.
          await this.#logCurrentReplicaInfo();
        }
        changes.fail(err);
      }
    })();

    this.#lc.info?.(
      `started replication stream@${slot} from ${clientWatermark} (replicaVersion: ${
        this.#replica.version
      })`,
    );

    return {
      changes: changes.asSource(),
      // Downstream acks are routed through the Acker, which decides when
      // to forward the confirmed LSN to upstream.
      acks: {push: status => acker.ack(status[2].watermark)},
    };
  }

  /**
   * Best-effort: logs the replica row at this replica's version so that the
   * `subscriberContext` of the server that sent a shutdown signal (if any)
   * is visible in the logs. Errors are logged at warn and swallowed.
   */
  async #logCurrentReplicaInfo() {
    try {
      const replica = await getReplicaAtVersion(
        this.#lc,
        this.#db,
        this.#shard,
        this.#replica.version,
      );
      if (replica) {
        this.#lc.info?.(
          `Shutdown signal from replica@${this.#replica.version}: ${stringify(replica.subscriberContext)}`,
        );
      }
    } catch (e) {
      this.#lc.warn?.(`error logging replica info`, e);
    }
  }

  /**
   * Stops replication slots associated with this shard, and returns
   * a `cleanup` task that drops any slot other than the specified
   * `slotToKeep`.
   *
   * Note that replication slots created after `slotToKeep` (as indicated by
   * the timestamp suffix) are preserved, as those are newly syncing replicas
   * that will soon take over the slot.
   */
  async #stopExistingReplicationSlotSubscribers(slotToKeep: string) {
    const slotExpression = replicationSlotExpression(this.#shard);
    const legacySlotName = legacyReplicationSlot(this.#shard);

    const result = await runTx(this.#db, async sql => {
      // Note: `slot_name <= slotToKeep` uses a string compare of the millisecond
      // timestamp, which works until it exceeds 13 digits (sometime in 2286).
      const result = await sql<
        {slot: string; pid:
string | null; terminated: boolean | null}[]
      > /*sql*/ `
        SELECT slot_name as slot, pg_terminate_backend(active_pid) as terminated, active_pid as pid
          FROM pg_replication_slots 
          WHERE (slot_name LIKE ${slotExpression} OR slot_name = ${legacySlotName})
            AND slot_name <= ${slotToKeep}`;
      // pg_terminate_backend() above kicks any backend currently holding
      // one of this shard's slots at or before slotToKeep.
      this.#lc.info?.(
        `terminated replication slots: ${JSON.stringify(result)}`,
      );
      const replicasTable = `${upstreamSchema(this.#shard)}.replicas`;
      const replicasBefore = await sql`
        SELECT slot, version, "initialSyncContext", "subscriberContext" 
          FROM ${sql(replicasTable)} ORDER BY slot`;

      if (result.length === 0) {
        // Not even slotToKeep matched: a different replication-manager must
        // have taken over on a newer slot. Log diagnostics and abort.
        const shardSlots = await sql`
          SELECT slot_name as slot, active, active_pid as pid
            FROM pg_replication_slots
            WHERE slot_name LIKE ${slotExpression} OR slot_name = ${legacySlotName}
            ORDER BY slot_name`;
        this.#lc.warn?.(
          `slot ${slotToKeep} not found while cleaning subscribers`,
          {slots: shardSlots, replicas: replicasBefore},
        );
        throw new AbortError(
          `replication slot ${slotToKeep} is missing. A different ` +
            `replication-manager should now be running on a new ` +
            `replication slot.`,
        );
      }
      // Clear the state of the older replicas.
      this.#lc.info?.(
        `replicas before cleanup (slotToKeep=${slotToKeep}): ${JSON.stringify(
          replicasBefore,
        )}`,
      );
      await sql`
        DELETE FROM ${sql(replicasTable)} WHERE slot < ${slotToKeep}`;
      await sql`
        UPDATE ${sql(replicasTable)} 
          SET "subscriberContext" = ${this.#context}
          WHERE slot = ${slotToKeep}`;
      const replicasAfter = await sql<{slot: string; version: string}[]>`
        SELECT slot, version FROM ${sql(replicasTable)} ORDER BY slot`;
      this.#lc.info?.(
        `replicas after cleanup (slotToKeep=${slotToKeep}): ${JSON.stringify(
          replicasAfter,
        )}`,
      );
      return result;
    });

    // pids of the backends that pg_terminate_backend() signaled above.
    const pids = result.filter(({pid}) => pid !== null).map(({pid}) => pid);
    if (pids.length) {
      this.#lc.info?.(`signaled subscriber ${pids} to shut down`);
    }
    const otherSlots = result
      .filter(({slot}) => slot !== slotToKeep)
      .map(({slot}) => slot);

    if (otherSlots.length) {
      // Fire-and-forget: dropping can take a while (see the retry loop in
      // #dropReplicationSlots), so don't block startup on it.
      void this.#dropReplicationSlots(otherSlots).catch(e =>
        this.#lc.warn?.(`error dropping replication slots`, e),
      );
    }
  }

  /**
   * Best-effort drop of the given replication slots, retrying up to 5 times
   * (1 second apart) while Postgres still considers a slot to be in use
   * (PG_OBJECT_IN_USE). Gives up with a warning after the last attempt.
   */
  async #dropReplicationSlots(slots: string[]) {
    this.#lc.info?.(`dropping other replication slot(s) ${slots}`);
    const sql = this.#db;
    for (let i = 0; i < 5; i++) {
      try {
        await sql`
          SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots
            WHERE slot_name IN ${sql(slots)}
          `;
        this.#lc.info?.(`successfully dropped ${slots}`);
        return;
      } catch (e) {
        // error: replication slot "zero_slot_change_source_test_id" is active for PID 268
        if (
          e instanceof postgres.PostgresError &&
          e.code === PG_OBJECT_IN_USE
        ) {
          // The freeing up of the replication slot is not transactional;
          // sometimes it takes time for Postgres to consider the slot
          // inactive.
          this.#lc.debug?.(`attempt ${i + 1}: ${String(e)}`, e);
        } else {
          this.#lc.warn?.(`error dropping ${slots}`, e);
        }
        await
sleep(1000);
      }
    }
    this.#lc.warn?.(`maximum attempts exceeded dropping ${slots}`);
  }
}

// Exported for testing.
/**
 * Routes acknowledgements (confirmed watermarks) from the downstream
 * change-streamer back to the upstream replication connection.
 *
 * Tracks whether an explicit downstream ack is still pending
 * (#waitingForDownstreamAck) so that keepalive-only watermarks are not
 * acked while a transaction is still being committed downstream.
 */
export class Acker implements Listener {
  // Sink into which confirmed LSNs (as bigint) are pushed upstream.
  #acks: Sink<bigint>;
  // Watermark whose ack must come from the change-streamer, or null if
  // downstream is caught up.
  #waitingForDownstreamAck: string | null = null;

  constructor(acks: Sink<bigint>) {
    this.#acks = acks;
  }

  onChange(change: ChangeStreamMessage): void {
    switch (change[0]) {
      case 'status':
        const {watermark} = change[2];
        if (change[1].ack) {
          this.#expectDownstreamAck(watermark);
        } else {
          // Keepalives with shouldRespond = false are sent to Listeners,
          // but for efficiency they are not sent downstream to the
          // change-streamer. Ack them here if the change-streamer is caught
          // up. This updates the replication slot's `confirmed_flush_lsn`
          // more quickly (rather than waiting for the periodic shouldRespond),
          // which is useful for monitoring replication slot lag.
          this.#ackIfDownstreamIsCaughtUp(watermark);
        }
        break;
      case 'begin':
        // Mark the commit watermark as being expected so that any intermediate
        // shouldRespond=false watermarks, which will be at the
        // commitWatermark, are *not* acked, as the ack must come from
        // change-streamer after it commits the transaction.
        if (!change[1].skipAck) {
          this.#expectDownstreamAck(change[2].commitWatermark);
        }
        break;
    }
  }

  #expectDownstreamAck(watermark: string) {
    this.#waitingForDownstreamAck = watermark;
  }

  ack(watermark: LexiVersion) {
    // NOTE(review): relies on LexiVersion strings comparing
    // lexicographically in version order — confirm against LexiVersion docs.
    if (
      this.#waitingForDownstreamAck &&
      this.#waitingForDownstreamAck <= watermark
    ) {
      this.#waitingForDownstreamAck = null;
    }
    this.#sendAck(watermark);
  }

  #ackIfDownstreamIsCaughtUp(watermark: string) {
    if (this.#waitingForDownstreamAck === null) {
      this.#sendAck(watermark);
    }
  }

  #sendAck(watermark: LexiVersion) {
    const lsn = majorVersionFromString(watermark);
    this.#acks.push(lsn);
  }
}

// Payload of the lag-report logical message emitted by LagReporter
// (json_build_object with these three fields).
const lagReportSchema = v.object({
  id: v.string(),
  sendTimeMs: v.number(),
  commitTimeMs: v.number(),
});

export
type LagReport = v.Infer<typeof lagReportSchema>;\n\nclass LagReporter {\n static readonly MESSAGE_SUFFIX = '/lag-report/v1';\n\n readonly #lc: LogContext;\n readonly messagePrefix: string;\n\n // Weird issue with oxlint, which thinks:\n // × eslint(no-unused-private-class-members): 'db' is defined but never used.\n // oxlint-disable-next-line eslint(no-unused-private-class-members)\n readonly #db: PostgresDB;\n readonly #lagIntervalMs: number;\n\n #pgVersion: number | undefined;\n #lastReportID: string = '';\n #timer: NodeJS.Timeout | undefined;\n\n constructor(\n lc: LogContext,\n shard: ShardID,\n db: PostgresDB,\n lagIntervalMs: number,\n ) {\n this.#lc = lc;\n this.messagePrefix = `${shard.appID}/${shard.shardNum}${LagReporter.MESSAGE_SUFFIX}`;\n this.#db = db;\n this.#lagIntervalMs = lagIntervalMs;\n }\n\n async #getPgVersion() {\n if (this.#pgVersion === undefined) {\n const [{pgVersion}] = await this.#db<{pgVersion: number}[]> /*sql*/ `\n SELECT current_setting('server_version_num')::int as \"pgVersion\"`;\n this.#pgVersion = pgVersion;\n }\n return this.#pgVersion;\n }\n\n async initiateLagReport(now = Date.now()) {\n const pgVersion = this.#pgVersion ?? (await this.#getPgVersion());\n this.#lastReportID = nanoid();\n\n if (pgVersion >= 170000) {\n await this.#db /*sql*/ `\n SELECT pg_logical_emit_message(\n false,\n ${this.messagePrefix},\n json_build_object(\n 'id', ${this.#lastReportID}::text,\n 'sendTimeMs', ${now}::int8,\n 'commitTimeMs', extract(epoch from now()) * 1000\n )::text,\n true\n );\n `;\n } else {\n // Versions before PG 17 do not support the final `flush` option of\n // pg_logical_emit_message(). This results in an extra 50~100ms latency\n // for replication reports when the db is idle, which is still\n // acceptable for the purpose for alerting on pathological lag, for\n // which the threshold is much higher (e.g. 
many seconds).\n await this.#db /*sql*/ `\n SELECT pg_logical_emit_message(\n false,\n ${this.messagePrefix},\n json_build_object(\n 'id', ${this.#lastReportID}::text,\n 'sendTimeMs', ${now}::int8,\n 'commitTimeMs', extract(epoch from now()) * 1000\n )::text\n );\n `;\n }\n return {nextSendTimeMs: now};\n }\n\n #scheduleNextReport(delayMs: number) {\n clearTimeout(this.#timer);\n this.#timer = setTimeout(async () => {\n try {\n await this.initiateLagReport();\n } catch (e) {\n this.#lc.warn?.(`error initiating lag report`, e);\n this.#scheduleNextReport(this.#lagIntervalMs);\n }\n }, delayMs);\n }\n\n processLagReport(msg: MessageMessage): DownstreamStatusMessage {\n assert(\n msg.prefix === this.messagePrefix,\n `unexpected message prefix: ${msg.prefix}`,\n );\n const report = parseLogicalMessageContent(msg, lagReportSchema);\n const now = Date.now();\n const nextSendTimeMs = Math.max(\n now,\n report.sendTimeMs + this.#lagIntervalMs,\n );\n if (report.id === this.#lastReportID) {\n // Only schedule the next report when receiving the previous report.\n // For historic reports in the WAL, or reports generated by other\n // replication-managers, status messages are still sent downstream,\n // but the next report is not actually scheduled.\n this.#scheduleNextReport(nextSendTimeMs - now);\n }\n const {sendTimeMs, commitTimeMs} = report;\n return [\n 'status',\n {\n ack: false,\n lagReport: {\n lastTimings: {\n sendTimeMs,\n commitTimeMs,\n receiveTimeMs: now,\n },\n nextSendTimeMs,\n },\n },\n {watermark: toStateVersionString(msg.messageLsn ?? 
'0/0')},\n ];\n }\n}\n\ntype ReplicationError = {\n lsn: bigint;\n msg: Message;\n err: unknown;\n lastLogTime: number;\n};\n\nconst SET_REPLICA_IDENTITY_DELAY_MS = 50;\n\nclass ChangeMaker {\n readonly #lc: LogContext;\n readonly #shardPrefix: string;\n readonly #shardConfig: InternalShardConfig;\n readonly #initialSchema: PublishedSchema;\n readonly #db: PostgresDB;\n\n #replicaIdentityTimer: NodeJS.Timeout | undefined;\n #error: ReplicationError | undefined;\n\n constructor(\n lc: LogContext,\n {appID, shardNum}: ShardID,\n shardConfig: InternalShardConfig,\n db: PostgresDB,\n initialSchema: PublishedSchema,\n ) {\n this.#lc = lc;\n // Note: This matches the prefix used in pg_logical_emit_message() in pg/schema/ddl.ts.\n this.#shardPrefix = `${appID}/${shardNum}`;\n this.#shardConfig = shardConfig;\n this.#initialSchema = initialSchema;\n this.#db = db;\n }\n\n async makeChanges(lsn: bigint, msg: Message): Promise<ChangeStreamMessage[]> {\n if (this.#error) {\n this.#logError(this.#error);\n return [];\n }\n try {\n return await this.#makeChanges(msg);\n } catch (err) {\n this.#error = {lsn, msg, err, lastLogTime: 0};\n this.#logError(this.#error);\n\n const message = `Unable to continue replication from LSN ${fromBigInt(lsn)}`;\n const errorDetails: JSONObject = {error: message};\n if (err instanceof UnsupportedSchemaChangeError) {\n errorDetails.reason = err.description;\n errorDetails.context = err.event.context;\n } else {\n errorDetails.reason = String(err);\n }\n\n // Rollback the current transaction to avoid dangling transactions in\n // downstream processors (i.e. 
changeLog, replicator).\n return [\n ['rollback', {tag: 'rollback'}],\n ['control', {tag: 'reset-required', message, errorDetails}],\n ];\n }\n }\n\n #logError(error: ReplicationError) {\n const {lsn, msg, err, lastLogTime} = error;\n const now = Date.now();\n\n // Output an error to logs as replication messages continue to be dropped,\n // at most once a minute.\n if (now - lastLogTime > 60_000) {\n this.#lc.error?.(\n `Unable to continue replication from LSN ${fromBigInt(lsn)}: ${String(\n err,\n )}`,\n err instanceof UnsupportedSchemaChangeError\n ? err.event.context\n : // 'content' can be a large byte Buffer. Exclude it from logging output.\n {...msg, content: undefined},\n );\n error.lastLogTime = now;\n }\n }\n\n // oxlint-disable-next-line require-await\n async #makeChanges(msg: Message): Promise<ChangeStreamData[]> {\n switch (msg.tag) {\n case 'begin':\n return [\n [\n 'begin',\n {...msg, json: 's'},\n {commitWatermark: toStateVersionString(must(msg.commitLsn))},\n ],\n ];\n\n case 'delete': {\n if (!(msg.key ?? msg.old)) {\n throw new Error(\n `Invalid DELETE msg (missing key): ${stringify(msg)}`,\n );\n }\n return [\n [\n 'data',\n {\n ...msg,\n relation: makeRelation(msg.relation),\n // https://www.postgresql.org/docs/current/protocol-logicalrep-message-formats.html#PROTOCOL-LOGICALREP-MESSAGE-FORMATS-DELETE\n key: must(msg.old ?? msg.key),\n },\n ],\n ];\n }\n\n case 'update': {\n return [\n [\n 'data',\n {\n ...msg,\n relation: makeRelation(msg.relation),\n // https://www.postgresql.org/docs/current/protocol-logicalrep-message-formats.html#PROTOCOL-LOGICALREP-MESSAGE-FORMATS-UPDATE\n key: msg.old ?? 
msg.key,\n },\n ],\n ];\n }\n\n case 'insert':\n return [['data', {...msg, relation: makeRelation(msg.relation)}]];\n case 'truncate':\n return [['data', {...msg, relations: msg.relations.map(makeRelation)}]];\n\n case 'message':\n if (!msg.prefix.startsWith(this.#shardPrefix)) {\n this.#lc.debug?.('ignoring message for different shard', msg.prefix);\n return [];\n }\n switch (msg.prefix.substring(this.#shardPrefix.length)) {\n case '': // Legacy prefix\n case '/ddl':\n return this.#handleDdlMessage(msg);\n default:\n this.#lc.debug?.('ignoring unknown message type', msg.prefix);\n return [];\n }\n\n case 'commit':\n this.#lastSnapshotInTx = undefined;\n return [\n [\n 'commit',\n msg,\n {watermark: toStateVersionString(must(msg.commitLsn))},\n ],\n ];\n\n case 'relation':\n return this.#handleRelation(msg);\n case 'type':\n return []; // Nothing need be done for custom types.\n case 'origin':\n // No need to detect replication loops since we are not a\n // PG replication source.\n return [];\n default:\n msg satisfies never;\n throw new Error(`Unexpected message type ${stringify(msg)}`);\n }\n }\n\n #preSchema: PublishedSchema | undefined;\n #lastSnapshotInTx: PublishedSchema | undefined;\n\n #handleDdlMessage(msg: MessageMessage) {\n const event = parseLogicalMessageContent(msg, replicationEventSchema);\n // Cancel manual schema adjustment timeouts when an upstream schema change\n // is about to happen, so as to avoid interfering / redundant work.\n clearTimeout(this.#replicaIdentityTimer);\n\n let previousSchema: PublishedSchema | null;\n const {type} = event;\n switch (type) {\n case 'ddlStart':\n // Store the schema in order to diff it with a subsequent ddlUpdate.\n this.#preSchema = event.schema;\n return [];\n case 'ddlUpdate':\n // guaranteed by event triggers\n previousSchema = must(\n this.#preSchema,\n `ddlUpdate received without a ddlStart`,\n );\n break;\n case 'schemaSnapshot':\n previousSchema = this.#lastSnapshotInTx ?? 
null;\n break;\n default: // Ignore unknown types for forwards compatibility\n this.#lc.info?.(`ignoring unknown ddl message type: ${type}`);\n return [];\n }\n\n // Store the schema (from either a ddlUpdate or schemaSnapshot) to\n // diff against the next schemaSnapshot.\n this.#lastSnapshotInTx = event.schema;\n if (!previousSchema) {\n this.#lc.info?.(`received ${msg.prefix}/${type} event`);\n return []; // First schemaSnapshot in the tx.\n }\n this.#lc.info?.(`processing ${msg.prefix}/${type} event`, event);\n\n const changes = this.#makeSchemaChanges(previousSchema, event).map(\n change => ['data', change] satisfies Data,\n );\n\n this.#lc\n .withContext('tag', event.event.tag)\n .withContext('query', event.context.query)\n .info?.(`${changes.length} schema change(s)`, {changes});\n\n const replicaIdentities = replicaIdentitiesForTablesWithoutPrimaryKeys(\n event.schema,\n );\n if (replicaIdentities) {\n this.#replicaIdentityTimer = setTimeout(async () => {\n try {\n await replicaIdentities.apply(this.#lc, this.#db);\n } catch (err) {\n this.#lc.warn?.(`error setting replica identities`, err);\n }\n }, SET_REPLICA_IDENTITY_DELAY_MS);\n }\n\n return changes;\n }\n\n /**\n * A note on operation order:\n *\n * Postgres will drop related indexes when columns are dropped,\n * but SQLite will error instead (https://sqlite.org/forum/forumpost/2e62dba69f?t=c&hist).\n * The current workaround is to drop indexes first.\n *\n * Also note that although it should not be possible to both rename and\n * add/drop tables/columns in a single statement, the operations are\n * ordered to handle that possibility, by always dropping old entities,\n * then modifying kept entities, and then adding new entities.\n *\n * Thus, the order of replicating DDL updates is:\n * - drop indexes\n * - drop tables\n * - alter tables\n * - drop columns\n * - alter columns\n * - add columns\n * - create tables\n * - create indexes\n *\n * In the future the replication logic should be improved to 
handle this\n * behavior in SQLite by dropping dependent indexes manually before dropping\n * columns. This, for example, would be needed to properly support changing\n * the type of a column that's indexed.\n */\n #makeSchemaChanges(\n preSchema: PublishedSchema,\n update: DdlUpdateEvent | SchemaSnapshotEvent,\n ): SchemaChange[] {\n try {\n const [prevTbl, prevIdx] = specsByID(preSchema);\n const [nextTbl, nextIdx] = specsByID(update.schema);\n const changes: SchemaChange[] = [];\n\n // Validate the new table schemas\n for (const table of nextTbl.values()) {\n validate(this.#lc, table);\n }\n\n const [droppedIdx, createdIdx] = symmetricDifferences(prevIdx, nextIdx);\n\n // Detect modified indexes (same name, different definition).\n // This happens when a constraint is dropped and recreated with the\n // same name in a single ALTER TABLE statement.\n // Note: We compare using stable column attnums rather than names,\n // because table/column renames change the index spec cosmetically\n // (tableName, column keys) without the index actually being recreated.\n const keptIdx = intersection(prevIdx, nextIdx);\n for (const id of keptIdx) {\n if (\n isIndexStructurallyChanged(\n must(prevIdx.get(id)),\n must(nextIdx.get(id)),\n prevTbl,\n nextTbl,\n )\n ) {\n droppedIdx.add(id);\n createdIdx.add(id);\n }\n }\n\n for (const id of droppedIdx) {\n const {schema, name} = must(prevIdx.get(id));\n changes.push({tag: 'drop-index', id: {schema, name}});\n }\n\n // DROP\n const [droppedTbl, createdTbl] = symmetricDifferences(prevTbl, nextTbl);\n for (const id of droppedTbl) {\n const {schema, name} = must(prevTbl.get(id));\n changes.push({tag: 'drop-table', id: {schema, name}});\n }\n // ALTER TABLE | ALTER PUBLICATION\n const tables = intersection(prevTbl, nextTbl);\n for (const id of tables) {\n changes.push(\n ...this.#getTableChanges(\n must(prevTbl.get(id)),\n must(nextTbl.get(id)),\n update.event.tag,\n ),\n );\n }\n // CREATE\n for (const id of createdTbl) {\n const spec 
= must(nextTbl.get(id));\n const createTable: TableCreate = {\n tag: 'create-table',\n spec,\n metadata: getMetadata(spec),\n };\n if (!update.event.tag.startsWith('CREATE')) {\n // Tables introduced to the publication via ALTER statements\n // or the COMMENT statement (from schemaSnapshots) must be\n // backfilled.\n createTable.backfill = mapValues(spec.columns, ({pos: attNum}) => ({\n attNum,\n })) satisfies Record<string, ColumnMetadata>;\n }\n changes.push(createTable);\n }\n\n // Add indexes last since they may reference tables / columns that need\n // to be created first.\n for (const id of createdIdx) {\n const spec = must(nextIdx.get(id));\n changes.push({tag: 'create-index', spec});\n }\n return changes;\n } catch (e) {\n throw new UnsupportedSchemaChangeError(String(e), update, {cause: e});\n }\n }\n\n #getTableChanges(\n oldTable: PublishedTableWithReplicaIdentity,\n newTable: PublishedTableWithReplicaIdentity,\n ddlTag: string,\n ): SchemaChange[] {\n const changes: SchemaChange[] = [];\n if (\n oldTable.schema !== newTable.schema ||\n oldTable.name !== newTable.name\n ) {\n changes.push({\n tag: 'rename-table',\n old: {schema: oldTable.schema, name: oldTable.name},\n new: {schema: newTable.schema, name: newTable.name},\n });\n }\n const oldMetadata = getMetadata(oldTable);\n const newMetadata = getMetadata(newTable);\n if (!deepEqual(oldMetadata, newMetadata)) {\n changes.push({\n tag: 'update-table-metadata',\n table: {schema: newTable.schema, name: newTable.name},\n old: oldMetadata,\n new: newMetadata,\n });\n }\n const table = {schema: newTable.schema, name: newTable.name};\n const oldColumns = columnsByID(oldTable.columns);\n const newColumns = columnsByID(newTable.columns);\n\n // DROP\n const [dropped, added] = symmetricDifferences(oldColumns, newColumns);\n for (const id of dropped) {\n const {name: column} = must(oldColumns.get(id));\n changes.push({tag: 'drop-column', table, column});\n }\n\n // ALTER\n const both = intersection(oldColumns, 
newColumns);\n for (const id of both) {\n const {name: oldName, ...oldSpec} = must(oldColumns.get(id));\n const {name: newName, ...newSpec} = must(newColumns.get(id));\n // The three things that we care about are:\n // 1. name\n // 2. type\n // 3. not-null\n if (\n oldName !== newName ||\n oldSpec.dataType !== newSpec.dataType ||\n oldSpec.notNull !== newSpec.notNull\n ) {\n changes.push({\n tag: 'update-column',\n table,\n old: {name: oldName, spec: oldSpec},\n new: {name: newName, spec: newSpec},\n });\n }\n }\n\n // All columns introduced by a publication change require backfill\n // (which appear as ALTER PUBLICATION or COMMENT tags).\n // Columns created by ALTER TABLE, on the other hand, only require\n // backfill if they have non-constant defaults.\n const alwaysBackfill = ddlTag !== 'ALTER TABLE';\n\n // ADD\n for (const id of added) {\n const {name, ...spec} = must(newColumns.get(id));\n const column = {name, spec};\n const addColumn: ColumnAdd = {\n tag: 'add-column',\n table,\n column,\n tableMetadata: getMetadata(newTable),\n };\n if (alwaysBackfill) {\n addColumn.column.spec.dflt = null;\n addColumn.backfill = {attNum: spec.pos} satisfies ColumnMetadata;\n } else {\n // Determine if the ChangeProcessor will accept the column add as is.\n try {\n mapPostgresToLiteColumn(table.name, column);\n } catch (e) {\n if (!(e instanceof UnsupportedColumnDefaultError)) {\n // Note: mapPostgresToLiteColumn is not expected to throw any other\n // types of errors.\n throw e;\n }\n // If the column has an unsupported default (e.g. an expression or a\n // generated value), create the column as initially hidden with a\n // `null` default, and publish it after backfilling the values from\n // upstream. 
Note that this does require that the table have a valid\n // REPLICA IDENTITY, since backfill relies on merging new data with\n // an existing row.\n this.#lc.info?.(\n `Backfilling column ${table.name}.${name}: ${String(e)}`,\n );\n addColumn.column.spec.dflt = null;\n addColumn.backfill = {attNum: spec.pos} satisfies ColumnMetadata;\n }\n }\n changes.push(addColumn);\n }\n return changes;\n }\n\n /**\n * If `ddlDetection === true`, relation messages are irrelevant,\n * as schema changes are detected by event triggers that\n * emit custom messages.\n *\n * For degraded-mode replication (`ddlDetection === false`):\n * 1. query the current published schemas on upstream\n * 2. compare that with the InternalShardConfig.initialSchema\n * 3. compare that with the incoming MessageRelation\n * 4. On any discrepancy, throw an UnsupportedSchemaChangeError\n * to halt replication.\n *\n * Note that schemas queried in step [1] will be *post-transaction*\n * schemas, which are not necessarily suitable for actually processing\n * the statements in the transaction being replicated. In other words,\n * this mechanism cannot be used to reliably *replicate* schema changes.\n * However, they serve the purpose determining if schemas have changed.\n */\n async #handleRelation(rel: PostgresRelation): Promise<ChangeStreamData[]> {\n const {publications, ddlDetection} = this.#shardConfig;\n if (ddlDetection) {\n return [];\n }\n const currentSchema = await getPublicationInfo(this.#db, publications);\n const difference = getSchemaDifference(this.#initialSchema, currentSchema);\n if (difference !== null) {\n throw new MissingEventTriggerSupport(difference);\n }\n // Even if the currentSchema is equal to the initialSchema, the\n // MessageRelation itself must be checked to detect transient\n // schema changes within the transaction (e.g. 
adding and dropping\n // a table, or renaming a column and then renaming it back).\n const orel = this.#initialSchema.tables.find(\n t => t.oid === rel.relationOid,\n );\n if (!orel) {\n // Can happen if a table is created and then dropped in the same transaction.\n throw new MissingEventTriggerSupport(\n `relation not in initialSchema: ${stringify(rel)}`,\n );\n }\n if (relationDifferent(orel, rel)) {\n throw new MissingEventTriggerSupport(\n `relation has changed within the transaction: ${stringify(orel)} vs ${stringify(rel)}`,\n );\n }\n return [];\n }\n}\n\nfunction getSchemaDifference(\n a: PublishedSchema,\n b: PublishedSchema,\n): string | null {\n // Note: ignore indexes since changes need not to halt replication\n if (a.tables.length !== b.tables.length) {\n return `tables created or dropped`;\n }\n for (let i = 0; i < a.tables.length; i++) {\n const at = a.tables[i];\n const bt = b.tables[i];\n const difference = getTableDifference(at, bt);\n if (difference) {\n return difference;\n }\n }\n return null;\n}\n\n// ColumnSpec comparator\nconst byColumnPos = (a: [string, ColumnSpec], b: [string, ColumnSpec]) =>\n a[1].pos < b[1].pos ? -1 : a[1].pos > b[1].pos ? 
1 : 0;\n\nfunction getTableDifference(\n a: PublishedTableSpec,\n b: PublishedTableSpec,\n): string | null {\n if (a.oid !== b.oid || a.schema !== b.schema || a.name !== b.name) {\n return `Table \"${a.name}\" differs from table \"${b.name}\"`;\n }\n if (!deepEqual(a.primaryKey, b.primaryKey)) {\n return `Primary key of table \"${a.name}\" has changed`;\n }\n const acols = Object.entries(a.columns).sort(byColumnPos);\n const bcols = Object.entries(b.columns).sort(byColumnPos);\n if (\n acols.length !== bcols.length ||\n acols.some(([aname, acol], i) => {\n const [bname, bcol] = bcols[i];\n return (\n aname !== bname ||\n acol.pos !== bcol.pos ||\n acol.typeOID !== bcol.typeOID ||\n acol.notNull !== bcol.notNull\n );\n })\n ) {\n return `Columns of table \"${a.name}\" have changed`;\n }\n return null;\n}\n\nexport function relationDifferent(a: PublishedTableSpec, b: PostgresRelation) {\n if (a.oid !== b.relationOid || a.schema !== b.schema || a.name !== b.name) {\n return true;\n }\n if (\n // The MessageRelation's `keyColumns` field contains the columns in column\n // declaration order, whereas the PublishedTableSpec's `primaryKey`\n // contains the columns in primary key (i.e. index) order. 
Do an\n // order-agnostic compare here since it is not possible to detect\n // key-order changes from the MessageRelation message alone.\n b.replicaIdentity === 'default' &&\n !equals(new Set(a.primaryKey), new Set(b.keyColumns))\n ) {\n return true;\n }\n const acols = Object.entries(a.columns).sort(byColumnPos);\n const bcols = b.columns;\n return (\n acols.length !== bcols.length ||\n acols.some(([aname, acol], i) => {\n const bcol = bcols[i];\n return aname !== bcol.name || acol.typeOID !== bcol.typeOid;\n })\n );\n}\n\nfunction translateError(e: unknown): Error {\n if (!(e instanceof Error)) {\n return new Error(String(e));\n }\n if (e instanceof postgres.PostgresError && e.code === PG_ADMIN_SHUTDOWN) {\n return new ShutdownSignal(e);\n }\n return e;\n}\nconst idString = (id: Identifier) => `${id.schema}.${id.name}`;\n\nfunction specsByID(published: PublishedSchema) {\n return [\n // It would have been nice to use a CustomKeyMap here, but we rely on set-utils\n // operations which use plain Sets.\n new Map(published.tables.map(t => [t.oid, t])),\n new Map(published.indexes.map(i => [idString(i), i])),\n ] as const;\n}\n\n/**\n * Determines if an index was structurally changed (e.g. constraint dropped\n * and recreated with different columns) vs cosmetically changed (e.g. 
the\n * index spec changed because the table or a column was renamed).\n *\n * Compares boolean properties directly and resolves column names to their\n * stable attnums (pg_attribute `attnum`) for the column comparison.\n */\nfunction isIndexStructurallyChanged(\n prev: PublishedIndexSpec,\n next: PublishedIndexSpec,\n prevTables: Map<number, PublishedTableWithReplicaIdentity>,\n nextTables: Map<number, PublishedTableWithReplicaIdentity>,\n): boolean {\n if (\n prev.unique !== next.unique ||\n prev.isPrimaryKey !== next.isPrimaryKey ||\n prev.isReplicaIdentity !== next.isReplicaIdentity ||\n prev.isImmediate !== next.isImmediate\n ) {\n return true;\n }\n\n const prevTable = findTableBySchemaAndName(\n prevTables,\n prev.schema,\n prev.tableName,\n );\n const nextTable = findTableBySchemaAndName(\n nextTables,\n next.schema,\n next.tableName,\n );\n if (!prevTable || !nextTable) {\n // Can't resolve tables; conservatively treat as changed.\n return true;\n }\n\n const prevEntries = Object.entries(prev.columns);\n const nextEntries = Object.entries(next.columns);\n if (prevEntries.length !== nextEntries.length) {\n return true;\n }\n\n // Resolve column names → attnums and compare.\n const prevByAttnum = new Map<number | undefined, string>(\n prevEntries.map(([name, dir]) => [prevTable.columns[name]?.pos, dir]),\n );\n const nextByAttnum = new Map<number | undefined, string>(\n nextEntries.map(([name, dir]) => [nextTable.columns[name]?.pos, dir]),\n );\n\n if (prevByAttnum.has(undefined) || nextByAttnum.has(undefined)) {\n // Column not found in table spec; conservatively treat as changed.\n return true;\n }\n if (prevByAttnum.size !== nextByAttnum.size) {\n return true;\n }\n for (const [attnum, dir] of prevByAttnum) {\n if (nextByAttnum.get(attnum) !== dir) {\n return true;\n }\n }\n return false;\n}\n\nfunction findTableBySchemaAndName(\n tables: Map<number, PublishedTableWithReplicaIdentity>,\n schema: string,\n name: string,\n): 
PublishedTableWithReplicaIdentity | undefined {\n for (const table of tables.values()) {\n if (table.schema === schema && table.name === name) {\n return table;\n }\n }\n return undefined;\n}\n\nfunction columnsByID(\n columns: Record<string, ColumnSpec>,\n): Map<number, ColumnSpec & {name: string}> {\n const colsByID = new Map<number, ColumnSpec & {name: string}>();\n for (const [name, spec] of Object.entries(columns)) {\n // The `pos` field is the `attnum` in `pg_attribute`, which is a stable\n // identifier for the column in this table (i.e. never reused).\n colsByID.set(spec.pos, {...spec, name});\n }\n return colsByID;\n}\n\nfunction getMetadata(table: PublishedTableWithReplicaIdentity): TableMetadata {\n return {\n schemaOID: must(table.schemaOID),\n relationOID: table.oid,\n rowKey: Object.fromEntries(\n table.replicaIdentityColumns.map(k => [\n k,\n {attNum: table.columns[k].pos},\n ]),\n ),\n };\n}\n\n// Avoid sending the `columns` from the Postgres MessageRelation message.\n// They are not used downstream and the message can be large.\nfunction makeRelation(relation: PostgresRelation): MessageRelation {\n // Avoid sending the `columns` from the Postgres MessageRelation message.\n // They are not used downstream and the message can be large.\n const {columns: _, keyColumns, replicaIdentity, ...rest} = relation;\n return {\n ...rest,\n rowKey: {\n columns: keyColumns,\n type: replicaIdentity,\n },\n // For now, deprecated columns are sent for backwards compatibility.\n // These can be removed when bumping the MIN_PROTOCOL_VERSION to 5.\n keyColumns,\n replicaIdentity,\n };\n}\n\nclass UnsupportedSchemaChangeError extends Error {\n readonly name = 'UnsupportedSchemaChangeError';\n readonly description: string;\n readonly event: DdlUpdateEvent | SchemaSnapshotEvent;\n\n constructor(\n description: string,\n event: DdlUpdateEvent | SchemaSnapshotEvent,\n options?: ErrorOptions,\n ) {\n super(\n `Replication halted. 
Resync the replica to recover: ${description}`,\n options,\n );\n this.description = description;\n this.event = event;\n }\n}\n\nclass MissingEventTriggerSupport extends Error {\n readonly name = 'MissingEventTriggerSupport';\n\n constructor(msg: string) {\n super(\n `${msg}. Schema changes cannot be reliably replicated without event trigger support.`,\n );\n }\n}\n\n// TODO(0xcadams): should this be a ProtocolError?\nclass ShutdownSignal extends AbortError {\n readonly name = 'ShutdownSignal';\n\n constructor(cause: unknown) {\n super(\n 'shutdown signal received (e.g. another zero-cache taking over the replication stream)',\n {\n cause,\n },\n );\n }\n}\n\nfunction parseLogicalMessageContent<T>(\n {content}: MessageMessage,\n schema: v.Type<T>,\n) {\n const str =\n content instanceof Buffer\n ? content.toString('utf-8')\n : new TextDecoder().decode(content);\n const json = JSON.parse(str);\n return v.parse(json, schema, 'passthrough');\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAqHA,eAAsB,+BACpB,IACA,aACA,OACA,eACA,aACA,SACA,qBAC6E;AAC7E,OAAM,YACJ,IACA,WAAW,MAAM,MAAM,GAAG,MAAM,YAChC,gBACC,KAAK,OAAO,YAAY,KAAK,OAAO,IAAI,aAAa,aAAa,QAAQ,CAC5E;CAED,MAAM,UAAU,IAAI,SAAS,IAAI,cAAc;CAC/C,MAAM,oBAAoB,+BACxB,IAAI,gBAAgB,QAAQ,CAC7B;AACD,SAAQ,OAAO;CAIf,MAAM,KAAK,SAAS,IAAI,YAAY;AACpC,KAAI;AAiBF,SAAO;GAAC;GAAmB,cATN,IAAI,qBACvB,IACA,aACA,OAVsB,MAAM,uBAC5B,IACA,IACA,OACA,kBACD,EAOC,SACA,uBAAuB,KACxB;GAEuC;WAChC;AACR,QAAM,GAAG,KAAK;;;AAIlB,eAAe,uBACb,IACA,KACA,OACA,EACE,gBACA,cAAc,YACd,sBAEF;AAEA,OAAM,kBAAkB,IAAI,KAAK,OAAO,eAAe;CAEvD,MAAM,kBAAkB,MAAM,oBAC5B,IACA,KACA,OACA,gBACA,mBACD;AACD,KAAI,CAAC,gBACH,OAAM,IAAI,gBACR,8CAA8C,iBAC/C;CAIH,MAAM,YAAY,CAAC,GAAG,MAAM,aAAa,CAAC,MAAM;CAChD,MAAM,aAAa,gBAAgB,aAChC,QAAO,MAAK,CAAC,EAAE,WAAW,0BAA0B,MAAM,CAAC,CAAC,CAC5D,MAAM;AACT,KAAI,CAAC,UAAU,WAAW,WAAW,EAAE;AACrC,KAAG,OAAO,8CAA8C,UAAU,GAAG;AACrE,QAAM,IAAI,OAAO,UAAU,MAAM,OAAO,MAAM,SAAS,CAAC;AACxD,QAAM,IAAI,gBACR,2BAA2B,UAAU,2CACjB,WAAW,GAChC;;AAMH,KAAI,CAAC,UAAU,gBAAgB,cAAc,
WAAW,CACtD,OAAM,IAAI,gBACR,0BAA0B,gBAAgB,aAAa,0CACnB,WAAW,GAChD;CAIH,MAAM,SAAS,MAAM,GAAG;0DACgC,IAAI,WAAW,CAAC;IACtE,QAAQ;AACV,KAAI,OAAO,WAAW,WAAW,OAC/B,OAAM,IAAI,gBACR,0BAA0B,OAAO,MAAM,CAAC,gDACN,WAAW,GAC9C;CAGH,MAAM,EAAC,SAAQ;CACf,MAAM,SAAS,MAAM,GAEX;;0BAEc;AACxB,KAAI,OAAO,WAAW,EACpB,OAAM,IAAI,gBAAgB,oBAAoB,KAAK,aAAa;CAElE,MAAM,CAAC,EAAC,YAAY,eAAc;AAClC,KAAI,eAAe,QAAQ,cAAc,OACvC,OAAM,IAAI,gBACR,oBAAoB,KAAK,gEAC1B;AAEH,QAAO;;AAIT,IAAM,4BAA4B;;;;;AAUlC,IAAM,uBAAN,MAAmD;CACjD;CACA;CACA;CACA;CACA;CACA;CACA;CAEA,YACE,IACA,aACA,OACA,SACA,SACA,qBACA;AACA,QAAA,KAAW,GAAG,YAAY,aAAa,gBAAgB;AACvD,QAAA,KAAW,SAAS,IAAI,aAAa;IAElC,iBAAiB;GAClB,YAAY,GAAE,qBAAqB,4BAA2B;GAC/D,CAAC;AACF,QAAA,cAAoB;AACpB,QAAA,QAAc;AACd,QAAA,UAAgB;AAChB,QAAA,UAAgB;AAChB,QAAA,cAAoB,sBAChB,IAAI,YACF,GAAG,YAAY,aAAa,eAAe,EAC3C,OACA,MAAA,IACA,oBACD,GACD;;CAGN,mBAA6D;AAC3D,SAAO,MAAA,cAAoB,MAAA,YAAkB,mBAAmB,GAAG;;CAGrE,MAAM,YACJ,iBACA,mBAAsC,EAAE,EACjB;EACvB,MAAM,EAAC,SAAQ,MAAA;AAEf,QAAM,MAAA,uCAA6C,KAAK;EACxD,MAAM,SAAS,MAAM,uBAAuB,MAAA,IAAU,MAAA,MAAY;AAClE,QAAA,GAAS,OAAO,+BAA+B,OAAO;AACtD,SAAO,MAAA,YAAkB,MAAM,iBAAiB,QAAQ,iBAAiB;;CAG3E,OAAA,YACE,MACA,iBACA,aACA,kBACuB;EACvB,MAAM,cAAc,uBAAuB,gBAAgB,GAAG;EAC9D,MAAM,EAAC,UAAU,SAAQ,MAAM,UAC7B,MAAA,IACA,MAAA,IACA,MACA,CAAC,GAAG,YAAY,aAAa,EAC7B,YACD;EACD,MAAM,QAAQ,IAAI,MAAM,KAAK;EAK7B,MAAM,UAAU,IAAI,wBAAwB,MAAA,IAAU,gBAAgB;EACtE,MAAM,kBAAkB,IAAI,gBAAgB,MAAA,IAAU,UAAS,QAC7D,eAAe,MAAA,IAAU,MAAA,aAAmB,MAAA,SAAe,IAAI,CAChE;AACD,UACG,aAAa,UAAU,gBAAgB,CACvC,aAAa,iBAAiB,MAAM;AACvC,kBAAgB,IAAI,iBAAiB,iBAAiB;EAEtD,MAAM,cAAc,IAAI,YACtB,MAAA,IACA,MAAA,OACA,aACA,MAAA,IACA,MAAA,QAAc,cACf;;;;;EAMD,MAAM,0BACJ,KACA,QACmB;AACnB,OAAI,IAAI,QAAQ,aAAa;AAC3B,YAAQ,WAAW;KACjB;KACA,EAAC,KAAK,IAAI,eAAc;KACxB,EAAC,WAAW,qBAAqB,IAAI,EAAC;KACvC,CAAC;AACF,WAAO;;AAET,OACE,IAAI,QAAQ,aACZ,IAAI,WAAW,MAAA,aAAmB,eAClC;AACA,YAAQ,WAAW,MAAA,YAAkB,iBAAiB,IAAI,CAAC;AAC3D,WAAO;;AAET,UAAO;;AAGT,GAAM,YAAY;AAChB,OAAI;IACF,IAAI,cAAuC;IAC3C,IAAI,gBAAgB;AAEpB,eAAW,MAAM,CAAC,KAAK,QAAQ,UAAU;AACvC,SAAI,CAAC,uBAAuB,K
AAK,IAAI,EAAE;AAIrC,UAAI,CAAC,iBAAiB,aAAa,eAAe;AAChD,eAAQ,QAAQ,YAAY,cAAc;AAC1C,qBAAc;;AAEhB;;AAGF,SAAI,CAAC,aAAa;MAChB,MAAM,MAAM,QAAQ,QAAQ,cAAc;AAC1C,aAAO,QAAQ,YAAa,MAAM;AAClC,oBAAc,EAAE;;KAGlB,IAAI;AACJ,UAAK,MAAM,UAAU,MAAM,YAAY,YAAY,KAAK,IAAI,EAAE;AAC5D,YAAM,QAAQ,KAAK,OAAO;AAC1B,mBAAa;;AAGf,aAAQ,aAAa,IAArB;MACE,KAAK;AACH,uBAAgB;AAChB;MACF,KAAK;AACH,uBAAgB;AAChB,mBAAY,gBAAgB,WAAW,GAAG;AAC1C,WACE,SAAS,WAAW,KACpB,QAAQ,aAAa,GAAG,2BACxB;AAMA,gBAAQ,QAAQ,YAAY,cAAc;AAC1C,sBAAc;;AAEhB;;;YAGC,GAAG;IAGV,MAAM,MAAM,eAAe,EAAE;AAC7B,QAAI,eAAe,eAGjB,OAAM,MAAA,uBAA6B;AAErC,YAAQ,KAAK,IAAI;;MAEjB;AAEJ,QAAA,GAAS,OACP,8BAA8B,KAAK,QAAQ,gBAAgB,oBACzD,MAAA,QAAc,QACf,GACF;AAED,SAAO;GACL,SAAS,QAAQ,UAAU;GAC3B,MAAM,EAAC,OAAM,WAAU,MAAM,IAAI,OAAO,GAAG,UAAU,EAAC;GACvD;;CAGH,OAAA,wBAA+B;AAC7B,MAAI;GACF,MAAM,UAAU,MAAM,oBACpB,MAAA,IACA,MAAA,IACA,MAAA,OACA,MAAA,QAAc,QACf;AACD,OAAI,QACF,OAAA,GAAS,OACP,gCAAgC,MAAA,QAAc,QAAQ,IAAI,UAAU,QAAQ,kBAAkB,GAC/F;WAEI,GAAG;AACV,SAAA,GAAS,OAAO,8BAA8B,EAAE;;;;;;;;;;;;CAapD,OAAA,uCAA8C,YAAoB;EAChE,MAAM,iBAAiB,0BAA0B,MAAA,MAAY;EAC7D,MAAM,iBAAiB,sBAAsB,MAAA,MAAY;EAEzD,MAAM,SAAS,MAAM,MAAM,MAAA,IAAU,OAAM,QAAO;GAGhD,MAAM,SAAS,MAAM,GAEX;;;gCAGgB,eAAe,kBAAkB,eAAe;iCAC/C;AAC3B,SAAA,GAAS,OACP,iCAAiC,KAAK,UAAU,OAAO,GACxD;GACD,MAAM,gBAAgB,GAAG,eAAe,MAAA,MAAY,CAAC;GACrD,MAAM,iBAAiB,MAAM,GAAG;;iBAErB,IAAI,cAAc,CAAC;AAE9B,OAAI,OAAO,WAAW,GAAG;IACvB,MAAM,aAAa,MAAM,GAAG;;;iCAGH,eAAe,kBAAkB,eAAe;;AAEzE,UAAA,GAAS,OACP,QAAQ,WAAW,wCACnB;KAAC,OAAO;KAAY,UAAU;KAAe,CAC9C;AACD,UAAM,IAAI,WACR,oBAAoB,WAAW,+FAGhC;;AAGH,SAAA,GAAS,OACP,uCAAuC,WAAW,KAAK,KAAK,UAC1D,eACD,GACF;AACD,SAAM,GAAG;sBACO,IAAI,cAAc,CAAC,gBAAgB;AACnD,SAAM,GAAG;iBACE,IAAI,cAAc,CAAC;sCACE,MAAA,QAAc;yBAC3B;GACnB,MAAM,gBAAgB,MAAM,GAAsC;kCACtC,IAAI,cAAc,CAAC;AAC/C,SAAA,GAAS,OACP,sCAAsC,WAAW,KAAK,KAAK,UACzD,cACD,GACF;AACD,UAAO;IACP;EAEF,MAAM,OAAO,OAAO,QAAQ,EAAC,UAAS,QAAQ,KAAK,CAAC,KAAK,EAAC,UAAS,IAAI;AACvE,MAAI,KAAK,OACP,OAAA,GAAS,OAAO,uBAAuB,KAAK,eAAe;EAE7D,MAAM,aAAa,OAChB,QAAQ,EAAC,WAAU,SAAS,WAAW,CACvC,KAAK,EAAC,WAAU,KAAK;AAExB,M
AAI,WAAW,OACR,OAAA,qBAA2B,WAAW,CAAC,OAAM,MAChD,MAAA,GAAS,OAAO,oCAAoC,EAAE,CACvD;;CAIL,OAAA,qBAA4B,OAAiB;AAC3C,QAAA,GAAS,OAAO,sCAAsC,QAAQ;EAC9D,MAAM,MAAM,MAAA;AACZ,OAAK,IAAI,IAAI,GAAG,IAAI,GAAG,IACrB,KAAI;AACF,SAAM,GAAG;;iCAEgB,IAAI,MAAM,CAAC;;AAEpC,SAAA,GAAS,OAAO,wBAAwB,QAAQ;AAChD;WACO,GAAG;AAEV,OACE,aAAa,SAAS,iBACtB,EAAE,SAAS,iBAKX,OAAA,GAAS,QAAQ,WAAW,IAAI,EAAE,IAAI,OAAO,EAAE,IAAI,EAAE;OAErD,OAAA,GAAS,OAAO,kBAAkB,SAAS,EAAE;AAE/C,SAAM,MAAM,IAAK;;AAGrB,QAAA,GAAS,OAAO,sCAAsC,QAAQ;;;AAKlE,IAAa,QAAb,MAAuC;CACrC;CACA,2BAA0C;CAE1C,YAAY,MAAoB;AAC9B,QAAA,OAAa;;CAGf,SAAS,QAAmC;AAC1C,UAAQ,OAAO,IAAf;GACE,KAAK;IACH,MAAM,EAAC,cAAa,OAAO;AAC3B,QAAI,OAAO,GAAG,IACZ,OAAA,oBAA0B,UAAU;QAQpC,OAAA,0BAAgC,UAAU;AAE5C;GACF,KAAK;AAKH,QAAI,CAAC,OAAO,GAAG,QACb,OAAA,oBAA0B,OAAO,GAAG,gBAAgB;AAEtD;;;CAIN,qBAAqB,WAAmB;AACtC,QAAA,0BAAgC;;CAGlC,IAAI,WAAwB;AAC1B,MACE,MAAA,2BACA,MAAA,2BAAiC,UAEjC,OAAA,0BAAgC;AAElC,QAAA,QAAc,UAAU;;CAG1B,2BAA2B,WAAmB;AAC5C,MAAI,MAAA,4BAAkC,KACpC,OAAA,QAAc,UAAU;;CAI5B,SAAS,WAAwB;EAC/B,MAAM,MAAM,uBAAuB,UAAU;AAC7C,QAAA,KAAW,KAAK,IAAI;;;AAIxB,IAAM,kBAAkB,eAAE,OAAO;CAC/B,IAAI,eAAE,QAAQ;CACd,YAAY,eAAE,QAAQ;CACtB,cAAc,eAAE,QAAQ;CACzB,CAAC;AAIF,IAAM,cAAN,MAAM,YAAY;CAChB,OAAgB,iBAAiB;CAEjC;CACA;CAKA;CACA;CAEA;CACA,gBAAwB;CACxB;CAEA,YACE,IACA,OACA,IACA,eACA;AACA,QAAA,KAAW;AACX,OAAK,gBAAgB,GAAG,MAAM,MAAM,GAAG,MAAM,WAAW,YAAY;AACpE,QAAA,KAAW;AACX,QAAA,gBAAsB;;CAGxB,OAAA,eAAsB;AACpB,MAAI,MAAA,cAAoB,KAAA,GAAW;GACjC,MAAM,CAAC,EAAC,eAAc,MAAM,MAAA,EAAwC;;AAEpE,SAAA,YAAkB;;AAEpB,SAAO,MAAA;;CAGT,MAAM,kBAAkB,MAAM,KAAK,KAAK,EAAE;EACxC,MAAM,YAAY,MAAA,aAAoB,MAAM,MAAA,cAAoB;AAChE,QAAA,eAAqB,QAAQ;AAE7B,MAAI,aAAa,KACf,OAAM,MAAA,EAAiB;;;YAGjB,KAAK,cAAc;;oBAEX,MAAA,aAAmB;4BACX,IAAI;;;;;;MAY1B,OAAM,MAAA,EAAiB;;;YAGjB,KAAK,cAAc;;oBAEX,MAAA,aAAmB;4BACX,IAAI;;;;;AAM5B,SAAO,EAAC,gBAAgB,KAAI;;CAG9B,oBAAoB,SAAiB;AACnC,eAAa,MAAA,MAAY;AACzB,QAAA,QAAc,WAAW,YAAY;AACnC,OAAI;AACF,UAAM,KAAK,mBAAmB;YACvB,GAAG;AACV,UAAA,GAAS,OAAO,+BAA+B,EAAE;AACjD,UAAA,mBAAyB,MAAA,cAAoB;;KAE9C,QAAQ;;CAGb,iBAAiB,KAA8C;AAC7D,SACE,IA
AI,WAAW,KAAK,eACpB,8BAA8B,IAAI,SACnC;EACD,MAAM,SAAS,2BAA2B,KAAK,gBAAgB;EAC/D,MAAM,MAAM,KAAK,KAAK;EACtB,MAAM,iBAAiB,KAAK,IAC1B,KACA,OAAO,aAAa,MAAA,cACrB;AACD,MAAI,OAAO,OAAO,MAAA,aAKhB,OAAA,mBAAyB,iBAAiB,IAAI;EAEhD,MAAM,EAAC,YAAY,iBAAgB;AACnC,SAAO;GACL;GACA;IACE,KAAK;IACL,WAAW;KACT,aAAa;MACX;MACA;MACA,eAAe;MAChB;KACD;KACD;IACF;GACD,EAAC,WAAW,qBAAqB,IAAI,cAAc,MAAM,EAAC;GAC3D;;;AAWL,IAAM,gCAAgC;AAEtC,IAAM,cAAN,MAAkB;CAChB;CACA;CACA;CACA;CACA;CAEA;CACA;CAEA,YACE,IACA,EAAC,OAAO,YACR,aACA,IACA,eACA;AACA,QAAA,KAAW;AAEX,QAAA,cAAoB,GAAG,MAAM,GAAG;AAChC,QAAA,cAAoB;AACpB,QAAA,gBAAsB;AACtB,QAAA,KAAW;;CAGb,MAAM,YAAY,KAAa,KAA8C;AAC3E,MAAI,MAAA,OAAa;AACf,SAAA,SAAe,MAAA,MAAY;AAC3B,UAAO,EAAE;;AAEX,MAAI;AACF,UAAO,MAAM,MAAA,YAAkB,IAAI;WAC5B,KAAK;AACZ,SAAA,QAAc;IAAC;IAAK;IAAK;IAAK,aAAa;IAAE;AAC7C,SAAA,SAAe,MAAA,MAAY;GAE3B,MAAM,UAAU,2CAA2C,WAAW,IAAI;GAC1E,MAAM,eAA2B,EAAC,OAAO,SAAQ;AACjD,OAAI,eAAe,8BAA8B;AAC/C,iBAAa,SAAS,IAAI;AAC1B,iBAAa,UAAU,IAAI,MAAM;SAEjC,cAAa,SAAS,OAAO,IAAI;AAKnC,UAAO,CACL,CAAC,YAAY,EAAC,KAAK,YAAW,CAAC,EAC/B,CAAC,WAAW;IAAC,KAAK;IAAkB;IAAS;IAAa,CAAC,CAC5D;;;CAIL,UAAU,OAAyB;EACjC,MAAM,EAAC,KAAK,KAAK,KAAK,gBAAe;EACrC,MAAM,MAAM,KAAK,KAAK;AAItB,MAAI,MAAM,cAAc,KAAQ;AAC9B,SAAA,GAAS,QACP,2CAA2C,WAAW,IAAI,CAAC,IAAI,OAC7D,IACD,IACD,eAAe,+BACX,IAAI,MAAM,UAEV;IAAC,GAAG;IAAK,SAAS,KAAA;IAAU,CACjC;AACD,SAAM,cAAc;;;CAKxB,OAAA,YAAmB,KAA2C;AAC5D,UAAQ,IAAI,KAAZ;GACE,KAAK,QACH,QAAO,CACL;IACE;IACA;KAAC,GAAG;KAAK,MAAM;KAAI;IACnB,EAAC,iBAAiB,qBAAqB,KAAK,IAAI,UAAU,CAAC,EAAC;IAC7D,CACF;GAEH,KAAK;AACH,QAAI,EAAE,IAAI,OAAO,IAAI,KACnB,OAAM,IAAI,MACR,qCAAqC,UAAU,IAAI,GACpD;AAEH,WAAO,CACL,CACE,QACA;KACE,GAAG;KACH,UAAU,aAAa,IAAI,SAAS;KAEpC,KAAK,KAAK,IAAI,OAAO,IAAI,IAAI;KAC9B,CACF,CACF;GAGH,KAAK,SACH,QAAO,CACL,CACE,QACA;IACE,GAAG;IACH,UAAU,aAAa,IAAI,SAAS;IAEpC,KAAK,IAAI,OAAO,IAAI;IACrB,CACF,CACF;GAGH,KAAK,SACH,QAAO,CAAC,CAAC,QAAQ;IAAC,GAAG;IAAK,UAAU,aAAa,IAAI,SAAS;IAAC,CAAC,CAAC;GACnE,KAAK,WACH,QAAO,CAAC,CAAC,QAAQ;IAAC,GAAG;IAAK,WAAW,IAAI,UAAU,IAAI,aAAa;IAAC,CAAC,CAAC;GAEzE,KAAK;AACH,QAAI,CAAC,IAAI,O
AAO,WAAW,MAAA,YAAkB,EAAE;AAC7C,WAAA,GAAS,QAAQ,wCAAwC,IAAI,OAAO;AACpE,YAAO,EAAE;;AAEX,YAAQ,IAAI,OAAO,UAAU,MAAA,YAAkB,OAAO,EAAtD;KACE,KAAK;KACL,KAAK,OACH,QAAO,MAAA,iBAAuB,IAAI;KACpC;AACE,YAAA,GAAS,QAAQ,iCAAiC,IAAI,OAAO;AAC7D,aAAO,EAAE;;GAGf,KAAK;AACH,UAAA,mBAAyB,KAAA;AACzB,WAAO,CACL;KACE;KACA;KACA,EAAC,WAAW,qBAAqB,KAAK,IAAI,UAAU,CAAC,EAAC;KACvD,CACF;GAEH,KAAK,WACH,QAAO,MAAA,eAAqB,IAAI;GAClC,KAAK,OACH,QAAO,EAAE;GACX,KAAK,SAGH,QAAO,EAAE;GACX,QAEE,OAAM,IAAI,MAAM,2BAA2B,UAAU,IAAI,GAAG;;;CAIlE;CACA;CAEA,kBAAkB,KAAqB;EACrC,MAAM,QAAQ,2BAA2B,KAAK,uBAAuB;AAGrE,eAAa,MAAA,qBAA2B;EAExC,IAAI;EACJ,MAAM,EAAC,SAAQ;AACf,UAAQ,MAAR;GACE,KAAK;AAEH,UAAA,YAAkB,MAAM;AACxB,WAAO,EAAE;GACX,KAAK;AAEH,qBAAiB,KACf,MAAA,WACA,wCACD;AACD;GACF,KAAK;AACH,qBAAiB,MAAA,oBAA0B;AAC3C;GACF;AACE,UAAA,GAAS,OAAO,sCAAsC,OAAO;AAC7D,WAAO,EAAE;;AAKb,QAAA,mBAAyB,MAAM;AAC/B,MAAI,CAAC,gBAAgB;AACnB,SAAA,GAAS,OAAO,YAAY,IAAI,OAAO,GAAG,KAAK,QAAQ;AACvD,UAAO,EAAE;;AAEX,QAAA,GAAS,OAAO,cAAc,IAAI,OAAO,GAAG,KAAK,SAAS,MAAM;EAEhE,MAAM,UAAU,MAAA,kBAAwB,gBAAgB,MAAM,CAAC,KAC7D,WAAU,CAAC,QAAQ,OAAO,CAC3B;AAED,QAAA,GACG,YAAY,OAAO,MAAM,MAAM,IAAI,CACnC,YAAY,SAAS,MAAM,QAAQ,MAAM,CACzC,OAAO,GAAG,QAAQ,OAAO,oBAAoB,EAAC,SAAQ,CAAC;EAE1D,MAAM,oBAAoB,6CACxB,MAAM,OACP;AACD,MAAI,kBACF,OAAA,uBAA6B,WAAW,YAAY;AAClD,OAAI;AACF,UAAM,kBAAkB,MAAM,MAAA,IAAU,MAAA,GAAS;YAC1C,KAAK;AACZ,UAAA,GAAS,OAAO,oCAAoC,IAAI;;KAEzD,8BAA8B;AAGnC,SAAO;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA8BT,mBACE,WACA,QACgB;AAChB,MAAI;GACF,MAAM,CAAC,SAAS,WAAW,UAAU,UAAU;GAC/C,MAAM,CAAC,SAAS,WAAW,UAAU,OAAO,OAAO;GACnD,MAAM,UAA0B,EAAE;AAGlC,QAAK,MAAM,SAAS,QAAQ,QAAQ,CAClC,UAAS,MAAA,IAAU,MAAM;GAG3B,MAAM,CAAC,YAAY,cAAc,qBAAqB,SAAS,QAAQ;GAQvE,MAAM,UAAU,aAAa,SAAS,QAAQ;AAC9C,QAAK,MAAM,MAAM,QACf,KACE,2BACE,KAAK,QAAQ,IAAI,GAAG,CAAC,EACrB,KAAK,QAAQ,IAAI,GAAG,CAAC,EACrB,SACA,QACD,EACD;AACA,eAAW,IAAI,GAAG;AAClB,eAAW,IAAI,GAAG;;AAItB,QAAK,MAAM,MAAM,YAAY;IAC3B,MAAM,EAAC,QAAQ,SAAQ,KAAK,QAAQ,IAAI,GAAG,CAAC;AAC5C,YAAQ,KAAK;KAAC,KAAK;KAAc,IAAI;MAAC;MAAQ;MAAK;KAAC,CAAC;;GAIvD,MAAM,CAAC,YAAY,cAAc,qBAAqB,SAAS,QAAQ;AACvE
,QAAK,MAAM,MAAM,YAAY;IAC3B,MAAM,EAAC,QAAQ,SAAQ,KAAK,QAAQ,IAAI,GAAG,CAAC;AAC5C,YAAQ,KAAK;KAAC,KAAK;KAAc,IAAI;MAAC;MAAQ;MAAK;KAAC,CAAC;;GAGvD,MAAM,SAAS,aAAa,SAAS,QAAQ;AAC7C,QAAK,MAAM,MAAM,OACf,SAAQ,KACN,GAAG,MAAA,gBACD,KAAK,QAAQ,IAAI,GAAG,CAAC,EACrB,KAAK,QAAQ,IAAI,GAAG,CAAC,EACrB,OAAO,MAAM,IACd,CACF;AAGH,QAAK,MAAM,MAAM,YAAY;IAC3B,MAAM,OAAO,KAAK,QAAQ,IAAI,GAAG,CAAC;IAClC,MAAM,cAA2B;KAC/B,KAAK;KACL;KACA,UAAU,YAAY,KAAK;KAC5B;AACD,QAAI,CAAC,OAAO,MAAM,IAAI,WAAW,SAAS,CAIxC,aAAY,WAAW,UAAU,KAAK,UAAU,EAAC,KAAK,cAAa,EACjE,QACD,EAAE;AAEL,YAAQ,KAAK,YAAY;;AAK3B,QAAK,MAAM,MAAM,YAAY;IAC3B,MAAM,OAAO,KAAK,QAAQ,IAAI,GAAG,CAAC;AAClC,YAAQ,KAAK;KAAC,KAAK;KAAgB;KAAK,CAAC;;AAE3C,UAAO;WACA,GAAG;AACV,SAAM,IAAI,6BAA6B,OAAO,EAAE,EAAE,QAAQ,EAAC,OAAO,GAAE,CAAC;;;CAIzE,iBACE,UACA,UACA,QACgB;EAChB,MAAM,UAA0B,EAAE;AAClC,MACE,SAAS,WAAW,SAAS,UAC7B,SAAS,SAAS,SAAS,KAE3B,SAAQ,KAAK;GACX,KAAK;GACL,KAAK;IAAC,QAAQ,SAAS;IAAQ,MAAM,SAAS;IAAK;GACnD,KAAK;IAAC,QAAQ,SAAS;IAAQ,MAAM,SAAS;IAAK;GACpD,CAAC;EAEJ,MAAM,cAAc,YAAY,SAAS;EACzC,MAAM,cAAc,YAAY,SAAS;AACzC,MAAI,CAAC,UAAU,aAAa,YAAY,CACtC,SAAQ,KAAK;GACX,KAAK;GACL,OAAO;IAAC,QAAQ,SAAS;IAAQ,MAAM,SAAS;IAAK;GACrD,KAAK;GACL,KAAK;GACN,CAAC;EAEJ,MAAM,QAAQ;GAAC,QAAQ,SAAS;GAAQ,MAAM,SAAS;GAAK;EAC5D,MAAM,aAAa,YAAY,SAAS,QAAQ;EAChD,MAAM,aAAa,YAAY,SAAS,QAAQ;EAGhD,MAAM,CAAC,SAAS,SAAS,qBAAqB,YAAY,WAAW;AACrE,OAAK,MAAM,MAAM,SAAS;GACxB,MAAM,EAAC,MAAM,WAAU,KAAK,WAAW,IAAI,GAAG,CAAC;AAC/C,WAAQ,KAAK;IAAC,KAAK;IAAe;IAAO;IAAO,CAAC;;EAInD,MAAM,OAAO,aAAa,YAAY,WAAW;AACjD,OAAK,MAAM,MAAM,MAAM;GACrB,MAAM,EAAC,MAAM,SAAS,GAAG,YAAW,KAAK,WAAW,IAAI,GAAG,CAAC;GAC5D,MAAM,EAAC,MAAM,SAAS,GAAG,YAAW,KAAK,WAAW,IAAI,GAAG,CAAC;AAK5D,OACE,YAAY,WACZ,QAAQ,aAAa,QAAQ,YAC7B,QAAQ,YAAY,QAAQ,QAE5B,SAAQ,KAAK;IACX,KAAK;IACL;IACA,KAAK;KAAC,MAAM;KAAS,MAAM;KAAQ;IACnC,KAAK;KAAC,MAAM;KAAS,MAAM;KAAQ;IACpC,CAAC;;EAQN,MAAM,iBAAiB,WAAW;AAGlC,OAAK,MAAM,MAAM,OAAO;GACtB,MAAM,EAAC,MAAM,GAAG,SAAQ,KAAK,WAAW,IAAI,GAAG,CAAC;GAChD,MAAM,SAAS;IAAC;IAAM;IAAK;GAC3B,MAAM,YAAuB;IAC3B,KAAK;IACL;IACA;IACA,eAAe,YAAY,SAAS;IACrC;AACD,OAAI,gBAAgB;AACl
B,cAAU,OAAO,KAAK,OAAO;AAC7B,cAAU,WAAW,EAAC,QAAQ,KAAK,KAAI;SAGvC,KAAI;AACF,4BAAwB,MAAM,MAAM,OAAO;YACpC,GAAG;AACV,QAAI,EAAE,aAAa,+BAGjB,OAAM;AAQR,UAAA,GAAS,OACP,sBAAsB,MAAM,KAAK,GAAG,KAAK,IAAI,OAAO,EAAE,GACvD;AACD,cAAU,OAAO,KAAK,OAAO;AAC7B,cAAU,WAAW,EAAC,QAAQ,KAAK,KAAI;;AAG3C,WAAQ,KAAK,UAAU;;AAEzB,SAAO;;;;;;;;;;;;;;;;;;;;CAqBT,OAAA,eAAsB,KAAoD;EACxE,MAAM,EAAC,cAAc,iBAAgB,MAAA;AACrC,MAAI,aACF,QAAO,EAAE;EAEX,MAAM,gBAAgB,MAAM,mBAAmB,MAAA,IAAU,aAAa;EACtE,MAAM,aAAa,oBAAoB,MAAA,eAAqB,cAAc;AAC1E,MAAI,eAAe,KACjB,OAAM,IAAI,2BAA2B,WAAW;EAMlD,MAAM,OAAO,MAAA,cAAoB,OAAO,MACtC,MAAK,EAAE,QAAQ,IAAI,YACpB;AACD,MAAI,CAAC,KAEH,OAAM,IAAI,2BACR,kCAAkC,UAAU,IAAI,GACjD;AAEH,MAAI,kBAAkB,MAAM,IAAI,CAC9B,OAAM,IAAI,2BACR,gDAAgD,UAAU,KAAK,CAAC,MAAM,UAAU,IAAI,GACrF;AAEH,SAAO,EAAE;;;AAIb,SAAS,oBACP,GACA,GACe;AAEf,KAAI,EAAE,OAAO,WAAW,EAAE,OAAO,OAC/B,QAAO;AAET,MAAK,IAAI,IAAI,GAAG,IAAI,EAAE,OAAO,QAAQ,KAAK;EACxC,MAAM,KAAK,EAAE,OAAO;EACpB,MAAM,KAAK,EAAE,OAAO;EACpB,MAAM,aAAa,mBAAmB,IAAI,GAAG;AAC7C,MAAI,WACF,QAAO;;AAGX,QAAO;;AAIT,IAAM,eAAe,GAAyB,MAC5C,EAAE,GAAG,MAAM,EAAE,GAAG,MAAM,KAAK,EAAE,GAAG,MAAM,EAAE,GAAG,MAAM,IAAI;AAEvD,SAAS,mBACP,GACA,GACe;AACf,KAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,WAAW,EAAE,UAAU,EAAE,SAAS,EAAE,KAC3D,QAAO,UAAU,EAAE,KAAK,wBAAwB,EAAE,KAAK;AAEzD,KAAI,CAAC,UAAU,EAAE,YAAY,EAAE,WAAW,CACxC,QAAO,yBAAyB,EAAE,KAAK;CAEzC,MAAM,QAAQ,OAAO,QAAQ,EAAE,QAAQ,CAAC,KAAK,YAAY;CACzD,MAAM,QAAQ,OAAO,QAAQ,EAAE,QAAQ,CAAC,KAAK,YAAY;AACzD,KACE,MAAM,WAAW,MAAM,UACvB,MAAM,MAAM,CAAC,OAAO,OAAO,MAAM;EAC/B,MAAM,CAAC,OAAO,QAAQ,MAAM;AAC5B,SACE,UAAU,SACV,KAAK,QAAQ,KAAK,OAClB,KAAK,YAAY,KAAK,WACtB,KAAK,YAAY,KAAK;GAExB,CAEF,QAAO,qBAAqB,EAAE,KAAK;AAErC,QAAO;;AAGT,SAAgB,kBAAkB,GAAuB,GAAqB;AAC5E,KAAI,EAAE,QAAQ,EAAE,eAAe,EAAE,WAAW,EAAE,UAAU,EAAE,SAAS,EAAE,KACnE,QAAO;AAET,KAME,EAAE,oBAAoB,aACtB,CAAC,OAAO,IAAI,IAAI,EAAE,WAAW,EAAE,IAAI,IAAI,EAAE,WAAW,CAAC,CAErD,QAAO;CAET,MAAM,QAAQ,OAAO,QAAQ,EAAE,QAAQ,CAAC,KAAK,YAAY;CACzD,MAAM,QAAQ,EAAE;AAChB,QACE,MAAM,WAAW,MAAM,UACvB,MAAM,MAAM,CAAC,OAAO,OAAO,MAAM;EAC/B,MAAM,OAAO,MAAM;AACnB,SAAO,UAAU,KAA
K,QAAQ,KAAK,YAAY,KAAK;GACpD;;AAIN,SAAS,eAAe,GAAmB;AACzC,KAAI,EAAE,aAAa,OACjB,QAAO,IAAI,MAAM,OAAO,EAAE,CAAC;AAE7B,KAAI,aAAa,SAAS,iBAAiB,EAAE,SAAS,kBACpD,QAAO,IAAI,eAAe,EAAE;AAE9B,QAAO;;AAET,IAAM,YAAY,OAAmB,GAAG,GAAG,OAAO,GAAG,GAAG;AAExD,SAAS,UAAU,WAA4B;AAC7C,QAAO,CAGL,IAAI,IAAI,UAAU,OAAO,KAAI,MAAK,CAAC,EAAE,KAAK,EAAE,CAAC,CAAC,EAC9C,IAAI,IAAI,UAAU,QAAQ,KAAI,MAAK,CAAC,SAAS,EAAE,EAAE,EAAE,CAAC,CAAC,CACtD;;;;;;;;;;AAWH,SAAS,2BACP,MACA,MACA,YACA,YACS;AACT,KACE,KAAK,WAAW,KAAK,UACrB,KAAK,iBAAiB,KAAK,gBAC3B,KAAK,sBAAsB,KAAK,qBAChC,KAAK,gBAAgB,KAAK,YAE1B,QAAO;CAGT,MAAM,YAAY,yBAChB,YACA,KAAK,QACL,KAAK,UACN;CACD,MAAM,YAAY,yBAChB,YACA,KAAK,QACL,KAAK,UACN;AACD,KAAI,CAAC,aAAa,CAAC,UAEjB,QAAO;CAGT,MAAM,cAAc,OAAO,QAAQ,KAAK,QAAQ;CAChD,MAAM,cAAc,OAAO,QAAQ,KAAK,QAAQ;AAChD,KAAI,YAAY,WAAW,YAAY,OACrC,QAAO;CAIT,MAAM,eAAe,IAAI,IACvB,YAAY,KAAK,CAAC,MAAM,SAAS,CAAC,UAAU,QAAQ,OAAO,KAAK,IAAI,CAAC,CACtE;CACD,MAAM,eAAe,IAAI,IACvB,YAAY,KAAK,CAAC,MAAM,SAAS,CAAC,UAAU,QAAQ,OAAO,KAAK,IAAI,CAAC,CACtE;AAED,KAAI,aAAa,IAAI,KAAA,EAAU,IAAI,aAAa,IAAI,KAAA,EAAU,CAE5D,QAAO;AAET,KAAI,aAAa,SAAS,aAAa,KACrC,QAAO;AAET,MAAK,MAAM,CAAC,QAAQ,QAAQ,aAC1B,KAAI,aAAa,IAAI,OAAO,KAAK,IAC/B,QAAO;AAGX,QAAO;;AAGT,SAAS,yBACP,QACA,QACA,MAC+C;AAC/C,MAAK,MAAM,SAAS,OAAO,QAAQ,CACjC,KAAI,MAAM,WAAW,UAAU,MAAM,SAAS,KAC5C,QAAO;;AAMb,SAAS,YACP,SAC0C;CAC1C,MAAM,2BAAW,IAAI,KAA0C;AAC/D,MAAK,MAAM,CAAC,MAAM,SAAS,OAAO,QAAQ,QAAQ,CAGhD,UAAS,IAAI,KAAK,KAAK;EAAC,GAAG;EAAM;EAAK,CAAC;AAEzC,QAAO;;AAGT,SAAS,YAAY,OAAyD;AAC5E,QAAO;EACL,WAAW,KAAK,MAAM,UAAU;EAChC,aAAa,MAAM;EACnB,QAAQ,OAAO,YACb,MAAM,uBAAuB,KAAI,MAAK,CACpC,GACA,EAAC,QAAQ,MAAM,QAAQ,GAAG,KAAI,CAC/B,CAAC,CACH;EACF;;AAKH,SAAS,aAAa,UAA6C;CAGjE,MAAM,EAAC,SAAS,GAAG,YAAY,iBAAiB,GAAG,SAAQ;AAC3D,QAAO;EACL,GAAG;EACH,QAAQ;GACN,SAAS;GACT,MAAM;GACP;EAGD;EACA;EACD;;AAGH,IAAM,+BAAN,cAA2C,MAAM;CAC/C,OAAgB;CAChB;CACA;CAEA,YACE,aACA,OACA,SACA;AACA,QACE,sDAAsD,eACtD,QACD;AACD,OAAK,cAAc;AACnB,OAAK,QAAQ;;;AAIjB,IAAM,6BAAN,cAAyC,MAAM;CAC7C,OAAgB;CAEhB,YAAY,KAAa;AACvB,QACE,GAAG,IAAI,+EACR;;;AAKL,IAAM,iBAAN,cAA6B,WAAW
;CACtC,OAAgB;CAEhB,YAAY,OAAgB;AAC1B,QACE,yFACA,EACE,OACD,CACF;;;AAIL,SAAS,2BACP,EAAC,WACD,QACA;CACA,MAAM,MACJ,mBAAmB,SACf,QAAQ,SAAS,QAAQ,GACzB,IAAI,aAAa,CAAC,OAAO,QAAQ;AAEvC,QAAO,MADM,KAAK,MAAM,IAAI,EACP,QAAQ,cAAc"}
|
|
1
|
+
{"version":3,"file":"change-source.js","names":["#lc","#db","#upstreamUri","#shard","#replica","#context","#lagReporter","#stopExistingReplicationSlotSubscribers","#startStream","#logCurrentReplicaInfo","#dropReplicationSlots","#acks","#expectDownstreamAck","#ackIfDownstreamIsCaughtUp","#waitingForDownstreamAck","#sendAck","#lagIntervalMs","#getPgVersion","#pgVersion","#expectingLagReport","#scheduleNextReport","#timer","#shardPrefix","#shardConfig","#initialSchema","#error","#logError","#makeChanges","#handleDdlMessage","#lastSnapshotInTx","#handleRelation","#replicaIdentityTimer","#preSchema","#makeSchemaChanges","#getTableChanges"],"sources":["../../../../../../../zero-cache/src/services/change-source/pg/change-source.ts"],"sourcesContent":["import {\n PG_ADMIN_SHUTDOWN,\n PG_OBJECT_IN_USE,\n} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport {nanoid} from 'nanoid';\nimport postgres from 'postgres';\nimport {AbortError} from '../../../../../shared/src/abort-error.ts';\nimport {assert} from '../../../../../shared/src/asserts.ts';\nimport {stringify} from '../../../../../shared/src/bigint-json.ts';\nimport {deepEqual} from '../../../../../shared/src/json.ts';\nimport {must} from '../../../../../shared/src/must.ts';\nimport {mapValues} from '../../../../../shared/src/objects.ts';\nimport {\n equals,\n intersection,\n symmetricDifferences,\n} from '../../../../../shared/src/set-utils.ts';\nimport {sleep} from '../../../../../shared/src/sleep.ts';\nimport * as v from '../../../../../shared/src/valita.ts';\nimport {Database} from '../../../../../zqlite/src/db.ts';\nimport {\n mapPostgresToLiteColumn,\n UnsupportedColumnDefaultError,\n} from '../../../db/pg-to-lite.ts';\nimport {runTx} from '../../../db/run-transaction.ts';\nimport type {\n ColumnSpec,\n PublishedIndexSpec,\n PublishedTableSpec,\n} from '../../../db/specs.ts';\nimport {StatementRunner} from '../../../db/statements.ts';\nimport {type LexiVersion} from 
'../../../types/lexi-version.ts';\nimport {pgClient, type PostgresDB} from '../../../types/pg.ts';\nimport {\n upstreamSchema,\n type ShardConfig,\n type ShardID,\n} from '../../../types/shards.ts';\nimport {\n majorVersionFromString,\n majorVersionToString,\n} from '../../../types/state-version.ts';\nimport type {Sink} from '../../../types/streams.ts';\nimport {AutoResetSignal} from '../../change-streamer/schema/tables.ts';\nimport {\n getSubscriptionStateAndContext,\n type SubscriptionState,\n type SubscriptionStateAndContext,\n} from '../../replicator/schema/replication-state.ts';\nimport type {ChangeSource, ChangeStream} from '../change-source.ts';\nimport {BackfillManager} from '../common/backfill-manager.ts';\nimport {\n ChangeStreamMultiplexer,\n type Listener,\n} from '../common/change-stream-multiplexer.ts';\nimport {initReplica} from '../common/replica-schema.ts';\nimport type {\n BackfillRequest,\n DownstreamStatusMessage,\n JSONObject,\n} from '../protocol/current.ts';\nimport type {\n ColumnAdd,\n Identifier,\n MessageRelation,\n SchemaChange,\n TableCreate,\n} from '../protocol/current/data.ts';\nimport type {\n ChangeStreamData,\n ChangeStreamMessage,\n Data,\n} from '../protocol/current/downstream.ts';\nimport type {ColumnMetadata, TableMetadata} from './backfill-metadata.ts';\nimport {streamBackfill} from './backfill-stream.ts';\nimport {\n initialSync,\n type InitialSyncOptions,\n type ServerContext,\n} from './initial-sync.ts';\nimport type {\n Message,\n MessageMessage,\n MessageRelation as PostgresRelation,\n} from './logical-replication/pgoutput.types.ts';\nimport {subscribe, type StreamMessage} from './logical-replication/stream.ts';\nimport {fromBigInt, toBigInt, toStateVersionString, type LSN} from './lsn.ts';\nimport {\n replicationEventSchema,\n type DdlUpdateEvent,\n type SchemaSnapshotEvent,\n} from './schema/ddl.ts';\nimport {updateShardSchema} from './schema/init.ts';\nimport {\n getPublicationInfo,\n type PublishedSchema,\n type 
PublishedTableWithReplicaIdentity,\n} from './schema/published.ts';\nimport {\n dropShard,\n getInternalShardConfig,\n getReplicaAtVersion,\n internalPublicationPrefix,\n legacyReplicationSlot,\n replicaIdentitiesForTablesWithoutPrimaryKeys,\n replicationSlotExpression,\n type InternalShardConfig,\n type Replica,\n} from './schema/shard.ts';\nimport {validate} from './schema/validation.ts';\n\n/**\n * Initializes a Postgres change source, including the initial sync of the\n * replica, before streaming changes from the corresponding logical replication\n * stream.\n */\nexport async function initializePostgresChangeSource(\n lc: LogContext,\n upstreamURI: string,\n shard: ShardConfig,\n replicaDbFile: string,\n syncOptions: InitialSyncOptions,\n context: ServerContext,\n lagReportIntervalMs?: number,\n): Promise<{subscriptionState: SubscriptionState; changeSource: ChangeSource}> {\n await initReplica(\n lc,\n `replica-${shard.appID}-${shard.shardNum}`,\n replicaDbFile,\n (log, tx) => initialSync(log, shard, tx, upstreamURI, syncOptions, context),\n );\n\n const replica = new Database(lc, replicaDbFile);\n const subscriptionState = getSubscriptionStateAndContext(\n new StatementRunner(replica),\n );\n replica.close();\n\n // Check that upstream is properly setup, and throw an AutoReset to re-run\n // initial sync if not.\n const db = pgClient(lc, upstreamURI);\n try {\n const upstreamReplica = await checkAndUpdateUpstream(\n lc,\n db,\n shard,\n subscriptionState,\n );\n\n const changeSource = new PostgresChangeSource(\n lc,\n upstreamURI,\n shard,\n upstreamReplica,\n context,\n lagReportIntervalMs ?? 
null,\n );\n\n return {subscriptionState, changeSource};\n } finally {\n await db.end();\n }\n}\n\nasync function checkAndUpdateUpstream(\n lc: LogContext,\n sql: PostgresDB,\n shard: ShardConfig,\n {\n replicaVersion,\n publications: subscribed,\n initialSyncContext,\n }: SubscriptionStateAndContext,\n) {\n // Perform any shard schema updates\n await updateShardSchema(lc, sql, shard, replicaVersion);\n\n const upstreamReplica = await getReplicaAtVersion(\n lc,\n sql,\n shard,\n replicaVersion,\n initialSyncContext,\n );\n if (!upstreamReplica) {\n throw new AutoResetSignal(\n `No replication slot for replica at version ${replicaVersion}`,\n );\n }\n\n // Verify that the publications match what is being replicated.\n const requested = [...shard.publications].sort();\n const replicated = upstreamReplica.publications\n .filter(p => !p.startsWith(internalPublicationPrefix(shard)))\n .sort();\n if (!deepEqual(requested, replicated)) {\n lc.warn?.(`Dropping shard to change publications to: [${requested}]`);\n await sql.unsafe(dropShard(shard.appID, shard.shardNum));\n throw new AutoResetSignal(\n `Requested publications [${requested}] do not match configured ` +\n `publications: [${replicated}]`,\n );\n }\n\n // Sanity check: The subscription state on the replica should have the\n // same publications. 
This should be guaranteed by the equivalence of the\n // replicaVersion, but it doesn't hurt to verify.\n if (!deepEqual(upstreamReplica.publications, subscribed)) {\n throw new AutoResetSignal(\n `Upstream publications [${upstreamReplica.publications}] do not ` +\n `match subscribed publications [${subscribed}]`,\n );\n }\n\n // Verify that the publications exist.\n const exists = await sql`\n SELECT pubname FROM pg_publication WHERE pubname IN ${sql(subscribed)};\n `.values();\n if (exists.length !== subscribed.length) {\n throw new AutoResetSignal(\n `Upstream publications [${exists.flat()}] do not contain ` +\n `all subscribed publications [${subscribed}]`,\n );\n }\n\n const {slot} = upstreamReplica;\n const result = await sql<\n {restartLSN: LSN | null; walStatus: string | null}[]\n > /*sql*/ `\n SELECT restart_lsn as \"restartLSN\", wal_status as \"walStatus\" FROM pg_replication_slots\n WHERE slot_name = ${slot}`;\n if (result.length === 0) {\n throw new AutoResetSignal(`replication slot ${slot} is missing`);\n }\n const [{restartLSN, walStatus}] = result;\n if (restartLSN === null || walStatus === 'lost') {\n throw new AutoResetSignal(\n `replication slot ${slot} has been invalidated for exceeding the max_slot_wal_keep_size`,\n );\n }\n return upstreamReplica;\n}\n\n// Parameterize this if necessary. 
In practice starvation may never happen.\nconst MAX_LOW_PRIORITY_DELAY_MS = 1000;\n\ntype ReservationState = {\n lastWatermark?: string;\n};\n\n/**\n * Postgres implementation of a {@link ChangeSource} backed by a logical\n * replication stream.\n */\nclass PostgresChangeSource implements ChangeSource {\n readonly #lc: LogContext;\n readonly #db: PostgresDB;\n readonly #upstreamUri: string;\n readonly #shard: ShardID;\n readonly #replica: Replica;\n readonly #context: ServerContext;\n readonly #lagReporter: LagReporter | null;\n\n constructor(\n lc: LogContext,\n upstreamUri: string,\n shard: ShardID,\n replica: Replica,\n context: ServerContext,\n lagReportIntervalMs: number | null,\n ) {\n this.#lc = lc.withContext('component', 'change-source');\n this.#db = pgClient(lc, upstreamUri, {\n // used occasionally for schema changes, periodically for lag reporting\n ['idle_timeout']: 60,\n connection: {['application_name']: 'zero-replication-monitor'},\n });\n this.#upstreamUri = upstreamUri;\n this.#shard = shard;\n this.#replica = replica;\n this.#context = context;\n this.#lagReporter = lagReportIntervalMs\n ? new LagReporter(\n lc.withContext('component', 'lag-reporter'),\n shard,\n this.#db,\n lagReportIntervalMs,\n )\n : null;\n }\n\n startLagReporter(): Promise<{nextSendTimeMs: number}> | null {\n return this.#lagReporter ? 
this.#lagReporter.initiateLagReport(true) : null;\n }\n\n async startStream(\n clientWatermark: string,\n backfillRequests: BackfillRequest[] = [],\n ): Promise<ChangeStream> {\n const {slot} = this.#replica;\n\n await this.#stopExistingReplicationSlotSubscribers(slot);\n const config = await getInternalShardConfig(this.#db, this.#shard);\n this.#lc.info?.(`starting replication stream@${slot}`);\n return this.#startStream(slot, clientWatermark, config, backfillRequests);\n }\n\n async #startStream(\n slot: string,\n clientWatermark: string,\n shardConfig: InternalShardConfig,\n backfillRequests: BackfillRequest[],\n ): Promise<ChangeStream> {\n const clientStart = majorVersionFromString(clientWatermark) + 1n;\n const {messages, acks} = await subscribe(\n this.#lc,\n this.#db,\n slot,\n [...shardConfig.publications],\n clientStart,\n );\n const acker = new Acker(acks);\n\n // The ChangeStreamMultiplexer facilitates cooperative streaming from\n // the main replication stream and backfill streams initiated by the\n // BackfillManager.\n const changes = new ChangeStreamMultiplexer(this.#lc, clientWatermark);\n const backfillManager = new BackfillManager(this.#lc, changes, req =>\n streamBackfill(this.#lc, this.#upstreamUri, this.#replica, req),\n );\n changes\n .addProducers(messages, backfillManager)\n .addListeners(backfillManager, acker);\n backfillManager.run(clientWatermark, backfillRequests);\n\n const changeMaker = new ChangeMaker(\n this.#lc,\n this.#shard,\n shardConfig,\n this.#db,\n this.#replica.initialSchema,\n );\n\n /**\n * Determines if the incoming message is transactional, otherwise handling\n * non-transactional messages with a downstream status message.\n */\n const isTransactionalMessage = (\n lsn: bigint,\n msg: StreamMessage[1],\n ): msg is Message => {\n if (\n msg.tag === 'message' &&\n msg.prefix === this.#lagReporter?.messagePrefix\n ) {\n changes.pushStatus(this.#lagReporter.processLagReport(msg));\n return false;\n }\n // Checks if we are 
passed the LSN of the expected lag report, in which\n // case a new one is initiated.\n this.#lagReporter?.checkCurrentLSN(lsn);\n\n if (msg.tag === 'keepalive') {\n changes.pushStatus([\n 'status',\n {ack: msg.shouldRespond},\n {watermark: majorVersionToString(lsn)},\n ]);\n return false;\n }\n return true;\n };\n\n void (async () => {\n try {\n let reservation: ReservationState | null = null;\n let inTransaction = false;\n\n for await (const [lsn, msg] of messages) {\n if (!isTransactionalMessage(lsn, msg)) {\n // If we're not in a transaction but the last reservation was kept\n // because of pending keepalives or lag reports in the queue,\n // release the reservation.\n if (!inTransaction && reservation?.lastWatermark) {\n changes.release(reservation.lastWatermark);\n reservation = null;\n }\n continue;\n }\n\n if (!reservation) {\n const res = changes.reserve('replication');\n typeof res === 'string' || (await res); // awaits should be uncommon\n reservation = {};\n }\n\n let lastChange: ChangeStreamMessage | undefined;\n for (const change of await changeMaker.makeChanges(lsn, msg)) {\n await changes.push(change); // Allow the change-streamer to push back.\n lastChange = change;\n }\n\n switch (lastChange?.[0]) {\n case 'begin':\n inTransaction = true;\n break;\n case 'commit':\n inTransaction = false;\n reservation.lastWatermark = lastChange[2].watermark;\n if (\n messages.queued === 0 ||\n changes.waiterDelay() > MAX_LOW_PRIORITY_DELAY_MS\n ) {\n // After each transaction, release the reservation:\n // - if there are no pending upstream messages\n // - or if a low priority request has been waiting for longer\n // than MAX_LOW_PRIORITY_DELAY_MS. 
This is to prevent\n // (backfill) starvation on very active upstreams.\n changes.release(reservation.lastWatermark);\n reservation = null;\n }\n break;\n }\n }\n } catch (e) {\n // Note: no need to worry about reservations here since downstream\n // is being completely canceled.\n const err = translateError(e);\n if (err instanceof ShutdownSignal) {\n // Log the new state of the replica to surface information about the\n // server that sent the shutdown signal, if any.\n await this.#logCurrentReplicaInfo();\n }\n changes.fail(err);\n }\n })();\n\n this.#lc.info?.(\n `started replication stream@${slot} from ${clientWatermark} (replicaVersion: ${\n this.#replica.version\n })`,\n );\n\n return {\n changes: changes.asSource(),\n acks: {push: status => acker.ack(status[2].watermark)},\n };\n }\n\n async #logCurrentReplicaInfo() {\n try {\n const replica = await getReplicaAtVersion(\n this.#lc,\n this.#db,\n this.#shard,\n this.#replica.version,\n );\n if (replica) {\n this.#lc.info?.(\n `Shutdown signal from replica@${this.#replica.version}: ${stringify(replica.subscriberContext)}`,\n );\n }\n } catch (e) {\n this.#lc.warn?.(`error logging replica info`, e);\n }\n }\n\n /**\n * Stops replication slots associated with this shard, and returns\n * a `cleanup` task that drops any slot other than the specified\n * `slotToKeep`.\n *\n * Note that replication slots created after `slotToKeep` (as indicated by\n * the timestamp suffix) are preserved, as those are newly syncing replicas\n * that will soon take over the slot.\n */\n async #stopExistingReplicationSlotSubscribers(slotToKeep: string) {\n const slotExpression = replicationSlotExpression(this.#shard);\n const legacySlotName = legacyReplicationSlot(this.#shard);\n\n const result = await runTx(this.#db, async sql => {\n // Note: `slot_name <= slotToKeep` uses a string compare of the millisecond\n // timestamp, which works until it exceeds 13 digits (sometime in 2286).\n const result = await sql<\n {slot: string; pid: 
string | null; terminated: boolean | null}[]\n > /*sql*/ `\n SELECT slot_name as slot, pg_terminate_backend(active_pid) as terminated, active_pid as pid\n FROM pg_replication_slots \n WHERE (slot_name LIKE ${slotExpression} OR slot_name = ${legacySlotName})\n AND slot_name <= ${slotToKeep}`;\n this.#lc.info?.(\n `terminated replication slots: ${JSON.stringify(result)}`,\n );\n const replicasTable = `${upstreamSchema(this.#shard)}.replicas`;\n const replicasBefore = await sql`\n SELECT slot, version, \"initialSyncContext\", \"subscriberContext\" \n FROM ${sql(replicasTable)} ORDER BY slot`;\n\n if (result.length === 0) {\n const shardSlots = await sql`\n SELECT slot_name as slot, active, active_pid as pid\n FROM pg_replication_slots\n WHERE slot_name LIKE ${slotExpression} OR slot_name = ${legacySlotName}\n ORDER BY slot_name`;\n this.#lc.warn?.(\n `slot ${slotToKeep} not found while cleaning subscribers`,\n {slots: shardSlots, replicas: replicasBefore},\n );\n throw new AbortError(\n `replication slot ${slotToKeep} is missing. 
A different ` +\n `replication-manager should now be running on a new ` +\n `replication slot.`,\n );\n }\n // Clear the state of the older replicas.\n this.#lc.info?.(\n `replicas before cleanup (slotToKeep=${slotToKeep}): ${JSON.stringify(\n replicasBefore,\n )}`,\n );\n await sql`\n DELETE FROM ${sql(replicasTable)} WHERE slot < ${slotToKeep}`;\n await sql`\n UPDATE ${sql(replicasTable)} \n SET \"subscriberContext\" = ${this.#context}\n WHERE slot = ${slotToKeep}`;\n const replicasAfter = await sql<{slot: string; version: string}[]>`\n SELECT slot, version FROM ${sql(replicasTable)} ORDER BY slot`;\n this.#lc.info?.(\n `replicas after cleanup (slotToKeep=${slotToKeep}): ${JSON.stringify(\n replicasAfter,\n )}`,\n );\n return result;\n });\n\n const pids = result.filter(({pid}) => pid !== null).map(({pid}) => pid);\n if (pids.length) {\n this.#lc.info?.(`signaled subscriber ${pids} to shut down`);\n }\n const otherSlots = result\n .filter(({slot}) => slot !== slotToKeep)\n .map(({slot}) => slot);\n\n if (otherSlots.length) {\n void this.#dropReplicationSlots(otherSlots).catch(e =>\n this.#lc.warn?.(`error dropping replication slots`, e),\n );\n }\n }\n\n async #dropReplicationSlots(slots: string[]) {\n this.#lc.info?.(`dropping other replication slot(s) ${slots}`);\n const sql = this.#db;\n for (let i = 0; i < 5; i++) {\n try {\n await sql`\n SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots\n WHERE slot_name IN ${sql(slots)}\n `;\n this.#lc.info?.(`successfully dropped ${slots}`);\n return;\n } catch (e) {\n // error: replication slot \"zero_slot_change_source_test_id\" is active for PID 268\n if (\n e instanceof postgres.PostgresError &&\n e.code === PG_OBJECT_IN_USE\n ) {\n // The freeing up of the replication slot is not transactional;\n // sometimes it takes time for Postgres to consider the slot\n // inactive.\n this.#lc.debug?.(`attempt ${i + 1}: ${String(e)}`, e);\n } else {\n this.#lc.warn?.(`error dropping ${slots}`, e);\n }\n await 
sleep(1000);\n }\n }\n this.#lc.warn?.(`maximum attempts exceeded dropping ${slots}`);\n }\n}\n\n// Exported for testing.\nexport class Acker implements Listener {\n #acks: Sink<bigint>;\n #waitingForDownstreamAck: string | null = null;\n\n constructor(acks: Sink<bigint>) {\n this.#acks = acks;\n }\n\n onChange(change: ChangeStreamMessage): void {\n switch (change[0]) {\n case 'status':\n const {watermark} = change[2];\n if (change[1].ack) {\n this.#expectDownstreamAck(watermark);\n } else {\n // Keepalives with shouldRespond = false are sent to Listeners,\n // but for efficiency they are not sent downstream to the\n // change-streamer. Ack them here if the change-streamer is caught\n // up. This updates the replication slot's `confirmed_flush_lsn`\n // more quickly (rather than waiting for the periodic shouldRespond),\n // which is useful for monitoring replication slot lag.\n this.#ackIfDownstreamIsCaughtUp(watermark);\n }\n break;\n case 'begin':\n // Mark the commit watermark as being expected so that any intermediate\n // shouldRespond=false watermarks, which will be at the\n // commitWatermark, are *not* acked, as the ack must come from\n // change-streamer after it commits the transaction.\n if (!change[1].skipAck) {\n this.#expectDownstreamAck(change[2].commitWatermark);\n }\n break;\n }\n }\n\n #expectDownstreamAck(watermark: string) {\n this.#waitingForDownstreamAck = watermark;\n }\n\n ack(watermark: LexiVersion) {\n if (\n this.#waitingForDownstreamAck &&\n this.#waitingForDownstreamAck <= watermark\n ) {\n this.#waitingForDownstreamAck = null;\n }\n this.#sendAck(watermark);\n }\n\n #ackIfDownstreamIsCaughtUp(watermark: string) {\n if (this.#waitingForDownstreamAck === null) {\n this.#sendAck(watermark);\n }\n }\n\n #sendAck(watermark: LexiVersion) {\n const lsn = majorVersionFromString(watermark);\n this.#acks.push(lsn);\n }\n}\n\nconst lagReportSchema = v.object({\n id: v.string(),\n sendTimeMs: v.number(),\n commitTimeMs: v.number(),\n});\n\nexport 
type LagReport = v.Infer<typeof lagReportSchema>;\n\ntype InitiatedLagReport = {\n id: string;\n lsn: bigint;\n};\n\nclass LagReporter {\n static readonly MESSAGE_SUFFIX = '/lag-report/v1';\n\n readonly #lc: LogContext;\n readonly messagePrefix: string;\n\n // Weird issue with oxlint, which thinks:\n // × eslint(no-unused-private-class-members): 'db' is defined but never used.\n // oxlint-disable-next-line eslint(no-unused-private-class-members)\n readonly #db: PostgresDB;\n readonly #lagIntervalMs: number;\n\n #pgVersion: number | undefined;\n #expectingLagReport: InitiatedLagReport | null = null;\n #timer: NodeJS.Timeout | undefined;\n\n constructor(\n lc: LogContext,\n shard: ShardID,\n db: PostgresDB,\n lagIntervalMs: number,\n ) {\n this.#lc = lc;\n this.messagePrefix = `${shard.appID}/${shard.shardNum}${LagReporter.MESSAGE_SUFFIX}`;\n this.#db = db;\n this.#lagIntervalMs = lagIntervalMs;\n }\n\n async #getPgVersion() {\n if (this.#pgVersion === undefined) {\n const [{pgVersion}] = await this.#db<{pgVersion: number}[]> /*sql*/ `\n SELECT current_setting('server_version_num')::int as \"pgVersion\"`;\n this.#pgVersion = pgVersion;\n }\n return this.#pgVersion;\n }\n\n async initiateLagReport(log = false) {\n const pgVersion = this.#pgVersion ?? (await this.#getPgVersion());\n const now = Date.now();\n const id = nanoid();\n\n const lagReport = {id, lsn: 0n}; // lsn is filled in after the db call.\n this.#expectingLagReport = lagReport;\n\n let lsn: string;\n if (pgVersion >= 170000) {\n [{lsn}] = await this.#db<{lsn: string}[]> /*sql*/ `\n SELECT pg_logical_emit_message(\n false,\n ${this.messagePrefix},\n json_build_object(\n 'id', ${id}::text,\n 'sendTimeMs', ${now}::int8,\n 'commitTimeMs', extract(epoch from now()) * 1000\n )::text,\n true\n ) as lsn;\n `;\n } else {\n // Versions before PG 17 do not support the final `flush` option of\n // pg_logical_emit_message(). 
This results in an extra 50~100ms latency\n // for replication reports when the db is idle, which is still\n // acceptable for the purpose for alerting on pathological lag, for\n // which the threshold is much higher (e.g. many seconds).\n [{lsn}] = await this.#db<{lsn: string}[]> /*sql*/ `\n SELECT pg_logical_emit_message(\n false,\n ${this.messagePrefix},\n json_build_object(\n 'id', ${id}::text,\n 'sendTimeMs', ${now}::int8,\n 'commitTimeMs', extract(epoch from now()) * 1000\n )::text\n ) as lsn;\n `;\n }\n\n // Note: We don't know the lsn until after pg_logical_emit_message()\n // returns, at which point it is possible that the report has\n // already been sent through the replication stream, but this\n // is okay since this.#expectingLagReport will have be updated.\n lagReport.lsn = toBigInt(lsn);\n\n if (log) {\n this.#lc.info?.(`initiated lag report at lsn ${lsn}`, {id, lsn});\n }\n return {nextSendTimeMs: now};\n }\n\n checkCurrentLSN(lsn: bigint) {\n if (this.#expectingLagReport?.lsn && lsn > this.#expectingLagReport.lsn) {\n this.#lc.warn?.(\n `LSN ${fromBigInt(lsn)} is passed expected lag report ` +\n `${fromBigInt(this.#expectingLagReport.lsn)}. 
Initiating new report.`,\n );\n this.#scheduleNextReport(0);\n }\n }\n\n #scheduleNextReport(delayMs: number) {\n this.#expectingLagReport = null;\n clearTimeout(this.#timer);\n this.#timer = setTimeout(async () => {\n try {\n await this.initiateLagReport();\n } catch (e) {\n this.#lc.warn?.(`error initiating lag report`, e);\n this.#scheduleNextReport(this.#lagIntervalMs);\n }\n }, delayMs);\n }\n\n processLagReport(msg: MessageMessage): DownstreamStatusMessage {\n assert(\n msg.prefix === this.messagePrefix,\n `unexpected message prefix: ${msg.prefix}`,\n );\n const report = parseLogicalMessageContent(msg, lagReportSchema);\n const now = Date.now();\n const nextSendTimeMs = Math.max(\n now,\n report.sendTimeMs + this.#lagIntervalMs,\n );\n if (report.id === this.#expectingLagReport?.id) {\n this.#scheduleNextReport(nextSendTimeMs - now);\n } else {\n // Only schedule the next report when receiving the previous report.\n // For historic reports in the WAL, or reports generated by other\n // replication-managers, status messages are still sent downstream,\n // but the next report is not actually scheduled.\n this.#lc.debug?.(`received extraneous lag report`, {report});\n }\n const {sendTimeMs, commitTimeMs} = report;\n return [\n 'status',\n {\n ack: false,\n lagReport: {\n lastTimings: {\n sendTimeMs,\n commitTimeMs,\n receiveTimeMs: now,\n },\n nextSendTimeMs,\n },\n },\n {watermark: toStateVersionString(msg.messageLsn ?? 
'0/0')},\n ];\n }\n}\n\ntype ReplicationError = {\n lsn: bigint;\n msg: Message;\n err: unknown;\n lastLogTime: number;\n};\n\nconst SET_REPLICA_IDENTITY_DELAY_MS = 50;\n\nclass ChangeMaker {\n readonly #lc: LogContext;\n readonly #shardPrefix: string;\n readonly #shardConfig: InternalShardConfig;\n readonly #initialSchema: PublishedSchema;\n readonly #db: PostgresDB;\n\n #replicaIdentityTimer: NodeJS.Timeout | undefined;\n #error: ReplicationError | undefined;\n\n constructor(\n lc: LogContext,\n {appID, shardNum}: ShardID,\n shardConfig: InternalShardConfig,\n db: PostgresDB,\n initialSchema: PublishedSchema,\n ) {\n this.#lc = lc;\n // Note: This matches the prefix used in pg_logical_emit_message() in pg/schema/ddl.ts.\n this.#shardPrefix = `${appID}/${shardNum}`;\n this.#shardConfig = shardConfig;\n this.#initialSchema = initialSchema;\n this.#db = db;\n }\n\n async makeChanges(lsn: bigint, msg: Message): Promise<ChangeStreamMessage[]> {\n if (this.#error) {\n this.#logError(this.#error);\n return [];\n }\n try {\n return await this.#makeChanges(msg);\n } catch (err) {\n this.#error = {lsn, msg, err, lastLogTime: 0};\n this.#logError(this.#error);\n\n const message = `Unable to continue replication from LSN ${fromBigInt(lsn)}`;\n const errorDetails: JSONObject = {error: message};\n if (err instanceof UnsupportedSchemaChangeError) {\n errorDetails.reason = err.description;\n errorDetails.context = err.event.context;\n } else {\n errorDetails.reason = String(err);\n }\n\n // Rollback the current transaction to avoid dangling transactions in\n // downstream processors (i.e. 
changeLog, replicator).\n return [\n ['rollback', {tag: 'rollback'}],\n ['control', {tag: 'reset-required', message, errorDetails}],\n ];\n }\n }\n\n #logError(error: ReplicationError) {\n const {lsn, msg, err, lastLogTime} = error;\n const now = Date.now();\n\n // Output an error to logs as replication messages continue to be dropped,\n // at most once a minute.\n if (now - lastLogTime > 60_000) {\n this.#lc.error?.(\n `Unable to continue replication from LSN ${fromBigInt(lsn)}: ${String(\n err,\n )}`,\n err instanceof UnsupportedSchemaChangeError\n ? err.event.context\n : // 'content' can be a large byte Buffer. Exclude it from logging output.\n {...msg, content: undefined},\n );\n error.lastLogTime = now;\n }\n }\n\n // oxlint-disable-next-line require-await\n async #makeChanges(msg: Message): Promise<ChangeStreamData[]> {\n switch (msg.tag) {\n case 'begin':\n return [\n [\n 'begin',\n {...msg, json: 's'},\n {commitWatermark: toStateVersionString(must(msg.commitLsn))},\n ],\n ];\n\n case 'delete': {\n if (!(msg.key ?? msg.old)) {\n throw new Error(\n `Invalid DELETE msg (missing key): ${stringify(msg)}`,\n );\n }\n return [\n [\n 'data',\n {\n ...msg,\n relation: makeRelation(msg.relation),\n // https://www.postgresql.org/docs/current/protocol-logicalrep-message-formats.html#PROTOCOL-LOGICALREP-MESSAGE-FORMATS-DELETE\n key: must(msg.old ?? msg.key),\n },\n ],\n ];\n }\n\n case 'update': {\n return [\n [\n 'data',\n {\n ...msg,\n relation: makeRelation(msg.relation),\n // https://www.postgresql.org/docs/current/protocol-logicalrep-message-formats.html#PROTOCOL-LOGICALREP-MESSAGE-FORMATS-UPDATE\n key: msg.old ?? 
msg.key,\n },\n ],\n ];\n }\n\n case 'insert':\n return [['data', {...msg, relation: makeRelation(msg.relation)}]];\n case 'truncate':\n return [['data', {...msg, relations: msg.relations.map(makeRelation)}]];\n\n case 'message':\n if (!msg.prefix.startsWith(this.#shardPrefix)) {\n this.#lc.debug?.('ignoring message for different shard', msg.prefix);\n return [];\n }\n switch (msg.prefix.substring(this.#shardPrefix.length)) {\n case '': // Legacy prefix\n case '/ddl':\n return this.#handleDdlMessage(msg);\n default:\n this.#lc.debug?.('ignoring unknown message type', msg.prefix);\n return [];\n }\n\n case 'commit':\n this.#lastSnapshotInTx = undefined;\n return [\n [\n 'commit',\n msg,\n {watermark: toStateVersionString(must(msg.commitLsn))},\n ],\n ];\n\n case 'relation':\n return this.#handleRelation(msg);\n case 'type':\n return []; // Nothing need be done for custom types.\n case 'origin':\n // No need to detect replication loops since we are not a\n // PG replication source.\n return [];\n default:\n msg satisfies never;\n throw new Error(`Unexpected message type ${stringify(msg)}`);\n }\n }\n\n #preSchema: PublishedSchema | undefined;\n #lastSnapshotInTx: PublishedSchema | undefined;\n\n #handleDdlMessage(msg: MessageMessage) {\n const event = parseLogicalMessageContent(msg, replicationEventSchema);\n // Cancel manual schema adjustment timeouts when an upstream schema change\n // is about to happen, so as to avoid interfering / redundant work.\n clearTimeout(this.#replicaIdentityTimer);\n\n let previousSchema: PublishedSchema | null;\n const {type} = event;\n switch (type) {\n case 'ddlStart':\n // Store the schema in order to diff it with a subsequent ddlUpdate.\n this.#preSchema = event.schema;\n return [];\n case 'ddlUpdate':\n // guaranteed by event triggers\n previousSchema = must(\n this.#preSchema,\n `ddlUpdate received without a ddlStart`,\n );\n break;\n case 'schemaSnapshot':\n previousSchema = this.#lastSnapshotInTx ?? 
null;\n break;\n default: // Ignore unknown types for forwards compatibility\n this.#lc.info?.(`ignoring unknown ddl message type: ${type}`);\n return [];\n }\n\n // Store the schema (from either a ddlUpdate or schemaSnapshot) to\n // diff against the next schemaSnapshot.\n this.#lastSnapshotInTx = event.schema;\n if (!previousSchema) {\n this.#lc.info?.(`received ${msg.prefix}/${type} event`);\n return []; // First schemaSnapshot in the tx.\n }\n this.#lc.info?.(`processing ${msg.prefix}/${type} event`, event);\n\n const changes = this.#makeSchemaChanges(previousSchema, event).map(\n change => ['data', change] satisfies Data,\n );\n\n this.#lc\n .withContext('tag', event.event.tag)\n .withContext('query', event.context.query)\n .info?.(`${changes.length} schema change(s)`, {changes});\n\n const replicaIdentities = replicaIdentitiesForTablesWithoutPrimaryKeys(\n event.schema,\n );\n if (replicaIdentities) {\n this.#replicaIdentityTimer = setTimeout(async () => {\n try {\n await replicaIdentities.apply(this.#lc, this.#db);\n } catch (err) {\n this.#lc.warn?.(`error setting replica identities`, err);\n }\n }, SET_REPLICA_IDENTITY_DELAY_MS);\n }\n\n return changes;\n }\n\n /**\n * A note on operation order:\n *\n * Postgres will drop related indexes when columns are dropped,\n * but SQLite will error instead (https://sqlite.org/forum/forumpost/2e62dba69f?t=c&hist).\n * The current workaround is to drop indexes first.\n *\n * Also note that although it should not be possible to both rename and\n * add/drop tables/columns in a single statement, the operations are\n * ordered to handle that possibility, by always dropping old entities,\n * then modifying kept entities, and then adding new entities.\n *\n * Thus, the order of replicating DDL updates is:\n * - drop indexes\n * - drop tables\n * - alter tables\n * - drop columns\n * - alter columns\n * - add columns\n * - create tables\n * - create indexes\n *\n * In the future the replication logic should be improved to 
handle this\n * behavior in SQLite by dropping dependent indexes manually before dropping\n * columns. This, for example, would be needed to properly support changing\n * the type of a column that's indexed.\n */\n #makeSchemaChanges(\n preSchema: PublishedSchema,\n update: DdlUpdateEvent | SchemaSnapshotEvent,\n ): SchemaChange[] {\n try {\n const [prevTbl, prevIdx] = specsByID(preSchema);\n const [nextTbl, nextIdx] = specsByID(update.schema);\n const changes: SchemaChange[] = [];\n\n // Validate the new table schemas\n for (const table of nextTbl.values()) {\n validate(this.#lc, table);\n }\n\n const [droppedIdx, createdIdx] = symmetricDifferences(prevIdx, nextIdx);\n\n // Detect modified indexes (same name, different definition).\n // This happens when a constraint is dropped and recreated with the\n // same name in a single ALTER TABLE statement.\n // Note: We compare using stable column attnums rather than names,\n // because table/column renames change the index spec cosmetically\n // (tableName, column keys) without the index actually being recreated.\n const keptIdx = intersection(prevIdx, nextIdx);\n for (const id of keptIdx) {\n if (\n isIndexStructurallyChanged(\n must(prevIdx.get(id)),\n must(nextIdx.get(id)),\n prevTbl,\n nextTbl,\n )\n ) {\n droppedIdx.add(id);\n createdIdx.add(id);\n }\n }\n\n for (const id of droppedIdx) {\n const {schema, name} = must(prevIdx.get(id));\n changes.push({tag: 'drop-index', id: {schema, name}});\n }\n\n // DROP\n const [droppedTbl, createdTbl] = symmetricDifferences(prevTbl, nextTbl);\n for (const id of droppedTbl) {\n const {schema, name} = must(prevTbl.get(id));\n changes.push({tag: 'drop-table', id: {schema, name}});\n }\n // ALTER TABLE | ALTER PUBLICATION\n const tables = intersection(prevTbl, nextTbl);\n for (const id of tables) {\n changes.push(\n ...this.#getTableChanges(\n must(prevTbl.get(id)),\n must(nextTbl.get(id)),\n update.event.tag,\n ),\n );\n }\n // CREATE\n for (const id of createdTbl) {\n const spec 
= must(nextTbl.get(id));\n const createTable: TableCreate = {\n tag: 'create-table',\n spec,\n metadata: getMetadata(spec),\n };\n if (!update.event.tag.startsWith('CREATE')) {\n // Tables introduced to the publication via ALTER statements\n // or the COMMENT statement (from schemaSnapshots) must be\n // backfilled.\n createTable.backfill = mapValues(spec.columns, ({pos: attNum}) => ({\n attNum,\n })) satisfies Record<string, ColumnMetadata>;\n }\n changes.push(createTable);\n }\n\n // Add indexes last since they may reference tables / columns that need\n // to be created first.\n for (const id of createdIdx) {\n const spec = must(nextIdx.get(id));\n changes.push({tag: 'create-index', spec});\n }\n return changes;\n } catch (e) {\n throw new UnsupportedSchemaChangeError(String(e), update, {cause: e});\n }\n }\n\n #getTableChanges(\n oldTable: PublishedTableWithReplicaIdentity,\n newTable: PublishedTableWithReplicaIdentity,\n ddlTag: string,\n ): SchemaChange[] {\n const changes: SchemaChange[] = [];\n if (\n oldTable.schema !== newTable.schema ||\n oldTable.name !== newTable.name\n ) {\n changes.push({\n tag: 'rename-table',\n old: {schema: oldTable.schema, name: oldTable.name},\n new: {schema: newTable.schema, name: newTable.name},\n });\n }\n const oldMetadata = getMetadata(oldTable);\n const newMetadata = getMetadata(newTable);\n if (!deepEqual(oldMetadata, newMetadata)) {\n changes.push({\n tag: 'update-table-metadata',\n table: {schema: newTable.schema, name: newTable.name},\n old: oldMetadata,\n new: newMetadata,\n });\n }\n const table = {schema: newTable.schema, name: newTable.name};\n const oldColumns = columnsByID(oldTable.columns);\n const newColumns = columnsByID(newTable.columns);\n\n // DROP\n const [dropped, added] = symmetricDifferences(oldColumns, newColumns);\n for (const id of dropped) {\n const {name: column} = must(oldColumns.get(id));\n changes.push({tag: 'drop-column', table, column});\n }\n\n // ALTER\n const both = intersection(oldColumns, 
newColumns);\n for (const id of both) {\n const {name: oldName, ...oldSpec} = must(oldColumns.get(id));\n const {name: newName, ...newSpec} = must(newColumns.get(id));\n // The three things that we care about are:\n // 1. name\n // 2. type\n // 3. not-null\n if (\n oldName !== newName ||\n oldSpec.dataType !== newSpec.dataType ||\n oldSpec.notNull !== newSpec.notNull\n ) {\n changes.push({\n tag: 'update-column',\n table,\n old: {name: oldName, spec: oldSpec},\n new: {name: newName, spec: newSpec},\n });\n }\n }\n\n // All columns introduced by a publication change require backfill\n // (which appear as ALTER PUBLICATION or COMMENT tags).\n // Columns created by ALTER TABLE, on the other hand, only require\n // backfill if they have non-constant defaults.\n const alwaysBackfill = ddlTag !== 'ALTER TABLE';\n\n // ADD\n for (const id of added) {\n const {name, ...spec} = must(newColumns.get(id));\n const column = {name, spec};\n const addColumn: ColumnAdd = {\n tag: 'add-column',\n table,\n column,\n tableMetadata: getMetadata(newTable),\n };\n if (alwaysBackfill) {\n addColumn.column.spec.dflt = null;\n addColumn.backfill = {attNum: spec.pos} satisfies ColumnMetadata;\n } else {\n // Determine if the ChangeProcessor will accept the column add as is.\n try {\n mapPostgresToLiteColumn(table.name, column);\n } catch (e) {\n if (!(e instanceof UnsupportedColumnDefaultError)) {\n // Note: mapPostgresToLiteColumn is not expected to throw any other\n // types of errors.\n throw e;\n }\n // If the column has an unsupported default (e.g. an expression or a\n // generated value), create the column as initially hidden with a\n // `null` default, and publish it after backfilling the values from\n // upstream. 
Note that this does require that the table have a valid\n // REPLICA IDENTITY, since backfill relies on merging new data with\n // an existing row.\n this.#lc.info?.(\n `Backfilling column ${table.name}.${name}: ${String(e)}`,\n );\n addColumn.column.spec.dflt = null;\n addColumn.backfill = {attNum: spec.pos} satisfies ColumnMetadata;\n }\n }\n changes.push(addColumn);\n }\n return changes;\n }\n\n /**\n * If `ddlDetection === true`, relation messages are irrelevant,\n * as schema changes are detected by event triggers that\n * emit custom messages.\n *\n * For degraded-mode replication (`ddlDetection === false`):\n * 1. query the current published schemas on upstream\n * 2. compare that with the InternalShardConfig.initialSchema\n * 3. compare that with the incoming MessageRelation\n * 4. On any discrepancy, throw an UnsupportedSchemaChangeError\n * to halt replication.\n *\n * Note that schemas queried in step [1] will be *post-transaction*\n * schemas, which are not necessarily suitable for actually processing\n * the statements in the transaction being replicated. In other words,\n * this mechanism cannot be used to reliably *replicate* schema changes.\n * However, they serve the purpose determining if schemas have changed.\n */\n async #handleRelation(rel: PostgresRelation): Promise<ChangeStreamData[]> {\n const {publications, ddlDetection} = this.#shardConfig;\n if (ddlDetection) {\n return [];\n }\n const currentSchema = await getPublicationInfo(this.#db, publications);\n const difference = getSchemaDifference(this.#initialSchema, currentSchema);\n if (difference !== null) {\n throw new MissingEventTriggerSupport(difference);\n }\n // Even if the currentSchema is equal to the initialSchema, the\n // MessageRelation itself must be checked to detect transient\n // schema changes within the transaction (e.g. 
adding and dropping\n // a table, or renaming a column and then renaming it back).\n const orel = this.#initialSchema.tables.find(\n t => t.oid === rel.relationOid,\n );\n if (!orel) {\n // Can happen if a table is created and then dropped in the same transaction.\n throw new MissingEventTriggerSupport(\n `relation not in initialSchema: ${stringify(rel)}`,\n );\n }\n if (relationDifferent(orel, rel)) {\n throw new MissingEventTriggerSupport(\n `relation has changed within the transaction: ${stringify(orel)} vs ${stringify(rel)}`,\n );\n }\n return [];\n }\n}\n\nfunction getSchemaDifference(\n a: PublishedSchema,\n b: PublishedSchema,\n): string | null {\n // Note: ignore indexes since changes need not to halt replication\n if (a.tables.length !== b.tables.length) {\n return `tables created or dropped`;\n }\n for (let i = 0; i < a.tables.length; i++) {\n const at = a.tables[i];\n const bt = b.tables[i];\n const difference = getTableDifference(at, bt);\n if (difference) {\n return difference;\n }\n }\n return null;\n}\n\n// ColumnSpec comparator\nconst byColumnPos = (a: [string, ColumnSpec], b: [string, ColumnSpec]) =>\n a[1].pos < b[1].pos ? -1 : a[1].pos > b[1].pos ? 
1 : 0;\n\nfunction getTableDifference(\n a: PublishedTableSpec,\n b: PublishedTableSpec,\n): string | null {\n if (a.oid !== b.oid || a.schema !== b.schema || a.name !== b.name) {\n return `Table \"${a.name}\" differs from table \"${b.name}\"`;\n }\n if (!deepEqual(a.primaryKey, b.primaryKey)) {\n return `Primary key of table \"${a.name}\" has changed`;\n }\n const acols = Object.entries(a.columns).sort(byColumnPos);\n const bcols = Object.entries(b.columns).sort(byColumnPos);\n if (\n acols.length !== bcols.length ||\n acols.some(([aname, acol], i) => {\n const [bname, bcol] = bcols[i];\n return (\n aname !== bname ||\n acol.pos !== bcol.pos ||\n acol.typeOID !== bcol.typeOID ||\n acol.notNull !== bcol.notNull\n );\n })\n ) {\n return `Columns of table \"${a.name}\" have changed`;\n }\n return null;\n}\n\nexport function relationDifferent(a: PublishedTableSpec, b: PostgresRelation) {\n if (a.oid !== b.relationOid || a.schema !== b.schema || a.name !== b.name) {\n return true;\n }\n if (\n // The MessageRelation's `keyColumns` field contains the columns in column\n // declaration order, whereas the PublishedTableSpec's `primaryKey`\n // contains the columns in primary key (i.e. index) order. 
Do an\n // order-agnostic compare here since it is not possible to detect\n // key-order changes from the MessageRelation message alone.\n b.replicaIdentity === 'default' &&\n !equals(new Set(a.primaryKey), new Set(b.keyColumns))\n ) {\n return true;\n }\n const acols = Object.entries(a.columns).sort(byColumnPos);\n const bcols = b.columns;\n return (\n acols.length !== bcols.length ||\n acols.some(([aname, acol], i) => {\n const bcol = bcols[i];\n return aname !== bcol.name || acol.typeOID !== bcol.typeOid;\n })\n );\n}\n\nfunction translateError(e: unknown): Error {\n if (!(e instanceof Error)) {\n return new Error(String(e));\n }\n if (e instanceof postgres.PostgresError && e.code === PG_ADMIN_SHUTDOWN) {\n return new ShutdownSignal(e);\n }\n return e;\n}\nconst idString = (id: Identifier) => `${id.schema}.${id.name}`;\n\nfunction specsByID(published: PublishedSchema) {\n return [\n // It would have been nice to use a CustomKeyMap here, but we rely on set-utils\n // operations which use plain Sets.\n new Map(published.tables.map(t => [t.oid, t])),\n new Map(published.indexes.map(i => [idString(i), i])),\n ] as const;\n}\n\n/**\n * Determines if an index was structurally changed (e.g. constraint dropped\n * and recreated with different columns) vs cosmetically changed (e.g. 
the\n * index spec changed because the table or a column was renamed).\n *\n * Compares boolean properties directly and resolves column names to their\n * stable attnums (pg_attribute `attnum`) for the column comparison.\n */\nfunction isIndexStructurallyChanged(\n prev: PublishedIndexSpec,\n next: PublishedIndexSpec,\n prevTables: Map<number, PublishedTableWithReplicaIdentity>,\n nextTables: Map<number, PublishedTableWithReplicaIdentity>,\n): boolean {\n if (\n prev.unique !== next.unique ||\n prev.isPrimaryKey !== next.isPrimaryKey ||\n prev.isReplicaIdentity !== next.isReplicaIdentity ||\n prev.isImmediate !== next.isImmediate\n ) {\n return true;\n }\n\n const prevTable = findTableBySchemaAndName(\n prevTables,\n prev.schema,\n prev.tableName,\n );\n const nextTable = findTableBySchemaAndName(\n nextTables,\n next.schema,\n next.tableName,\n );\n if (!prevTable || !nextTable) {\n // Can't resolve tables; conservatively treat as changed.\n return true;\n }\n\n const prevEntries = Object.entries(prev.columns);\n const nextEntries = Object.entries(next.columns);\n if (prevEntries.length !== nextEntries.length) {\n return true;\n }\n\n // Resolve column names → attnums and compare.\n const prevByAttnum = new Map<number | undefined, string>(\n prevEntries.map(([name, dir]) => [prevTable.columns[name]?.pos, dir]),\n );\n const nextByAttnum = new Map<number | undefined, string>(\n nextEntries.map(([name, dir]) => [nextTable.columns[name]?.pos, dir]),\n );\n\n if (prevByAttnum.has(undefined) || nextByAttnum.has(undefined)) {\n // Column not found in table spec; conservatively treat as changed.\n return true;\n }\n if (prevByAttnum.size !== nextByAttnum.size) {\n return true;\n }\n for (const [attnum, dir] of prevByAttnum) {\n if (nextByAttnum.get(attnum) !== dir) {\n return true;\n }\n }\n return false;\n}\n\nfunction findTableBySchemaAndName(\n tables: Map<number, PublishedTableWithReplicaIdentity>,\n schema: string,\n name: string,\n): 
PublishedTableWithReplicaIdentity | undefined {\n for (const table of tables.values()) {\n if (table.schema === schema && table.name === name) {\n return table;\n }\n }\n return undefined;\n}\n\nfunction columnsByID(\n columns: Record<string, ColumnSpec>,\n): Map<number, ColumnSpec & {name: string}> {\n const colsByID = new Map<number, ColumnSpec & {name: string}>();\n for (const [name, spec] of Object.entries(columns)) {\n // The `pos` field is the `attnum` in `pg_attribute`, which is a stable\n // identifier for the column in this table (i.e. never reused).\n colsByID.set(spec.pos, {...spec, name});\n }\n return colsByID;\n}\n\nfunction getMetadata(table: PublishedTableWithReplicaIdentity): TableMetadata {\n return {\n schemaOID: must(table.schemaOID),\n relationOID: table.oid,\n rowKey: Object.fromEntries(\n table.replicaIdentityColumns.map(k => [\n k,\n {attNum: table.columns[k].pos},\n ]),\n ),\n };\n}\n\n// Avoid sending the `columns` from the Postgres MessageRelation message.\n// They are not used downstream and the message can be large.\nfunction makeRelation(relation: PostgresRelation): MessageRelation {\n // Avoid sending the `columns` from the Postgres MessageRelation message.\n // They are not used downstream and the message can be large.\n const {columns: _, keyColumns, replicaIdentity, ...rest} = relation;\n return {\n ...rest,\n rowKey: {\n columns: keyColumns,\n type: replicaIdentity,\n },\n // For now, deprecated columns are sent for backwards compatibility.\n // These can be removed when bumping the MIN_PROTOCOL_VERSION to 5.\n keyColumns,\n replicaIdentity,\n };\n}\n\nclass UnsupportedSchemaChangeError extends Error {\n readonly name = 'UnsupportedSchemaChangeError';\n readonly description: string;\n readonly event: DdlUpdateEvent | SchemaSnapshotEvent;\n\n constructor(\n description: string,\n event: DdlUpdateEvent | SchemaSnapshotEvent,\n options?: ErrorOptions,\n ) {\n super(\n `Replication halted. 
Resync the replica to recover: ${description}`,\n options,\n );\n this.description = description;\n this.event = event;\n }\n}\n\nclass MissingEventTriggerSupport extends Error {\n readonly name = 'MissingEventTriggerSupport';\n\n constructor(msg: string) {\n super(\n `${msg}. Schema changes cannot be reliably replicated without event trigger support.`,\n );\n }\n}\n\n// TODO(0xcadams): should this be a ProtocolError?\nclass ShutdownSignal extends AbortError {\n readonly name = 'ShutdownSignal';\n\n constructor(cause: unknown) {\n super(\n 'shutdown signal received (e.g. another zero-cache taking over the replication stream)',\n {\n cause,\n },\n );\n }\n}\n\nfunction parseLogicalMessageContent<T>(\n {content}: MessageMessage,\n schema: v.Type<T>,\n) {\n const str =\n content instanceof Buffer\n ? content.toString('utf-8')\n : new TextDecoder().decode(content);\n const json = JSON.parse(str);\n return v.parse(json, schema, 'passthrough');\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAqHA,eAAsB,+BACpB,IACA,aACA,OACA,eACA,aACA,SACA,qBAC6E;AAC7E,OAAM,YACJ,IACA,WAAW,MAAM,MAAM,GAAG,MAAM,YAChC,gBACC,KAAK,OAAO,YAAY,KAAK,OAAO,IAAI,aAAa,aAAa,QAAQ,CAC5E;CAED,MAAM,UAAU,IAAI,SAAS,IAAI,cAAc;CAC/C,MAAM,oBAAoB,+BACxB,IAAI,gBAAgB,QAAQ,CAC7B;AACD,SAAQ,OAAO;CAIf,MAAM,KAAK,SAAS,IAAI,YAAY;AACpC,KAAI;AAiBF,SAAO;GAAC;GAAmB,cATN,IAAI,qBACvB,IACA,aACA,OAVsB,MAAM,uBAC5B,IACA,IACA,OACA,kBACD,EAOC,SACA,uBAAuB,KACxB;GAEuC;WAChC;AACR,QAAM,GAAG,KAAK;;;AAIlB,eAAe,uBACb,IACA,KACA,OACA,EACE,gBACA,cAAc,YACd,sBAEF;AAEA,OAAM,kBAAkB,IAAI,KAAK,OAAO,eAAe;CAEvD,MAAM,kBAAkB,MAAM,oBAC5B,IACA,KACA,OACA,gBACA,mBACD;AACD,KAAI,CAAC,gBACH,OAAM,IAAI,gBACR,8CAA8C,iBAC/C;CAIH,MAAM,YAAY,CAAC,GAAG,MAAM,aAAa,CAAC,MAAM;CAChD,MAAM,aAAa,gBAAgB,aAChC,QAAO,MAAK,CAAC,EAAE,WAAW,0BAA0B,MAAM,CAAC,CAAC,CAC5D,MAAM;AACT,KAAI,CAAC,UAAU,WAAW,WAAW,EAAE;AACrC,KAAG,OAAO,8CAA8C,UAAU,GAAG;AACrE,QAAM,IAAI,OAAO,UAAU,MAAM,OAAO,MAAM,SAAS,CAAC;AACxD,QAAM,IAAI,gBACR,2BAA2B,UAAU,2CACjB,WAAW,GAChC;;AAMH,KAAI,CAAC,UAAU,gBAAgB,cAAc,
WAAW,CACtD,OAAM,IAAI,gBACR,0BAA0B,gBAAgB,aAAa,0CACnB,WAAW,GAChD;CAIH,MAAM,SAAS,MAAM,GAAG;0DACgC,IAAI,WAAW,CAAC;IACtE,QAAQ;AACV,KAAI,OAAO,WAAW,WAAW,OAC/B,OAAM,IAAI,gBACR,0BAA0B,OAAO,MAAM,CAAC,gDACN,WAAW,GAC9C;CAGH,MAAM,EAAC,SAAQ;CACf,MAAM,SAAS,MAAM,GAEX;;0BAEc;AACxB,KAAI,OAAO,WAAW,EACpB,OAAM,IAAI,gBAAgB,oBAAoB,KAAK,aAAa;CAElE,MAAM,CAAC,EAAC,YAAY,eAAc;AAClC,KAAI,eAAe,QAAQ,cAAc,OACvC,OAAM,IAAI,gBACR,oBAAoB,KAAK,gEAC1B;AAEH,QAAO;;AAIT,IAAM,4BAA4B;;;;;AAUlC,IAAM,uBAAN,MAAmD;CACjD;CACA;CACA;CACA;CACA;CACA;CACA;CAEA,YACE,IACA,aACA,OACA,SACA,SACA,qBACA;AACA,QAAA,KAAW,GAAG,YAAY,aAAa,gBAAgB;AACvD,QAAA,KAAW,SAAS,IAAI,aAAa;IAElC,iBAAiB;GAClB,YAAY,GAAE,qBAAqB,4BAA2B;GAC/D,CAAC;AACF,QAAA,cAAoB;AACpB,QAAA,QAAc;AACd,QAAA,UAAgB;AAChB,QAAA,UAAgB;AAChB,QAAA,cAAoB,sBAChB,IAAI,YACF,GAAG,YAAY,aAAa,eAAe,EAC3C,OACA,MAAA,IACA,oBACD,GACD;;CAGN,mBAA6D;AAC3D,SAAO,MAAA,cAAoB,MAAA,YAAkB,kBAAkB,KAAK,GAAG;;CAGzE,MAAM,YACJ,iBACA,mBAAsC,EAAE,EACjB;EACvB,MAAM,EAAC,SAAQ,MAAA;AAEf,QAAM,MAAA,uCAA6C,KAAK;EACxD,MAAM,SAAS,MAAM,uBAAuB,MAAA,IAAU,MAAA,MAAY;AAClE,QAAA,GAAS,OAAO,+BAA+B,OAAO;AACtD,SAAO,MAAA,YAAkB,MAAM,iBAAiB,QAAQ,iBAAiB;;CAG3E,OAAA,YACE,MACA,iBACA,aACA,kBACuB;EACvB,MAAM,cAAc,uBAAuB,gBAAgB,GAAG;EAC9D,MAAM,EAAC,UAAU,SAAQ,MAAM,UAC7B,MAAA,IACA,MAAA,IACA,MACA,CAAC,GAAG,YAAY,aAAa,EAC7B,YACD;EACD,MAAM,QAAQ,IAAI,MAAM,KAAK;EAK7B,MAAM,UAAU,IAAI,wBAAwB,MAAA,IAAU,gBAAgB;EACtE,MAAM,kBAAkB,IAAI,gBAAgB,MAAA,IAAU,UAAS,QAC7D,eAAe,MAAA,IAAU,MAAA,aAAmB,MAAA,SAAe,IAAI,CAChE;AACD,UACG,aAAa,UAAU,gBAAgB,CACvC,aAAa,iBAAiB,MAAM;AACvC,kBAAgB,IAAI,iBAAiB,iBAAiB;EAEtD,MAAM,cAAc,IAAI,YACtB,MAAA,IACA,MAAA,OACA,aACA,MAAA,IACA,MAAA,QAAc,cACf;;;;;EAMD,MAAM,0BACJ,KACA,QACmB;AACnB,OACE,IAAI,QAAQ,aACZ,IAAI,WAAW,MAAA,aAAmB,eAClC;AACA,YAAQ,WAAW,MAAA,YAAkB,iBAAiB,IAAI,CAAC;AAC3D,WAAO;;AAIT,SAAA,aAAmB,gBAAgB,IAAI;AAEvC,OAAI,IAAI,QAAQ,aAAa;AAC3B,YAAQ,WAAW;KACjB;KACA,EAAC,KAAK,IAAI,eAAc;KACxB,EAAC,WAAW,qBAAqB,IAAI,EAAC;KACvC,CAAC;AACF,WAAO;;AAET,UAAO;;AAGT,GAAM,YAAY;AAChB,OAAI;IACF,IAAI,cAAuC;IAC3C,IAAI,gBAAgB;AAEpB,eAAW,MAAM,CAAC,KAAK,
QAAQ,UAAU;AACvC,SAAI,CAAC,uBAAuB,KAAK,IAAI,EAAE;AAIrC,UAAI,CAAC,iBAAiB,aAAa,eAAe;AAChD,eAAQ,QAAQ,YAAY,cAAc;AAC1C,qBAAc;;AAEhB;;AAGF,SAAI,CAAC,aAAa;MAChB,MAAM,MAAM,QAAQ,QAAQ,cAAc;AAC1C,aAAO,QAAQ,YAAa,MAAM;AAClC,oBAAc,EAAE;;KAGlB,IAAI;AACJ,UAAK,MAAM,UAAU,MAAM,YAAY,YAAY,KAAK,IAAI,EAAE;AAC5D,YAAM,QAAQ,KAAK,OAAO;AAC1B,mBAAa;;AAGf,aAAQ,aAAa,IAArB;MACE,KAAK;AACH,uBAAgB;AAChB;MACF,KAAK;AACH,uBAAgB;AAChB,mBAAY,gBAAgB,WAAW,GAAG;AAC1C,WACE,SAAS,WAAW,KACpB,QAAQ,aAAa,GAAG,2BACxB;AAMA,gBAAQ,QAAQ,YAAY,cAAc;AAC1C,sBAAc;;AAEhB;;;YAGC,GAAG;IAGV,MAAM,MAAM,eAAe,EAAE;AAC7B,QAAI,eAAe,eAGjB,OAAM,MAAA,uBAA6B;AAErC,YAAQ,KAAK,IAAI;;MAEjB;AAEJ,QAAA,GAAS,OACP,8BAA8B,KAAK,QAAQ,gBAAgB,oBACzD,MAAA,QAAc,QACf,GACF;AAED,SAAO;GACL,SAAS,QAAQ,UAAU;GAC3B,MAAM,EAAC,OAAM,WAAU,MAAM,IAAI,OAAO,GAAG,UAAU,EAAC;GACvD;;CAGH,OAAA,wBAA+B;AAC7B,MAAI;GACF,MAAM,UAAU,MAAM,oBACpB,MAAA,IACA,MAAA,IACA,MAAA,OACA,MAAA,QAAc,QACf;AACD,OAAI,QACF,OAAA,GAAS,OACP,gCAAgC,MAAA,QAAc,QAAQ,IAAI,UAAU,QAAQ,kBAAkB,GAC/F;WAEI,GAAG;AACV,SAAA,GAAS,OAAO,8BAA8B,EAAE;;;;;;;;;;;;CAapD,OAAA,uCAA8C,YAAoB;EAChE,MAAM,iBAAiB,0BAA0B,MAAA,MAAY;EAC7D,MAAM,iBAAiB,sBAAsB,MAAA,MAAY;EAEzD,MAAM,SAAS,MAAM,MAAM,MAAA,IAAU,OAAM,QAAO;GAGhD,MAAM,SAAS,MAAM,GAEX;;;gCAGgB,eAAe,kBAAkB,eAAe;iCAC/C;AAC3B,SAAA,GAAS,OACP,iCAAiC,KAAK,UAAU,OAAO,GACxD;GACD,MAAM,gBAAgB,GAAG,eAAe,MAAA,MAAY,CAAC;GACrD,MAAM,iBAAiB,MAAM,GAAG;;iBAErB,IAAI,cAAc,CAAC;AAE9B,OAAI,OAAO,WAAW,GAAG;IACvB,MAAM,aAAa,MAAM,GAAG;;;iCAGH,eAAe,kBAAkB,eAAe;;AAEzE,UAAA,GAAS,OACP,QAAQ,WAAW,wCACnB;KAAC,OAAO;KAAY,UAAU;KAAe,CAC9C;AACD,UAAM,IAAI,WACR,oBAAoB,WAAW,+FAGhC;;AAGH,SAAA,GAAS,OACP,uCAAuC,WAAW,KAAK,KAAK,UAC1D,eACD,GACF;AACD,SAAM,GAAG;sBACO,IAAI,cAAc,CAAC,gBAAgB;AACnD,SAAM,GAAG;iBACE,IAAI,cAAc,CAAC;sCACE,MAAA,QAAc;yBAC3B;GACnB,MAAM,gBAAgB,MAAM,GAAsC;kCACtC,IAAI,cAAc,CAAC;AAC/C,SAAA,GAAS,OACP,sCAAsC,WAAW,KAAK,KAAK,UACzD,cACD,GACF;AACD,UAAO;IACP;EAEF,MAAM,OAAO,OAAO,QAAQ,EAAC,UAAS,QAAQ,KAAK,CAAC,KAAK,EAAC,UAAS,IAAI;AACvE,MAAI,KAAK,OACP,OAAA,GAAS,OAAO,uBAAuB,KAAK,eAAe;EAE7D,MAAM,aAAa,OAChB,QAAQ,EAAC,WAAU,SAAS,WAAW
,CACvC,KAAK,EAAC,WAAU,KAAK;AAExB,MAAI,WAAW,OACR,OAAA,qBAA2B,WAAW,CAAC,OAAM,MAChD,MAAA,GAAS,OAAO,oCAAoC,EAAE,CACvD;;CAIL,OAAA,qBAA4B,OAAiB;AAC3C,QAAA,GAAS,OAAO,sCAAsC,QAAQ;EAC9D,MAAM,MAAM,MAAA;AACZ,OAAK,IAAI,IAAI,GAAG,IAAI,GAAG,IACrB,KAAI;AACF,SAAM,GAAG;;iCAEgB,IAAI,MAAM,CAAC;;AAEpC,SAAA,GAAS,OAAO,wBAAwB,QAAQ;AAChD;WACO,GAAG;AAEV,OACE,aAAa,SAAS,iBACtB,EAAE,SAAS,iBAKX,OAAA,GAAS,QAAQ,WAAW,IAAI,EAAE,IAAI,OAAO,EAAE,IAAI,EAAE;OAErD,OAAA,GAAS,OAAO,kBAAkB,SAAS,EAAE;AAE/C,SAAM,MAAM,IAAK;;AAGrB,QAAA,GAAS,OAAO,sCAAsC,QAAQ;;;AAKlE,IAAa,QAAb,MAAuC;CACrC;CACA,2BAA0C;CAE1C,YAAY,MAAoB;AAC9B,QAAA,OAAa;;CAGf,SAAS,QAAmC;AAC1C,UAAQ,OAAO,IAAf;GACE,KAAK;IACH,MAAM,EAAC,cAAa,OAAO;AAC3B,QAAI,OAAO,GAAG,IACZ,OAAA,oBAA0B,UAAU;QAQpC,OAAA,0BAAgC,UAAU;AAE5C;GACF,KAAK;AAKH,QAAI,CAAC,OAAO,GAAG,QACb,OAAA,oBAA0B,OAAO,GAAG,gBAAgB;AAEtD;;;CAIN,qBAAqB,WAAmB;AACtC,QAAA,0BAAgC;;CAGlC,IAAI,WAAwB;AAC1B,MACE,MAAA,2BACA,MAAA,2BAAiC,UAEjC,OAAA,0BAAgC;AAElC,QAAA,QAAc,UAAU;;CAG1B,2BAA2B,WAAmB;AAC5C,MAAI,MAAA,4BAAkC,KACpC,OAAA,QAAc,UAAU;;CAI5B,SAAS,WAAwB;EAC/B,MAAM,MAAM,uBAAuB,UAAU;AAC7C,QAAA,KAAW,KAAK,IAAI;;;AAIxB,IAAM,kBAAkB,eAAE,OAAO;CAC/B,IAAI,eAAE,QAAQ;CACd,YAAY,eAAE,QAAQ;CACtB,cAAc,eAAE,QAAQ;CACzB,CAAC;AASF,IAAM,cAAN,MAAM,YAAY;CAChB,OAAgB,iBAAiB;CAEjC;CACA;CAKA;CACA;CAEA;CACA,sBAAiD;CACjD;CAEA,YACE,IACA,OACA,IACA,eACA;AACA,QAAA,KAAW;AACX,OAAK,gBAAgB,GAAG,MAAM,MAAM,GAAG,MAAM,WAAW,YAAY;AACpE,QAAA,KAAW;AACX,QAAA,gBAAsB;;CAGxB,OAAA,eAAsB;AACpB,MAAI,MAAA,cAAoB,KAAA,GAAW;GACjC,MAAM,CAAC,EAAC,eAAc,MAAM,MAAA,EAAwC;;AAEpE,SAAA,YAAkB;;AAEpB,SAAO,MAAA;;CAGT,MAAM,kBAAkB,MAAM,OAAO;EACnC,MAAM,YAAY,MAAA,aAAoB,MAAM,MAAA,cAAoB;EAChE,MAAM,MAAM,KAAK,KAAK;EACtB,MAAM,KAAK,QAAQ;EAEnB,MAAM,YAAY;GAAC;GAAI,KAAK;GAAG;AAC/B,QAAA,qBAA2B;EAE3B,IAAI;AACJ,MAAI,aAAa,KACf,EAAC,CAAC,QAAQ,MAAM,MAAA,EAAkC;;;YAG5C,KAAK,cAAc;;oBAEX,GAAG;4BACK,IAAI;;;;;;MAY1B,EAAC,CAAC,QAAQ,MAAM,MAAA,EAAkC;;;YAG5C,KAAK,cAAc;;oBAEX,GAAG;4BACK,IAAI;;;;;AAW5B,YAAU,MAAM,SAAS,IAAI;AAE7B,MAAI,IACF,OAAA,GAAS,OAAO,+BAA+B,OAAO;GAAC;GAAI;GAAI,CAAC;AAElE,SAAO,EAAC,gBAAgB,K
AAI;;CAG9B,gBAAgB,KAAa;AAC3B,MAAI,MAAA,oBAA0B,OAAO,MAAM,MAAA,mBAAyB,KAAK;AACvE,SAAA,GAAS,OACP,OAAO,WAAW,IAAI,CAAC,iCAClB,WAAW,MAAA,mBAAyB,IAAI,CAAC,0BAC/C;AACD,SAAA,mBAAyB,EAAE;;;CAI/B,oBAAoB,SAAiB;AACnC,QAAA,qBAA2B;AAC3B,eAAa,MAAA,MAAY;AACzB,QAAA,QAAc,WAAW,YAAY;AACnC,OAAI;AACF,UAAM,KAAK,mBAAmB;YACvB,GAAG;AACV,UAAA,GAAS,OAAO,+BAA+B,EAAE;AACjD,UAAA,mBAAyB,MAAA,cAAoB;;KAE9C,QAAQ;;CAGb,iBAAiB,KAA8C;AAC7D,SACE,IAAI,WAAW,KAAK,eACpB,8BAA8B,IAAI,SACnC;EACD,MAAM,SAAS,2BAA2B,KAAK,gBAAgB;EAC/D,MAAM,MAAM,KAAK,KAAK;EACtB,MAAM,iBAAiB,KAAK,IAC1B,KACA,OAAO,aAAa,MAAA,cACrB;AACD,MAAI,OAAO,OAAO,MAAA,oBAA0B,GAC1C,OAAA,mBAAyB,iBAAiB,IAAI;MAM9C,OAAA,GAAS,QAAQ,kCAAkC,EAAC,QAAO,CAAC;EAE9D,MAAM,EAAC,YAAY,iBAAgB;AACnC,SAAO;GACL;GACA;IACE,KAAK;IACL,WAAW;KACT,aAAa;MACX;MACA;MACA,eAAe;MAChB;KACD;KACD;IACF;GACD,EAAC,WAAW,qBAAqB,IAAI,cAAc,MAAM,EAAC;GAC3D;;;AAWL,IAAM,gCAAgC;AAEtC,IAAM,cAAN,MAAkB;CAChB;CACA;CACA;CACA;CACA;CAEA;CACA;CAEA,YACE,IACA,EAAC,OAAO,YACR,aACA,IACA,eACA;AACA,QAAA,KAAW;AAEX,QAAA,cAAoB,GAAG,MAAM,GAAG;AAChC,QAAA,cAAoB;AACpB,QAAA,gBAAsB;AACtB,QAAA,KAAW;;CAGb,MAAM,YAAY,KAAa,KAA8C;AAC3E,MAAI,MAAA,OAAa;AACf,SAAA,SAAe,MAAA,MAAY;AAC3B,UAAO,EAAE;;AAEX,MAAI;AACF,UAAO,MAAM,MAAA,YAAkB,IAAI;WAC5B,KAAK;AACZ,SAAA,QAAc;IAAC;IAAK;IAAK;IAAK,aAAa;IAAE;AAC7C,SAAA,SAAe,MAAA,MAAY;GAE3B,MAAM,UAAU,2CAA2C,WAAW,IAAI;GAC1E,MAAM,eAA2B,EAAC,OAAO,SAAQ;AACjD,OAAI,eAAe,8BAA8B;AAC/C,iBAAa,SAAS,IAAI;AAC1B,iBAAa,UAAU,IAAI,MAAM;SAEjC,cAAa,SAAS,OAAO,IAAI;AAKnC,UAAO,CACL,CAAC,YAAY,EAAC,KAAK,YAAW,CAAC,EAC/B,CAAC,WAAW;IAAC,KAAK;IAAkB;IAAS;IAAa,CAAC,CAC5D;;;CAIL,UAAU,OAAyB;EACjC,MAAM,EAAC,KAAK,KAAK,KAAK,gBAAe;EACrC,MAAM,MAAM,KAAK,KAAK;AAItB,MAAI,MAAM,cAAc,KAAQ;AAC9B,SAAA,GAAS,QACP,2CAA2C,WAAW,IAAI,CAAC,IAAI,OAC7D,IACD,IACD,eAAe,+BACX,IAAI,MAAM,UAEV;IAAC,GAAG;IAAK,SAAS,KAAA;IAAU,CACjC;AACD,SAAM,cAAc;;;CAKxB,OAAA,YAAmB,KAA2C;AAC5D,UAAQ,IAAI,KAAZ;GACE,KAAK,QACH,QAAO,CACL;IACE;IACA;KAAC,GAAG;KAAK,MAAM;KAAI;IACnB,EAAC,iBAAiB,qBAAqB,KAAK,IAAI,UAAU,CAAC,EAAC;IAC7D,CACF;GAEH,KAAK;AACH,QAAI,EAAE,IAAI,OAAO,IAAI,KACnB,OAAM,IAA
I,MACR,qCAAqC,UAAU,IAAI,GACpD;AAEH,WAAO,CACL,CACE,QACA;KACE,GAAG;KACH,UAAU,aAAa,IAAI,SAAS;KAEpC,KAAK,KAAK,IAAI,OAAO,IAAI,IAAI;KAC9B,CACF,CACF;GAGH,KAAK,SACH,QAAO,CACL,CACE,QACA;IACE,GAAG;IACH,UAAU,aAAa,IAAI,SAAS;IAEpC,KAAK,IAAI,OAAO,IAAI;IACrB,CACF,CACF;GAGH,KAAK,SACH,QAAO,CAAC,CAAC,QAAQ;IAAC,GAAG;IAAK,UAAU,aAAa,IAAI,SAAS;IAAC,CAAC,CAAC;GACnE,KAAK,WACH,QAAO,CAAC,CAAC,QAAQ;IAAC,GAAG;IAAK,WAAW,IAAI,UAAU,IAAI,aAAa;IAAC,CAAC,CAAC;GAEzE,KAAK;AACH,QAAI,CAAC,IAAI,OAAO,WAAW,MAAA,YAAkB,EAAE;AAC7C,WAAA,GAAS,QAAQ,wCAAwC,IAAI,OAAO;AACpE,YAAO,EAAE;;AAEX,YAAQ,IAAI,OAAO,UAAU,MAAA,YAAkB,OAAO,EAAtD;KACE,KAAK;KACL,KAAK,OACH,QAAO,MAAA,iBAAuB,IAAI;KACpC;AACE,YAAA,GAAS,QAAQ,iCAAiC,IAAI,OAAO;AAC7D,aAAO,EAAE;;GAGf,KAAK;AACH,UAAA,mBAAyB,KAAA;AACzB,WAAO,CACL;KACE;KACA;KACA,EAAC,WAAW,qBAAqB,KAAK,IAAI,UAAU,CAAC,EAAC;KACvD,CACF;GAEH,KAAK,WACH,QAAO,MAAA,eAAqB,IAAI;GAClC,KAAK,OACH,QAAO,EAAE;GACX,KAAK,SAGH,QAAO,EAAE;GACX,QAEE,OAAM,IAAI,MAAM,2BAA2B,UAAU,IAAI,GAAG;;;CAIlE;CACA;CAEA,kBAAkB,KAAqB;EACrC,MAAM,QAAQ,2BAA2B,KAAK,uBAAuB;AAGrE,eAAa,MAAA,qBAA2B;EAExC,IAAI;EACJ,MAAM,EAAC,SAAQ;AACf,UAAQ,MAAR;GACE,KAAK;AAEH,UAAA,YAAkB,MAAM;AACxB,WAAO,EAAE;GACX,KAAK;AAEH,qBAAiB,KACf,MAAA,WACA,wCACD;AACD;GACF,KAAK;AACH,qBAAiB,MAAA,oBAA0B;AAC3C;GACF;AACE,UAAA,GAAS,OAAO,sCAAsC,OAAO;AAC7D,WAAO,EAAE;;AAKb,QAAA,mBAAyB,MAAM;AAC/B,MAAI,CAAC,gBAAgB;AACnB,SAAA,GAAS,OAAO,YAAY,IAAI,OAAO,GAAG,KAAK,QAAQ;AACvD,UAAO,EAAE;;AAEX,QAAA,GAAS,OAAO,cAAc,IAAI,OAAO,GAAG,KAAK,SAAS,MAAM;EAEhE,MAAM,UAAU,MAAA,kBAAwB,gBAAgB,MAAM,CAAC,KAC7D,WAAU,CAAC,QAAQ,OAAO,CAC3B;AAED,QAAA,GACG,YAAY,OAAO,MAAM,MAAM,IAAI,CACnC,YAAY,SAAS,MAAM,QAAQ,MAAM,CACzC,OAAO,GAAG,QAAQ,OAAO,oBAAoB,EAAC,SAAQ,CAAC;EAE1D,MAAM,oBAAoB,6CACxB,MAAM,OACP;AACD,MAAI,kBACF,OAAA,uBAA6B,WAAW,YAAY;AAClD,OAAI;AACF,UAAM,kBAAkB,MAAM,MAAA,IAAU,MAAA,GAAS;YAC1C,KAAK;AACZ,UAAA,GAAS,OAAO,oCAAoC,IAAI;;KAEzD,8BAA8B;AAGnC,SAAO;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA8BT,mBACE,WACA,QACgB;AAChB,MAAI;GACF,MAAM,CAAC,SAAS,WAAW,UAAU,UAAU;GAC/C,MAAM,CAAC,SAAS,WAAW,UAAU,OAAO,OAAO;GACnD,MAAM,UAA0B,EAAE;AAGlC,QAAK,M
AAM,SAAS,QAAQ,QAAQ,CAClC,UAAS,MAAA,IAAU,MAAM;GAG3B,MAAM,CAAC,YAAY,cAAc,qBAAqB,SAAS,QAAQ;GAQvE,MAAM,UAAU,aAAa,SAAS,QAAQ;AAC9C,QAAK,MAAM,MAAM,QACf,KACE,2BACE,KAAK,QAAQ,IAAI,GAAG,CAAC,EACrB,KAAK,QAAQ,IAAI,GAAG,CAAC,EACrB,SACA,QACD,EACD;AACA,eAAW,IAAI,GAAG;AAClB,eAAW,IAAI,GAAG;;AAItB,QAAK,MAAM,MAAM,YAAY;IAC3B,MAAM,EAAC,QAAQ,SAAQ,KAAK,QAAQ,IAAI,GAAG,CAAC;AAC5C,YAAQ,KAAK;KAAC,KAAK;KAAc,IAAI;MAAC;MAAQ;MAAK;KAAC,CAAC;;GAIvD,MAAM,CAAC,YAAY,cAAc,qBAAqB,SAAS,QAAQ;AACvE,QAAK,MAAM,MAAM,YAAY;IAC3B,MAAM,EAAC,QAAQ,SAAQ,KAAK,QAAQ,IAAI,GAAG,CAAC;AAC5C,YAAQ,KAAK;KAAC,KAAK;KAAc,IAAI;MAAC;MAAQ;MAAK;KAAC,CAAC;;GAGvD,MAAM,SAAS,aAAa,SAAS,QAAQ;AAC7C,QAAK,MAAM,MAAM,OACf,SAAQ,KACN,GAAG,MAAA,gBACD,KAAK,QAAQ,IAAI,GAAG,CAAC,EACrB,KAAK,QAAQ,IAAI,GAAG,CAAC,EACrB,OAAO,MAAM,IACd,CACF;AAGH,QAAK,MAAM,MAAM,YAAY;IAC3B,MAAM,OAAO,KAAK,QAAQ,IAAI,GAAG,CAAC;IAClC,MAAM,cAA2B;KAC/B,KAAK;KACL;KACA,UAAU,YAAY,KAAK;KAC5B;AACD,QAAI,CAAC,OAAO,MAAM,IAAI,WAAW,SAAS,CAIxC,aAAY,WAAW,UAAU,KAAK,UAAU,EAAC,KAAK,cAAa,EACjE,QACD,EAAE;AAEL,YAAQ,KAAK,YAAY;;AAK3B,QAAK,MAAM,MAAM,YAAY;IAC3B,MAAM,OAAO,KAAK,QAAQ,IAAI,GAAG,CAAC;AAClC,YAAQ,KAAK;KAAC,KAAK;KAAgB;KAAK,CAAC;;AAE3C,UAAO;WACA,GAAG;AACV,SAAM,IAAI,6BAA6B,OAAO,EAAE,EAAE,QAAQ,EAAC,OAAO,GAAE,CAAC;;;CAIzE,iBACE,UACA,UACA,QACgB;EAChB,MAAM,UAA0B,EAAE;AAClC,MACE,SAAS,WAAW,SAAS,UAC7B,SAAS,SAAS,SAAS,KAE3B,SAAQ,KAAK;GACX,KAAK;GACL,KAAK;IAAC,QAAQ,SAAS;IAAQ,MAAM,SAAS;IAAK;GACnD,KAAK;IAAC,QAAQ,SAAS;IAAQ,MAAM,SAAS;IAAK;GACpD,CAAC;EAEJ,MAAM,cAAc,YAAY,SAAS;EACzC,MAAM,cAAc,YAAY,SAAS;AACzC,MAAI,CAAC,UAAU,aAAa,YAAY,CACtC,SAAQ,KAAK;GACX,KAAK;GACL,OAAO;IAAC,QAAQ,SAAS;IAAQ,MAAM,SAAS;IAAK;GACrD,KAAK;GACL,KAAK;GACN,CAAC;EAEJ,MAAM,QAAQ;GAAC,QAAQ,SAAS;GAAQ,MAAM,SAAS;GAAK;EAC5D,MAAM,aAAa,YAAY,SAAS,QAAQ;EAChD,MAAM,aAAa,YAAY,SAAS,QAAQ;EAGhD,MAAM,CAAC,SAAS,SAAS,qBAAqB,YAAY,WAAW;AACrE,OAAK,MAAM,MAAM,SAAS;GACxB,MAAM,EAAC,MAAM,WAAU,KAAK,WAAW,IAAI,GAAG,CAAC;AAC/C,WAAQ,KAAK;IAAC,KAAK;IAAe;IAAO;IAAO,CAAC;;EAInD,MAAM,OAAO,aAAa,YAAY,WAAW;AACjD,OAAK,MAAM,MAAM,MAAM;GACrB,MAAM,EAAC,MAAM,SAAS,GAAG,YAAW,KAAK,WAAW,IAAI,
GAAG,CAAC;GAC5D,MAAM,EAAC,MAAM,SAAS,GAAG,YAAW,KAAK,WAAW,IAAI,GAAG,CAAC;AAK5D,OACE,YAAY,WACZ,QAAQ,aAAa,QAAQ,YAC7B,QAAQ,YAAY,QAAQ,QAE5B,SAAQ,KAAK;IACX,KAAK;IACL;IACA,KAAK;KAAC,MAAM;KAAS,MAAM;KAAQ;IACnC,KAAK;KAAC,MAAM;KAAS,MAAM;KAAQ;IACpC,CAAC;;EAQN,MAAM,iBAAiB,WAAW;AAGlC,OAAK,MAAM,MAAM,OAAO;GACtB,MAAM,EAAC,MAAM,GAAG,SAAQ,KAAK,WAAW,IAAI,GAAG,CAAC;GAChD,MAAM,SAAS;IAAC;IAAM;IAAK;GAC3B,MAAM,YAAuB;IAC3B,KAAK;IACL;IACA;IACA,eAAe,YAAY,SAAS;IACrC;AACD,OAAI,gBAAgB;AAClB,cAAU,OAAO,KAAK,OAAO;AAC7B,cAAU,WAAW,EAAC,QAAQ,KAAK,KAAI;SAGvC,KAAI;AACF,4BAAwB,MAAM,MAAM,OAAO;YACpC,GAAG;AACV,QAAI,EAAE,aAAa,+BAGjB,OAAM;AAQR,UAAA,GAAS,OACP,sBAAsB,MAAM,KAAK,GAAG,KAAK,IAAI,OAAO,EAAE,GACvD;AACD,cAAU,OAAO,KAAK,OAAO;AAC7B,cAAU,WAAW,EAAC,QAAQ,KAAK,KAAI;;AAG3C,WAAQ,KAAK,UAAU;;AAEzB,SAAO;;;;;;;;;;;;;;;;;;;;CAqBT,OAAA,eAAsB,KAAoD;EACxE,MAAM,EAAC,cAAc,iBAAgB,MAAA;AACrC,MAAI,aACF,QAAO,EAAE;EAEX,MAAM,gBAAgB,MAAM,mBAAmB,MAAA,IAAU,aAAa;EACtE,MAAM,aAAa,oBAAoB,MAAA,eAAqB,cAAc;AAC1E,MAAI,eAAe,KACjB,OAAM,IAAI,2BAA2B,WAAW;EAMlD,MAAM,OAAO,MAAA,cAAoB,OAAO,MACtC,MAAK,EAAE,QAAQ,IAAI,YACpB;AACD,MAAI,CAAC,KAEH,OAAM,IAAI,2BACR,kCAAkC,UAAU,IAAI,GACjD;AAEH,MAAI,kBAAkB,MAAM,IAAI,CAC9B,OAAM,IAAI,2BACR,gDAAgD,UAAU,KAAK,CAAC,MAAM,UAAU,IAAI,GACrF;AAEH,SAAO,EAAE;;;AAIb,SAAS,oBACP,GACA,GACe;AAEf,KAAI,EAAE,OAAO,WAAW,EAAE,OAAO,OAC/B,QAAO;AAET,MAAK,IAAI,IAAI,GAAG,IAAI,EAAE,OAAO,QAAQ,KAAK;EACxC,MAAM,KAAK,EAAE,OAAO;EACpB,MAAM,KAAK,EAAE,OAAO;EACpB,MAAM,aAAa,mBAAmB,IAAI,GAAG;AAC7C,MAAI,WACF,QAAO;;AAGX,QAAO;;AAIT,IAAM,eAAe,GAAyB,MAC5C,EAAE,GAAG,MAAM,EAAE,GAAG,MAAM,KAAK,EAAE,GAAG,MAAM,EAAE,GAAG,MAAM,IAAI;AAEvD,SAAS,mBACP,GACA,GACe;AACf,KAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,WAAW,EAAE,UAAU,EAAE,SAAS,EAAE,KAC3D,QAAO,UAAU,EAAE,KAAK,wBAAwB,EAAE,KAAK;AAEzD,KAAI,CAAC,UAAU,EAAE,YAAY,EAAE,WAAW,CACxC,QAAO,yBAAyB,EAAE,KAAK;CAEzC,MAAM,QAAQ,OAAO,QAAQ,EAAE,QAAQ,CAAC,KAAK,YAAY;CACzD,MAAM,QAAQ,OAAO,QAAQ,EAAE,QAAQ,CAAC,KAAK,YAAY;AACzD,KACE,MAAM,WAAW,MAAM,UACvB,MAAM,MAAM,CAAC,OAAO,OAAO,MAAM;EAC/B,MAAM,CAAC,OAAO,QAAQ,MAAM;AAC5B,SACE,UAAU,SACV,KAAK,QAAQ,KAAK,OACl
B,KAAK,YAAY,KAAK,WACtB,KAAK,YAAY,KAAK;GAExB,CAEF,QAAO,qBAAqB,EAAE,KAAK;AAErC,QAAO;;AAGT,SAAgB,kBAAkB,GAAuB,GAAqB;AAC5E,KAAI,EAAE,QAAQ,EAAE,eAAe,EAAE,WAAW,EAAE,UAAU,EAAE,SAAS,EAAE,KACnE,QAAO;AAET,KAME,EAAE,oBAAoB,aACtB,CAAC,OAAO,IAAI,IAAI,EAAE,WAAW,EAAE,IAAI,IAAI,EAAE,WAAW,CAAC,CAErD,QAAO;CAET,MAAM,QAAQ,OAAO,QAAQ,EAAE,QAAQ,CAAC,KAAK,YAAY;CACzD,MAAM,QAAQ,EAAE;AAChB,QACE,MAAM,WAAW,MAAM,UACvB,MAAM,MAAM,CAAC,OAAO,OAAO,MAAM;EAC/B,MAAM,OAAO,MAAM;AACnB,SAAO,UAAU,KAAK,QAAQ,KAAK,YAAY,KAAK;GACpD;;AAIN,SAAS,eAAe,GAAmB;AACzC,KAAI,EAAE,aAAa,OACjB,QAAO,IAAI,MAAM,OAAO,EAAE,CAAC;AAE7B,KAAI,aAAa,SAAS,iBAAiB,EAAE,SAAS,kBACpD,QAAO,IAAI,eAAe,EAAE;AAE9B,QAAO;;AAET,IAAM,YAAY,OAAmB,GAAG,GAAG,OAAO,GAAG,GAAG;AAExD,SAAS,UAAU,WAA4B;AAC7C,QAAO,CAGL,IAAI,IAAI,UAAU,OAAO,KAAI,MAAK,CAAC,EAAE,KAAK,EAAE,CAAC,CAAC,EAC9C,IAAI,IAAI,UAAU,QAAQ,KAAI,MAAK,CAAC,SAAS,EAAE,EAAE,EAAE,CAAC,CAAC,CACtD;;;;;;;;;;AAWH,SAAS,2BACP,MACA,MACA,YACA,YACS;AACT,KACE,KAAK,WAAW,KAAK,UACrB,KAAK,iBAAiB,KAAK,gBAC3B,KAAK,sBAAsB,KAAK,qBAChC,KAAK,gBAAgB,KAAK,YAE1B,QAAO;CAGT,MAAM,YAAY,yBAChB,YACA,KAAK,QACL,KAAK,UACN;CACD,MAAM,YAAY,yBAChB,YACA,KAAK,QACL,KAAK,UACN;AACD,KAAI,CAAC,aAAa,CAAC,UAEjB,QAAO;CAGT,MAAM,cAAc,OAAO,QAAQ,KAAK,QAAQ;CAChD,MAAM,cAAc,OAAO,QAAQ,KAAK,QAAQ;AAChD,KAAI,YAAY,WAAW,YAAY,OACrC,QAAO;CAIT,MAAM,eAAe,IAAI,IACvB,YAAY,KAAK,CAAC,MAAM,SAAS,CAAC,UAAU,QAAQ,OAAO,KAAK,IAAI,CAAC,CACtE;CACD,MAAM,eAAe,IAAI,IACvB,YAAY,KAAK,CAAC,MAAM,SAAS,CAAC,UAAU,QAAQ,OAAO,KAAK,IAAI,CAAC,CACtE;AAED,KAAI,aAAa,IAAI,KAAA,EAAU,IAAI,aAAa,IAAI,KAAA,EAAU,CAE5D,QAAO;AAET,KAAI,aAAa,SAAS,aAAa,KACrC,QAAO;AAET,MAAK,MAAM,CAAC,QAAQ,QAAQ,aAC1B,KAAI,aAAa,IAAI,OAAO,KAAK,IAC/B,QAAO;AAGX,QAAO;;AAGT,SAAS,yBACP,QACA,QACA,MAC+C;AAC/C,MAAK,MAAM,SAAS,OAAO,QAAQ,CACjC,KAAI,MAAM,WAAW,UAAU,MAAM,SAAS,KAC5C,QAAO;;AAMb,SAAS,YACP,SAC0C;CAC1C,MAAM,2BAAW,IAAI,KAA0C;AAC/D,MAAK,MAAM,CAAC,MAAM,SAAS,OAAO,QAAQ,QAAQ,CAGhD,UAAS,IAAI,KAAK,KAAK;EAAC,GAAG;EAAM;EAAK,CAAC;AAEzC,QAAO;;AAGT,SAAS,YAAY,OAAyD;AAC5E,QAAO;EACL,WAAW,KAAK,MAAM,UAAU;EAChC,aAAa,MAAM;EACnB,QAAQ,OAAO,YACb,MAAM,uBAAuB
,KAAI,MAAK,CACpC,GACA,EAAC,QAAQ,MAAM,QAAQ,GAAG,KAAI,CAC/B,CAAC,CACH;EACF;;AAKH,SAAS,aAAa,UAA6C;CAGjE,MAAM,EAAC,SAAS,GAAG,YAAY,iBAAiB,GAAG,SAAQ;AAC3D,QAAO;EACL,GAAG;EACH,QAAQ;GACN,SAAS;GACT,MAAM;GACP;EAGD;EACA;EACD;;AAGH,IAAM,+BAAN,cAA2C,MAAM;CAC/C,OAAgB;CAChB;CACA;CAEA,YACE,aACA,OACA,SACA;AACA,QACE,sDAAsD,eACtD,QACD;AACD,OAAK,cAAc;AACnB,OAAK,QAAQ;;;AAIjB,IAAM,6BAAN,cAAyC,MAAM;CAC7C,OAAgB;CAEhB,YAAY,KAAa;AACvB,QACE,GAAG,IAAI,+EACR;;;AAKL,IAAM,iBAAN,cAA6B,WAAW;CACtC,OAAgB;CAEhB,YAAY,OAAgB;AAC1B,QACE,yFACA,EACE,OACD,CACF;;;AAIL,SAAS,2BACP,EAAC,WACD,QACA;CACA,MAAM,MACJ,mBAAmB,SACf,QAAQ,SAAS,QAAQ,GACzB,IAAI,aAAa,CAAC,OAAO,QAAQ;AAEvC,QAAO,MADM,KAAK,MAAM,IAAI,EACP,QAAQ,cAAc"}
|