@rocicorp/zero 1.4.0-canary.0 → 1.4.0-canary.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/out/analyze-query/src/analyze-cli.d.ts +1 -1
- package/out/analyze-query/src/analyze-cli.d.ts.map +1 -1
- package/out/analyze-query/src/analyze-cli.js +13 -3
- package/out/analyze-query/src/analyze-cli.js.map +1 -1
- package/out/analyze-query/src/bin-analyze.js +1 -1
- package/out/analyze-query/src/bin-analyze.js.map +1 -1
- package/out/analyze-query/src/bin-transform.js +1 -1
- package/out/analyze-query/src/bin-transform.js.map +1 -1
- package/out/replicache/src/btree/node.d.ts +1 -1
- package/out/replicache/src/btree/node.d.ts.map +1 -1
- package/out/replicache/src/btree/node.js +34 -21
- package/out/replicache/src/btree/node.js.map +1 -1
- package/out/replicache/src/btree/write.js +1 -2
- package/out/replicache/src/btree/write.js.map +1 -1
- package/out/shared/src/btree-set.d.ts +6 -0
- package/out/shared/src/btree-set.d.ts.map +1 -1
- package/out/shared/src/btree-set.js +34 -0
- package/out/shared/src/btree-set.js.map +1 -1
- package/out/zero/package.js +1 -1
- package/out/zero/package.js.map +1 -1
- package/out/zero/src/bindings.js +1 -1
- package/out/zero-cache/src/config/zero-config.d.ts +18 -0
- package/out/zero-cache/src/config/zero-config.d.ts.map +1 -1
- package/out/zero-cache/src/config/zero-config.js +35 -3
- package/out/zero-cache/src/config/zero-config.js.map +1 -1
- package/out/zero-cache/src/scripts/decommission.d.ts.map +1 -1
- package/out/zero-cache/src/scripts/decommission.js +3 -3
- package/out/zero-cache/src/scripts/decommission.js.map +1 -1
- package/out/zero-cache/src/scripts/deploy-permissions.js +1 -1
- package/out/zero-cache/src/scripts/deploy-permissions.js.map +1 -1
- package/out/zero-cache/src/server/change-streamer.d.ts.map +1 -1
- package/out/zero-cache/src/server/change-streamer.js +2 -5
- package/out/zero-cache/src/server/change-streamer.js.map +1 -1
- package/out/zero-cache/src/server/main.d.ts.map +1 -1
- package/out/zero-cache/src/server/main.js +6 -1
- package/out/zero-cache/src/server/main.js.map +1 -1
- package/out/zero-cache/src/server/reaper.d.ts.map +1 -1
- package/out/zero-cache/src/server/reaper.js +1 -4
- package/out/zero-cache/src/server/reaper.js.map +1 -1
- package/out/zero-cache/src/server/shadow-syncer.js +35 -0
- package/out/zero-cache/src/server/shadow-syncer.js.map +1 -0
- package/out/zero-cache/src/server/syncer.d.ts.map +1 -1
- package/out/zero-cache/src/server/syncer.js +2 -8
- package/out/zero-cache/src/server/syncer.js.map +1 -1
- package/out/zero-cache/src/server/worker-urls.d.ts +1 -0
- package/out/zero-cache/src/server/worker-urls.d.ts.map +1 -1
- package/out/zero-cache/src/server/worker-urls.js +2 -1
- package/out/zero-cache/src/server/worker-urls.js.map +1 -1
- package/out/zero-cache/src/services/change-source/custom/change-source.js +2 -2
- package/out/zero-cache/src/services/change-source/custom/change-source.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/backfill-stream.d.ts +8 -1
- package/out/zero-cache/src/services/change-source/pg/backfill-stream.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/backfill-stream.js +31 -18
- package/out/zero-cache/src/services/change-source/pg/backfill-stream.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/change-source.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/change-source.js +44 -46
- package/out/zero-cache/src/services/change-source/pg/change-source.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts +6 -1
- package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/initial-sync.js +62 -22
- package/out/zero-cache/src/services/change-source/pg/initial-sync.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-http.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-http.js +5 -6
- package/out/zero-cache/src/services/change-streamer/change-streamer-http.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/schema/tables.js +1 -1
- package/out/zero-cache/src/services/run-ast.js +1 -1
- package/out/zero-cache/src/services/shadow-sync/shadow-sync-service.js +49 -0
- package/out/zero-cache/src/services/shadow-sync/shadow-sync-service.js.map +1 -0
- package/out/zero-cache/src/services/statz.js +3 -3
- package/out/zero-cache/src/services/statz.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr-store.d.ts +1 -0
- package/out/zero-cache/src/services/view-syncer/cvr-store.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr-store.js +34 -11
- package/out/zero-cache/src/services/view-syncer/cvr-store.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr.d.ts +16 -1
- package/out/zero-cache/src/services/view-syncer/cvr.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr.js +19 -1
- package/out/zero-cache/src/services/view-syncer/cvr.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/inspect-handler.js +1 -1
- package/out/zero-cache/src/services/view-syncer/inspect-handler.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts +6 -0
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.js +46 -3
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/row-set-signature.d.ts +17 -0
- package/out/zero-cache/src/services/view-syncer/row-set-signature.d.ts.map +1 -0
- package/out/zero-cache/src/services/view-syncer/row-set-signature.js +29 -0
- package/out/zero-cache/src/services/view-syncer/row-set-signature.js.map +1 -0
- package/out/zero-cache/src/services/view-syncer/schema/cvr.d.ts +1 -0
- package/out/zero-cache/src/services/view-syncer/schema/cvr.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/schema/cvr.js +1 -0
- package/out/zero-cache/src/services/view-syncer/schema/cvr.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/schema/init.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/schema/init.js +5 -1
- package/out/zero-cache/src/services/view-syncer/schema/init.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/schema/types.d.ts +105 -0
- package/out/zero-cache/src/services/view-syncer/schema/types.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/schema/types.js +8 -4
- package/out/zero-cache/src/services/view-syncer/schema/types.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/view-syncer.js +2 -2
- package/out/zero-cache/src/services/view-syncer/view-syncer.js.map +1 -1
- package/out/zero-cache/src/types/pg.d.ts +1 -1
- package/out/zero-cache/src/types/pg.d.ts.map +1 -1
- package/out/zero-cache/src/types/pg.js +8 -2
- package/out/zero-cache/src/types/pg.js.map +1 -1
- package/out/zero-cache/src/types/timeout.d.ts +11 -0
- package/out/zero-cache/src/types/timeout.d.ts.map +1 -0
- package/out/zero-cache/src/types/timeout.js +26 -0
- package/out/zero-cache/src/types/timeout.js.map +1 -0
- package/out/zero-cache/src/workers/connection.js +5 -5
- package/out/zero-cache/src/workers/connection.js.map +1 -1
- package/out/zero-client/src/client/bindings.js +1 -1
- package/out/zero-client/src/client/log-options.d.ts +1 -0
- package/out/zero-client/src/client/log-options.d.ts.map +1 -1
- package/out/zero-client/src/client/log-options.js +3 -2
- package/out/zero-client/src/client/log-options.js.map +1 -1
- package/out/zero-client/src/client/options.d.ts +13 -1
- package/out/zero-client/src/client/options.d.ts.map +1 -1
- package/out/zero-client/src/client/options.js.map +1 -1
- package/out/zero-client/src/client/version.js +1 -1
- package/out/zero-client/src/client/zero.d.ts.map +1 -1
- package/out/zero-client/src/client/zero.js +2 -1
- package/out/zero-client/src/client/zero.js.map +1 -1
- package/out/zero-react/src/bindings.js +1 -1
- package/out/zero-server/src/adapters/postgresjs.d.ts.map +1 -1
- package/out/zero-server/src/adapters/postgresjs.js +1 -1
- package/out/zero-server/src/adapters/postgresjs.js.map +1 -1
- package/out/zero-solid/src/bindings.js +1 -1
- package/out/zero-solid/src/solid-view.js +1 -1
- package/out/zql/src/ivm/array-view.js +1 -1
- package/out/zql/src/ivm/memory-source.d.ts.map +1 -1
- package/out/zql/src/ivm/memory-source.js +4 -4
- package/out/zql/src/ivm/memory-source.js.map +1 -1
- package/out/zql/src/ivm/operator.d.ts +1 -1
- package/out/zql/src/ivm/operator.d.ts.map +1 -1
- package/out/zql/src/ivm/operator.js +2 -4
- package/out/zql/src/ivm/operator.js.map +1 -1
- package/out/zql/src/ivm/skip-yields.d.ts +4 -0
- package/out/zql/src/ivm/skip-yields.d.ts.map +1 -0
- package/out/zql/src/ivm/skip-yields.js +33 -0
- package/out/zql/src/ivm/skip-yields.js.map +1 -0
- package/out/zql/src/ivm/view-apply-change.js +1 -1
- package/out/zql/src/query/query-internals.d.ts.map +1 -1
- package/out/zql/src/query/query-internals.js +1 -1
- package/out/zql/src/query/query-internals.js.map +1 -1
- package/package.json +1 -1

package/out/zero-cache/src/server/shadow-syncer.js
@@ -0,0 +1,35 @@
+import { must } from "../../../shared/src/must.js";
+import { parentWorker, singleProcessMode } from "../types/processes.js";
+import { getShardConfig } from "../types/shards.js";
+import { getNormalizedZeroConfig } from "../config/zero-config.js";
+import { exitAfter, runUntilKilled } from "../services/life-cycle.js";
+import { createLogContext } from "./logging.js";
+import { getServerContext } from "../config/server-context.js";
+import { initEventSink } from "../observability/events.js";
+import { startOtelAuto } from "./otel-start.js";
+import { ShadowSyncService } from "../services/shadow-sync/shadow-sync-service.js";
+//#region ../zero-cache/src/server/shadow-syncer.ts
+var MS_PER_HOUR = 1e3 * 60 * 60;
+function runWorker(parent, env, ...argv) {
+	const config = getNormalizedZeroConfig({
+		env,
+		argv
+	});
+	startOtelAuto(createLogContext(config, "shadow-syncer", 0, false), "shadow-syncer", 0);
+	const lc = createLogContext(config, "shadow-syncer");
+	initEventSink(lc, config);
+	const { shadowSync, upstream, initialSync } = config;
+	const service = new ShadowSyncService(lc, getShardConfig(config), upstream.db, getServerContext(config), {
+		intervalMs: shadowSync.intervalHours * MS_PER_HOUR,
+		sampleRate: shadowSync.sampleRate,
+		maxRowsPerTable: shadowSync.maxRowsPerTable,
+		textCopy: initialSync.textCopy
+	});
+	parent.send(["ready", { ready: true }]);
+	return runUntilKilled(lc, parent, service);
+}
+if (!singleProcessMode()) exitAfter(() => runWorker(must(parentWorker), process.env, ...process.argv.slice(2)));
+//#endregion
+export { runWorker as default };
+
+//# sourceMappingURL=shadow-syncer.js.map
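The new worker above derives its ShadowSyncService options directly from the normalized config, converting the configured interval from hours to milliseconds. A minimal TypeScript sketch of that derivation; the ShadowSyncOptions shape and toServiceOptions helper are inferred from the call site above, not taken from published typings:

// Sketch only: option shapes inferred from the shadow-syncer.js call site.
type ShadowSyncOptions = {
  intervalMs: number;
  sampleRate: number;
  maxRowsPerTable: number;
  textCopy: boolean; // assumed boolean, mirroring initialSync.textCopy
};

const MS_PER_HOUR = 1000 * 60 * 60;

function toServiceOptions(
  shadowSync: {intervalHours: number; sampleRate: number; maxRowsPerTable: number},
  initialSync: {textCopy: boolean},
): ShadowSyncOptions {
  return {
    intervalMs: shadowSync.intervalHours * MS_PER_HOUR, // e.g. 12h -> 43_200_000ms
    sampleRate: shadowSync.sampleRate,
    maxRowsPerTable: shadowSync.maxRowsPerTable,
    textCopy: initialSync.textCopy,
  };
}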

package/out/zero-cache/src/server/shadow-syncer.js.map
@@ -0,0 +1 @@
+{"version":3,"file":"shadow-syncer.js","names":[],"sources":["../../../../../zero-cache/src/server/shadow-syncer.ts"],"sourcesContent":["import {must} from '../../../shared/src/must.ts';\nimport {getServerContext} from '../config/server-context.ts';\nimport {getNormalizedZeroConfig} from '../config/zero-config.ts';\nimport {initEventSink} from '../observability/events.ts';\nimport {exitAfter, runUntilKilled} from '../services/life-cycle.ts';\nimport {ShadowSyncService} from '../services/shadow-sync/shadow-sync-service.ts';\nimport {\n  parentWorker,\n  singleProcessMode,\n  type Worker,\n} from '../types/processes.ts';\nimport {getShardConfig} from '../types/shards.ts';\nimport {createLogContext} from './logging.ts';\nimport {startOtelAuto} from './otel-start.ts';\n\nconst MS_PER_HOUR = 1000 * 60 * 60;\n\nexport default function runWorker(\n  parent: Worker,\n  env: NodeJS.ProcessEnv,\n  ...argv: string[]\n): Promise<void> {\n  const config = getNormalizedZeroConfig({env, argv});\n\n  startOtelAuto(\n    createLogContext(config, 'shadow-syncer', 0, false),\n    'shadow-syncer',\n    0,\n  );\n  const lc = createLogContext(config, 'shadow-syncer');\n  initEventSink(lc, config);\n\n  const {shadowSync, upstream, initialSync} = config;\n  const shard = getShardConfig(config);\n  const service = new ShadowSyncService(\n    lc,\n    shard,\n    upstream.db,\n    getServerContext(config),\n    {\n      intervalMs: shadowSync.intervalHours * MS_PER_HOUR,\n      sampleRate: shadowSync.sampleRate,\n      maxRowsPerTable: shadowSync.maxRowsPerTable,\n      textCopy: initialSync.textCopy,\n    },\n  );\n\n  parent.send(['ready', {ready: true}]);\n\n  return runUntilKilled(lc, parent, service);\n}\n\n// fork()\nif (!singleProcessMode()) {\n  void exitAfter(() =>\n    runWorker(must(parentWorker), process.env, ...process.argv.slice(2)),\n  );\n}\n"],"mappings":";;;;;;;;;;;AAeA,IAAM,cAAc,MAAO,KAAK;AAEhC,SAAwB,UACtB,QACA,KACA,GAAG,MACY;CACf,MAAM,SAAS,wBAAwB;EAAC;EAAK;EAAK,CAAC;AAEnD,eACE,iBAAiB,QAAQ,iBAAiB,GAAG,MAAM,EACnD,iBACA,EACD;CACD,MAAM,KAAK,iBAAiB,QAAQ,gBAAgB;AACpD,eAAc,IAAI,OAAO;CAEzB,MAAM,EAAC,YAAY,UAAU,gBAAe;CAE5C,MAAM,UAAU,IAAI,kBAClB,IAFY,eAAe,OAAO,EAIlC,SAAS,IACT,iBAAiB,OAAO,EACxB;EACE,YAAY,WAAW,gBAAgB;EACvC,YAAY,WAAW;EACvB,iBAAiB,WAAW;EAC5B,UAAU,YAAY;EACvB,CACF;AAED,QAAO,KAAK,CAAC,SAAS,EAAC,OAAO,MAAK,CAAC,CAAC;AAErC,QAAO,eAAe,IAAI,QAAQ,QAAQ;;AAI5C,IAAI,CAAC,mBAAmB,CACjB,iBACH,UAAU,KAAK,aAAa,EAAE,QAAQ,KAAK,GAAG,QAAQ,KAAK,MAAM,EAAE,CAAC,CACrE"}

package/out/zero-cache/src/server/syncer.d.ts.map
@@ -1 +1 @@
-{"version":3,"file":"syncer.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/server/syncer.ts"],"names":[],"mappings":"AA+BA,OAAO,EAGL,KAAK,MAAM,EACZ,MAAM,uBAAuB,CAAC;AAgC/B,MAAM,CAAC,OAAO,UAAU,SAAS,CAC/B,MAAM,EAAE,MAAM,EACd,GAAG,EAAE,MAAM,CAAC,UAAU,EACtB,GAAG,IAAI,EAAE,MAAM,EAAE,GAChB,OAAO,CAAC,IAAI,CAAC,
+{"version":3,"file":"syncer.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/server/syncer.ts"],"names":[],"mappings":"AA+BA,OAAO,EAGL,KAAK,MAAM,EACZ,MAAM,uBAAuB,CAAC;AAgC/B,MAAM,CAAC,OAAO,UAAU,SAAS,CAC/B,MAAM,EAAE,MAAM,EACd,GAAG,EAAE,MAAM,CAAC,UAAU,EACtB,GAAG,IAAI,EAAE,MAAM,EAAE,GAChB,OAAO,CAAC,IAAI,CAAC,CAsMf"}

package/out/zero-cache/src/server/syncer.js
@@ -59,14 +59,8 @@ function runWorker(parent, env, ...args) {
 	const { cvr, upstream, enableCrudMutations } = config;
 	const replicaFile = replicaFileName(config.replica.file, fileMode);
 	lc.debug?.(`running view-syncer on ${replicaFile}`);
-	const cvrDB = pgClient(lc, cvr.db, {
-		max: must(cvr.maxConnsPerWorker, "cvr.maxConnsPerWorker must be set"),
-		connection: { ["application_name"]: `zero-sync-worker-${pid}-cvr` }
-	});
-	const upstreamDB = enableCrudMutations ? pgClient(lc, upstream.db, {
-		max: must(upstream.maxConnsPerWorker, "upstream.maxConnsPerWorker must be set"),
-		connection: { ["application_name"]: `zero-sync-worker-${pid}-upstream` }
-	}) : void 0;
+	const cvrDB = pgClient(lc, cvr.db, `sync-worker-${pid}-cvr`, { max: must(cvr.maxConnsPerWorker, "cvr.maxConnsPerWorker must be set") });
+	const upstreamDB = enableCrudMutations ? pgClient(lc, upstream.db, `sync-worker-${pid}-upstream`, { max: must(upstream.maxConnsPerWorker, "upstream.maxConnsPerWorker must be set") }) : void 0;
 	const dbWarmup = Promise.allSettled([warmupConnections(lc, cvrDB, "cvr"), upstreamDB ? warmupConnections(lc, upstreamDB, "upstream") : promiseVoid]);
 	const tmpDir = config.storageDBTmpDir ?? tmpdir();
 	const operatorStorage = DatabaseStorage.create(lc, path.join(tmpDir, `sync-worker-${randomUUID()}`));
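The hunk above reflects a changed pgClient call shape (see also the one-line change to package/out/zero-cache/src/types/pg.d.ts in the file list): the connection name moves out of the options object into its own argument, and the old `zero-sync-worker-${pid}` application_name becomes `sync-worker-${pid}`, suggesting the `zero-` prefix is now applied inside pgClient itself. A hedged TypeScript sketch of the before/after shapes, with parameter names and types inferred from this diff rather than from zero-cache's actual declarations:

// Inferred, not authoritative: illustrative signatures only.
// Before (1.4.0-canary.0): the options object carried application_name.
declare function pgClientOld(
  lc: unknown,
  db: string,
  opts: {max?: number; connection: {application_name: string}},
): unknown;

// After (1.4.0-canary.2): a name argument precedes the options;
// e.g. `sync-worker-${pid}-cvr`, with prefixing presumed internal.
declare function pgClientNew(
  lc: unknown,
  db: string,
  name: string,
  opts?: {max?: number},
): unknown;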

package/out/zero-cache/src/server/syncer.js.map
@@ -1 +1 @@
-{"version":3,"file":"syncer.js","names":[],"sources":["../../../../../zero-cache/src/server/syncer.ts"],"sourcesContent":["import {randomUUID} from 'node:crypto';\nimport {tmpdir} from 'node:os';\nimport path from 'node:path';\nimport {pid} from 'node:process';\nimport {assert} from '../../../shared/src/asserts.ts';\nimport {must} from '../../../shared/src/must.ts';\nimport {randInt} from '../../../shared/src/rand.ts';\nimport {promiseVoid} from '../../../shared/src/resolved-promises.ts';\nimport * as v from '../../../shared/src/valita.ts';\nimport {DatabaseStorage} from '../../../zqlite/src/database-storage.ts';\nimport type {ValidateLegacyJWT} from '../auth/auth.ts';\nimport {tokenConfigOptions, verifyToken} from '../auth/jwt.ts';\nimport type {NormalizedZeroConfig} from '../config/normalize.ts';\nimport {getNormalizedZeroConfig} from '../config/zero-config.ts';\nimport {CustomQueryTransformer} from '../custom-queries/transform-query.ts';\nimport {warmupConnections} from '../db/warmup.ts';\nimport {initEventSink} from '../observability/events.ts';\nimport {exitAfter, runUntilKilled} from '../services/life-cycle.ts';\nimport {MutagenService} from '../services/mutagen/mutagen.ts';\nimport {PusherService} from '../services/mutagen/pusher.ts';\nimport type {ReplicaState} from '../services/replicator/replicator.ts';\nimport {\n type ConnectionContextManager,\n ConnectionContextManagerImpl,\n} from '../services/view-syncer/connection-context-manager.ts';\nimport type {DrainCoordinator} from '../services/view-syncer/drain-coordinator.ts';\nimport {PipelineDriver} from '../services/view-syncer/pipeline-driver.ts';\nimport {Snapshotter} from '../services/view-syncer/snapshotter.ts';\nimport {ViewSyncerService} from '../services/view-syncer/view-syncer.ts';\nimport {ProtocolErrorWithLevel} from '../types/error-with-level.ts';\nimport {pgClient} from '../types/pg.ts';\nimport {\n parentWorker,\n singleProcessMode,\n type Worker,\n} from '../types/processes.ts';\nimport {getShardID} from '../types/shards.ts';\nimport type {Subscription} from '../types/subscription.ts';\nimport {replicaFileModeSchema, replicaFileName} from '../workers/replicator.ts';\nimport {Syncer} from '../workers/syncer.ts';\nimport {startAnonymousTelemetry} from './anonymous-otel-start.ts';\nimport {InspectorDelegate} from './inspector-delegate.ts';\nimport {createLogContext} from './logging.ts';\nimport {startOtelAuto} from './otel-start.ts';\nimport {isPriorityOpRunning, runPriorityOp} from './priority-op.ts';\n\nfunction randomID() {\n return randInt(1, Number.MAX_SAFE_INTEGER).toString(36);\n}\n\nfunction getCustomQueryConfig(\n config: Pick<NormalizedZeroConfig, 'query' | 'getQueries'>,\n) {\n const queryConfig = config.query?.url ? config.query : config.getQueries;\n\n if (!queryConfig?.url) {\n return undefined;\n }\n\n return {\n url: queryConfig.url,\n apiKey: queryConfig.apiKey,\n allowedClientHeaders: queryConfig.allowedClientHeaders,\n forwardCookies: queryConfig.forwardCookies ?? false,\n };\n}\n\nexport default function runWorker(\n parent: Worker,\n env: NodeJS.ProcessEnv,\n ...args: string[]\n): Promise<void> {\n assert(args.length >= 2, `expected [fileMode, workerIndex, ...flags]`);\n const fileMode = v.parse(args[0], replicaFileModeSchema);\n const workerIndex = Number(args[1]);\n const config = getNormalizedZeroConfig({env, argv: args.slice(2)});\n\n startOtelAuto(\n createLogContext(config, 'syncer', workerIndex, false),\n 'syncer',\n workerIndex,\n );\n const lc = createLogContext(config, 'syncer', workerIndex);\n initEventSink(lc, config);\n\n const {cvr, upstream, enableCrudMutations} = config;\n\n const replicaFile = replicaFileName(config.replica.file, fileMode);\n lc.debug?.(`running view-syncer on ${replicaFile}`);\n\n const cvrDB = pgClient(lc, cvr.db, {\n max: must(cvr.maxConnsPerWorker, 'cvr.maxConnsPerWorker must be set'),\n connection: {['application_name']: `zero-sync-worker-${pid}-cvr`},\n });\n\n const upstreamDB = enableCrudMutations\n ? pgClient(lc, upstream.db, {\n max: must(\n upstream.maxConnsPerWorker,\n 'upstream.maxConnsPerWorker must be set',\n ),\n connection: {['application_name']: `zero-sync-worker-${pid}-upstream`},\n })\n : undefined;\n\n const dbWarmup = Promise.allSettled([\n warmupConnections(lc, cvrDB, 'cvr'),\n upstreamDB ? warmupConnections(lc, upstreamDB, 'upstream') : promiseVoid,\n ]);\n\n const tmpDir = config.storageDBTmpDir ?? tmpdir();\n const operatorStorage = DatabaseStorage.create(\n lc,\n path.join(tmpDir, `sync-worker-${randomUUID()}`),\n );\n const writeAuthzStorage = DatabaseStorage.create(\n lc,\n path.join(tmpDir, `mutagen-${randomUUID()}`),\n );\n\n const shard = getShardID(config);\n const customQueryConfig = getCustomQueryConfig(config);\n const pushConfig =\n config.push.url === undefined && config.mutate.url === undefined\n ? undefined\n : {\n ...config.push,\n ...config.mutate,\n url: must(\n config.push.url ?? config.mutate.url,\n 'No push or mutate URL configured',\n ),\n };\n\n /** @deprecated used in JWT validation */\n let validateLegacyJWT: ValidateLegacyJWT | undefined = undefined;\n\n const tokenOptions = tokenConfigOptions(config.auth ?? {});\n if (tokenOptions.length === 1) {\n validateLegacyJWT = async (token, {userID}) => {\n if (!userID) {\n throw new ProtocolErrorWithLevel(\n {\n kind: 'Unauthorized',\n message: 'UserID is required for JWT validation.',\n origin: 'zeroCache',\n },\n 'warn',\n );\n }\n\n const decoded = await verifyToken(config.auth, token, {\n subject: userID,\n ...(config.auth?.issuer && {issuer: config.auth.issuer}),\n ...(config.auth?.audience && {\n audience: config.auth.audience,\n }),\n });\n return {\n type: 'jwt',\n raw: token,\n decoded,\n };\n };\n }\n\n const viewSyncerFactory = (\n id: string,\n sub: Subscription<ReplicaState>,\n drainCoordinator: DrainCoordinator,\n ) => {\n const logger = lc\n .withContext('component', 'view-syncer')\n .withContext('clientGroupID', id)\n .withContext('instance', randomID());\n\n const customQueryTransformer =\n customQueryConfig && new CustomQueryTransformer(logger, shard);\n const contextManager = new ConnectionContextManagerImpl(\n logger,\n config.auth.revalidateIntervalSeconds,\n config.auth.retransformIntervalSeconds,\n customQueryConfig,\n pushConfig,\n validateLegacyJWT,\n );\n\n lc.debug?.(\n `creating view syncer. Query Planner Enabled: ${config.enableQueryPlanner}`,\n );\n\n const inspectorDelegate = new InspectorDelegate(customQueryTransformer);\n\n const priorityOpRunningYieldThresholdMs = Math.max(\n config.yieldThresholdMs / 4,\n 2,\n );\n const normalYieldThresholdMs = Math.max(config.yieldThresholdMs, 2);\n\n return new ViewSyncerService(\n config,\n logger,\n shard,\n config.taskID,\n id,\n cvrDB,\n new PipelineDriver(\n logger,\n config.log,\n new Snapshotter(logger, replicaFile, shard),\n shard,\n operatorStorage.createClientGroupStorage(id),\n id,\n inspectorDelegate,\n () =>\n isPriorityOpRunning()\n ? priorityOpRunningYieldThresholdMs\n : normalYieldThresholdMs,\n config.enableQueryPlanner,\n config,\n ),\n sub,\n drainCoordinator,\n config.log.slowHydrateThreshold,\n inspectorDelegate,\n contextManager,\n customQueryTransformer,\n runPriorityOp,\n );\n };\n\n const mutagenFactory = upstreamDB\n ? (id: string) =>\n new MutagenService(\n lc\n .withContext('component', 'mutagen')\n .withContext('clientGroupID', id),\n shard,\n id,\n upstreamDB,\n config,\n writeAuthzStorage,\n )\n : undefined;\n\n const pusherFactory =\n pushConfig === undefined\n ? undefined\n : (id: string, contextManager: ConnectionContextManager) =>\n new PusherService(\n config,\n lc.withContext('clientGroupID', id),\n id,\n contextManager,\n );\n\n const syncer = new Syncer(\n lc,\n config,\n viewSyncerFactory,\n mutagenFactory,\n pusherFactory,\n parent,\n validateLegacyJWT,\n );\n\n startAnonymousTelemetry(lc, config);\n\n void dbWarmup.then(() => parent.send(['ready', {ready: true}]));\n\n return runUntilKilled(lc, parent, syncer);\n}\n\n// fork()\nif (!singleProcessMode()) {\n void exitAfter(() =>\n runWorker(must(parentWorker), process.env, ...process.argv.slice(2)),\n );\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA8CA,SAAS,WAAW;AAClB,QAAO,QAAQ,GAAG,OAAO,iBAAiB,CAAC,SAAS,GAAG;;AAGzD,SAAS,qBACP,QACA;CACA,MAAM,cAAc,OAAO,OAAO,MAAM,OAAO,QAAQ,OAAO;AAE9D,KAAI,CAAC,aAAa,IAChB;AAGF,QAAO;EACL,KAAK,YAAY;EACjB,QAAQ,YAAY;EACpB,sBAAsB,YAAY;EAClC,gBAAgB,YAAY,kBAAkB;EAC/C;;AAGH,SAAwB,UACtB,QACA,KACA,GAAG,MACY;AACf,QAAO,KAAK,UAAU,GAAG,6CAA6C;CACtE,MAAM,WAAW,MAAQ,KAAK,IAAI,sBAAsB;CACxD,MAAM,cAAc,OAAO,KAAK,GAAG;CACnC,MAAM,SAAS,wBAAwB;EAAC;EAAK,MAAM,KAAK,MAAM,EAAE;EAAC,CAAC;AAElE,eACE,iBAAiB,QAAQ,UAAU,aAAa,MAAM,EACtD,UACA,YACD;CACD,MAAM,KAAK,iBAAiB,QAAQ,UAAU,YAAY;AAC1D,eAAc,IAAI,OAAO;CAEzB,MAAM,EAAC,KAAK,UAAU,wBAAuB;CAE7C,MAAM,cAAc,gBAAgB,OAAO,QAAQ,MAAM,SAAS;AAClE,IAAG,QAAQ,0BAA0B,cAAc;CAEnD,MAAM,QAAQ,SAAS,IAAI,IAAI,IAAI;EACjC,KAAK,KAAK,IAAI,mBAAmB,oCAAoC;EACrE,YAAY,GAAE,qBAAqB,oBAAoB,IAAI,OAAM;EAClE,CAAC;CAEF,MAAM,aAAa,sBACf,SAAS,IAAI,SAAS,IAAI;EACxB,KAAK,KACH,SAAS,mBACT,yCACD;EACD,YAAY,GAAE,qBAAqB,oBAAoB,IAAI,YAAW;EACvE,CAAC,GACF,KAAA;CAEJ,MAAM,WAAW,QAAQ,WAAW,CAClC,kBAAkB,IAAI,OAAO,MAAM,EACnC,aAAa,kBAAkB,IAAI,YAAY,WAAW,GAAG,YAC9D,CAAC;CAEF,MAAM,SAAS,OAAO,mBAAmB,QAAQ;CACjD,MAAM,kBAAkB,gBAAgB,OACtC,IACA,KAAK,KAAK,QAAQ,eAAe,YAAY,GAAG,CACjD;CACD,MAAM,oBAAoB,gBAAgB,OACxC,IACA,KAAK,KAAK,QAAQ,WAAW,YAAY,GAAG,CAC7C;CAED,MAAM,QAAQ,WAAW,OAAO;CAChC,MAAM,oBAAoB,qBAAqB,OAAO;CACtD,MAAM,aACJ,OAAO,KAAK,QAAQ,KAAA,KAAa,OAAO,OAAO,QAAQ,KAAA,IACnD,KAAA,IACA;EACE,GAAG,OAAO;EACV,GAAG,OAAO;EACV,KAAK,KACH,OAAO,KAAK,OAAO,OAAO,OAAO,KACjC,mCACD;EACF;;CAGP,IAAI,oBAAmD,KAAA;AAGvD,KADqB,mBAAmB,OAAO,QAAQ,EAAE,CAAC,CACzC,WAAW,EAC1B,qBAAoB,OAAO,OAAO,EAAC,aAAY;AAC7C,MAAI,CAAC,OACH,OAAM,IAAI,uBACR;GACE,MAAM;GACN,SAAS;GACT,QAAQ;GACT,EACD,OACD;AAUH,SAAO;GACL,MAAM;GACN,KAAK;GACL,SAVc,MAAM,YAAY,OAAO,MAAM,OAAO;IACpD,SAAS;IACT,GAAI,OAAO,MAAM,UAAU,EAAC,QAAQ,OAAO,KAAK,QAAO;IACvD,GAAI,OAAO,MAAM,YAAY,EAC3B,UAAU,OAAO,KAAK,UACvB;IACF,CAAC;GAKD;;CAIL,MAAM,qBACJ,IACA,KACA,qBACG;EACH,MAAM,SAAS,GACZ,YAAY,aAAa,cAAc,CACvC,YAAY,iBAAiB,GAAG,CAChC,YAAY,YAAY,UAAU,CAAC;EAEtC,MAAM,yBACJ,qBAAqB,IAAI,uBAAuB,QAAQ,MAAM;EAChE,MAAM,iBAAiB,IAAI,6BACzB,QACA,OAAO,KAAK,2BACZ,OAAO,KAAK,4BACZ,mBACA,YACA,kBACD;AAED,KAAG,QACD,gDAAgD,OAAO,qBACxD;EAED,MAAM,oBAAoB,IAAI,kBAAkB,uBAAuB;EAEvE,MAAM,oCAAoC,KAAK,IAC7C,OAAO,mBAAmB,GAC1B,EACD;EACD,MAAM,yBAAyB,KAAK,IAAI,OAAO,kBAAkB,EAAE;AAEnE,SAAO,IAAI,kBACT,QACA,QACA,OACA,OAAO,QACP,IACA,OACA,IAAI,eACF,QACA,OAAO,KACP,IAAI,YAAY,QAAQ,aAAa,MAAM,EAC3C,OACA,gBAAgB,yBAAyB,GAAG,EAC5C,IACA,yBAEE,qBAAqB,GACjB,oCACA,wBACN,OAAO,oBACP,OACD,EACD,KACA,kBACA,OAAO,IAAI,sBACX,mBACA,gBACA,wBACA,cACD;;CA4BH,MAAM,SAAS,IAAI,OACjB,IACA,QACA,mBA5BqB,cAClB,OACC,IAAI,eACF,GACG,YAAY,aAAa,UAAU,CACnC,YAAY,iBAAiB,GAAG,EACnC,OACA,IACA,YACA,QACA,kBACD,GACH,KAAA,GAGF,eAAe,KAAA,IACX,KAAA,KACC,IAAY,mBACX,IAAI,cACF,QACA,GAAG,YAAY,iBAAiB,GAAG,EACnC,IACA,eACD,EAQP,QACA,kBACD;AAED,yBAAwB,IAAI,OAAO;AAE9B,UAAS,WAAW,OAAO,KAAK,CAAC,SAAS,EAAC,OAAO,MAAK,CAAC,CAAC,CAAC;AAE/D,QAAO,eAAe,IAAI,QAAQ,OAAO;;AAI3C,IAAI,CAAC,mBAAmB,CACjB,iBACH,UAAU,KAAK,aAAa,EAAE,QAAQ,KAAK,GAAG,QAAQ,KAAK,MAAM,EAAE,CAAC,CACrE"}
+{"version":3,"file":"syncer.js","names":[],"sources":["../../../../../zero-cache/src/server/syncer.ts"],"sourcesContent":["import {randomUUID} from 'node:crypto';\nimport {tmpdir} from 'node:os';\nimport path from 'node:path';\nimport {pid} from 'node:process';\nimport {assert} from '../../../shared/src/asserts.ts';\nimport {must} from '../../../shared/src/must.ts';\nimport {randInt} from '../../../shared/src/rand.ts';\nimport {promiseVoid} from '../../../shared/src/resolved-promises.ts';\nimport * as v from '../../../shared/src/valita.ts';\nimport {DatabaseStorage} from '../../../zqlite/src/database-storage.ts';\nimport type {ValidateLegacyJWT} from '../auth/auth.ts';\nimport {tokenConfigOptions, verifyToken} from '../auth/jwt.ts';\nimport type {NormalizedZeroConfig} from '../config/normalize.ts';\nimport {getNormalizedZeroConfig} from '../config/zero-config.ts';\nimport {CustomQueryTransformer} from '../custom-queries/transform-query.ts';\nimport {warmupConnections} from '../db/warmup.ts';\nimport {initEventSink} from '../observability/events.ts';\nimport {exitAfter, runUntilKilled} from '../services/life-cycle.ts';\nimport {MutagenService} from '../services/mutagen/mutagen.ts';\nimport {PusherService} from '../services/mutagen/pusher.ts';\nimport type {ReplicaState} from '../services/replicator/replicator.ts';\nimport {\n type ConnectionContextManager,\n ConnectionContextManagerImpl,\n} from '../services/view-syncer/connection-context-manager.ts';\nimport type {DrainCoordinator} from '../services/view-syncer/drain-coordinator.ts';\nimport {PipelineDriver} from '../services/view-syncer/pipeline-driver.ts';\nimport {Snapshotter} from '../services/view-syncer/snapshotter.ts';\nimport {ViewSyncerService} from '../services/view-syncer/view-syncer.ts';\nimport {ProtocolErrorWithLevel} from '../types/error-with-level.ts';\nimport {pgClient} from '../types/pg.ts';\nimport {\n parentWorker,\n singleProcessMode,\n type Worker,\n} from '../types/processes.ts';\nimport {getShardID} from '../types/shards.ts';\nimport type {Subscription} from '../types/subscription.ts';\nimport {replicaFileModeSchema, replicaFileName} from '../workers/replicator.ts';\nimport {Syncer} from '../workers/syncer.ts';\nimport {startAnonymousTelemetry} from './anonymous-otel-start.ts';\nimport {InspectorDelegate} from './inspector-delegate.ts';\nimport {createLogContext} from './logging.ts';\nimport {startOtelAuto} from './otel-start.ts';\nimport {isPriorityOpRunning, runPriorityOp} from './priority-op.ts';\n\nfunction randomID() {\n return randInt(1, Number.MAX_SAFE_INTEGER).toString(36);\n}\n\nfunction getCustomQueryConfig(\n config: Pick<NormalizedZeroConfig, 'query' | 'getQueries'>,\n) {\n const queryConfig = config.query?.url ? config.query : config.getQueries;\n\n if (!queryConfig?.url) {\n return undefined;\n }\n\n return {\n url: queryConfig.url,\n apiKey: queryConfig.apiKey,\n allowedClientHeaders: queryConfig.allowedClientHeaders,\n forwardCookies: queryConfig.forwardCookies ?? false,\n };\n}\n\nexport default function runWorker(\n parent: Worker,\n env: NodeJS.ProcessEnv,\n ...args: string[]\n): Promise<void> {\n assert(args.length >= 2, `expected [fileMode, workerIndex, ...flags]`);\n const fileMode = v.parse(args[0], replicaFileModeSchema);\n const workerIndex = Number(args[1]);\n const config = getNormalizedZeroConfig({env, argv: args.slice(2)});\n\n startOtelAuto(\n createLogContext(config, 'syncer', workerIndex, false),\n 'syncer',\n workerIndex,\n );\n const lc = createLogContext(config, 'syncer', workerIndex);\n initEventSink(lc, config);\n\n const {cvr, upstream, enableCrudMutations} = config;\n\n const replicaFile = replicaFileName(config.replica.file, fileMode);\n lc.debug?.(`running view-syncer on ${replicaFile}`);\n\n const cvrDB = pgClient(lc, cvr.db, `sync-worker-${pid}-cvr`, {\n max: must(cvr.maxConnsPerWorker, 'cvr.maxConnsPerWorker must be set'),\n });\n\n const upstreamDB = enableCrudMutations\n ? pgClient(lc, upstream.db, `sync-worker-${pid}-upstream`, {\n max: must(\n upstream.maxConnsPerWorker,\n 'upstream.maxConnsPerWorker must be set',\n ),\n })\n : undefined;\n\n const dbWarmup = Promise.allSettled([\n warmupConnections(lc, cvrDB, 'cvr'),\n upstreamDB ? warmupConnections(lc, upstreamDB, 'upstream') : promiseVoid,\n ]);\n\n const tmpDir = config.storageDBTmpDir ?? tmpdir();\n const operatorStorage = DatabaseStorage.create(\n lc,\n path.join(tmpDir, `sync-worker-${randomUUID()}`),\n );\n const writeAuthzStorage = DatabaseStorage.create(\n lc,\n path.join(tmpDir, `mutagen-${randomUUID()}`),\n );\n\n const shard = getShardID(config);\n const customQueryConfig = getCustomQueryConfig(config);\n const pushConfig =\n config.push.url === undefined && config.mutate.url === undefined\n ? undefined\n : {\n ...config.push,\n ...config.mutate,\n url: must(\n config.push.url ?? config.mutate.url,\n 'No push or mutate URL configured',\n ),\n };\n\n /** @deprecated used in JWT validation */\n let validateLegacyJWT: ValidateLegacyJWT | undefined = undefined;\n\n const tokenOptions = tokenConfigOptions(config.auth ?? {});\n if (tokenOptions.length === 1) {\n validateLegacyJWT = async (token, {userID}) => {\n if (!userID) {\n throw new ProtocolErrorWithLevel(\n {\n kind: 'Unauthorized',\n message: 'UserID is required for JWT validation.',\n origin: 'zeroCache',\n },\n 'warn',\n );\n }\n\n const decoded = await verifyToken(config.auth, token, {\n subject: userID,\n ...(config.auth?.issuer && {issuer: config.auth.issuer}),\n ...(config.auth?.audience && {\n audience: config.auth.audience,\n }),\n });\n return {\n type: 'jwt',\n raw: token,\n decoded,\n };\n };\n }\n\n const viewSyncerFactory = (\n id: string,\n sub: Subscription<ReplicaState>,\n drainCoordinator: DrainCoordinator,\n ) => {\n const logger = lc\n .withContext('component', 'view-syncer')\n .withContext('clientGroupID', id)\n .withContext('instance', randomID());\n\n const customQueryTransformer =\n customQueryConfig && new CustomQueryTransformer(logger, shard);\n const contextManager = new ConnectionContextManagerImpl(\n logger,\n config.auth.revalidateIntervalSeconds,\n config.auth.retransformIntervalSeconds,\n customQueryConfig,\n pushConfig,\n validateLegacyJWT,\n );\n\n lc.debug?.(\n `creating view syncer. Query Planner Enabled: ${config.enableQueryPlanner}`,\n );\n\n const inspectorDelegate = new InspectorDelegate(customQueryTransformer);\n\n const priorityOpRunningYieldThresholdMs = Math.max(\n config.yieldThresholdMs / 4,\n 2,\n );\n const normalYieldThresholdMs = Math.max(config.yieldThresholdMs, 2);\n\n return new ViewSyncerService(\n config,\n logger,\n shard,\n config.taskID,\n id,\n cvrDB,\n new PipelineDriver(\n logger,\n config.log,\n new Snapshotter(logger, replicaFile, shard),\n shard,\n operatorStorage.createClientGroupStorage(id),\n id,\n inspectorDelegate,\n () =>\n isPriorityOpRunning()\n ? priorityOpRunningYieldThresholdMs\n : normalYieldThresholdMs,\n config.enableQueryPlanner,\n config,\n ),\n sub,\n drainCoordinator,\n config.log.slowHydrateThreshold,\n inspectorDelegate,\n contextManager,\n customQueryTransformer,\n runPriorityOp,\n );\n };\n\n const mutagenFactory = upstreamDB\n ? (id: string) =>\n new MutagenService(\n lc\n .withContext('component', 'mutagen')\n .withContext('clientGroupID', id),\n shard,\n id,\n upstreamDB,\n config,\n writeAuthzStorage,\n )\n : undefined;\n\n const pusherFactory =\n pushConfig === undefined\n ? undefined\n : (id: string, contextManager: ConnectionContextManager) =>\n new PusherService(\n config,\n lc.withContext('clientGroupID', id),\n id,\n contextManager,\n );\n\n const syncer = new Syncer(\n lc,\n config,\n viewSyncerFactory,\n mutagenFactory,\n pusherFactory,\n parent,\n validateLegacyJWT,\n );\n\n startAnonymousTelemetry(lc, config);\n\n void dbWarmup.then(() => parent.send(['ready', {ready: true}]));\n\n return runUntilKilled(lc, parent, syncer);\n}\n\n// fork()\nif (!singleProcessMode()) {\n void exitAfter(() =>\n runWorker(must(parentWorker), process.env, ...process.argv.slice(2)),\n );\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA8CA,SAAS,WAAW;AAClB,QAAO,QAAQ,GAAG,OAAO,iBAAiB,CAAC,SAAS,GAAG;;AAGzD,SAAS,qBACP,QACA;CACA,MAAM,cAAc,OAAO,OAAO,MAAM,OAAO,QAAQ,OAAO;AAE9D,KAAI,CAAC,aAAa,IAChB;AAGF,QAAO;EACL,KAAK,YAAY;EACjB,QAAQ,YAAY;EACpB,sBAAsB,YAAY;EAClC,gBAAgB,YAAY,kBAAkB;EAC/C;;AAGH,SAAwB,UACtB,QACA,KACA,GAAG,MACY;AACf,QAAO,KAAK,UAAU,GAAG,6CAA6C;CACtE,MAAM,WAAW,MAAQ,KAAK,IAAI,sBAAsB;CACxD,MAAM,cAAc,OAAO,KAAK,GAAG;CACnC,MAAM,SAAS,wBAAwB;EAAC;EAAK,MAAM,KAAK,MAAM,EAAE;EAAC,CAAC;AAElE,eACE,iBAAiB,QAAQ,UAAU,aAAa,MAAM,EACtD,UACA,YACD;CACD,MAAM,KAAK,iBAAiB,QAAQ,UAAU,YAAY;AAC1D,eAAc,IAAI,OAAO;CAEzB,MAAM,EAAC,KAAK,UAAU,wBAAuB;CAE7C,MAAM,cAAc,gBAAgB,OAAO,QAAQ,MAAM,SAAS;AAClE,IAAG,QAAQ,0BAA0B,cAAc;CAEnD,MAAM,QAAQ,SAAS,IAAI,IAAI,IAAI,eAAe,IAAI,OAAO,EAC3D,KAAK,KAAK,IAAI,mBAAmB,oCAAoC,EACtE,CAAC;CAEF,MAAM,aAAa,sBACf,SAAS,IAAI,SAAS,IAAI,eAAe,IAAI,YAAY,EACvD,KAAK,KACH,SAAS,mBACT,yCACD,EACF,CAAC,GACF,KAAA;CAEJ,MAAM,WAAW,QAAQ,WAAW,CAClC,kBAAkB,IAAI,OAAO,MAAM,EACnC,aAAa,kBAAkB,IAAI,YAAY,WAAW,GAAG,YAC9D,CAAC;CAEF,MAAM,SAAS,OAAO,mBAAmB,QAAQ;CACjD,MAAM,kBAAkB,gBAAgB,OACtC,IACA,KAAK,KAAK,QAAQ,eAAe,YAAY,GAAG,CACjD;CACD,MAAM,oBAAoB,gBAAgB,OACxC,IACA,KAAK,KAAK,QAAQ,WAAW,YAAY,GAAG,CAC7C;CAED,MAAM,QAAQ,WAAW,OAAO;CAChC,MAAM,oBAAoB,qBAAqB,OAAO;CACtD,MAAM,aACJ,OAAO,KAAK,QAAQ,KAAA,KAAa,OAAO,OAAO,QAAQ,KAAA,IACnD,KAAA,IACA;EACE,GAAG,OAAO;EACV,GAAG,OAAO;EACV,KAAK,KACH,OAAO,KAAK,OAAO,OAAO,OAAO,KACjC,mCACD;EACF;;CAGP,IAAI,oBAAmD,KAAA;AAGvD,KADqB,mBAAmB,OAAO,QAAQ,EAAE,CAAC,CACzC,WAAW,EAC1B,qBAAoB,OAAO,OAAO,EAAC,aAAY;AAC7C,MAAI,CAAC,OACH,OAAM,IAAI,uBACR;GACE,MAAM;GACN,SAAS;GACT,QAAQ;GACT,EACD,OACD;AAUH,SAAO;GACL,MAAM;GACN,KAAK;GACL,SAVc,MAAM,YAAY,OAAO,MAAM,OAAO;IACpD,SAAS;IACT,GAAI,OAAO,MAAM,UAAU,EAAC,QAAQ,OAAO,KAAK,QAAO;IACvD,GAAI,OAAO,MAAM,YAAY,EAC3B,UAAU,OAAO,KAAK,UACvB;IACF,CAAC;GAKD;;CAIL,MAAM,qBACJ,IACA,KACA,qBACG;EACH,MAAM,SAAS,GACZ,YAAY,aAAa,cAAc,CACvC,YAAY,iBAAiB,GAAG,CAChC,YAAY,YAAY,UAAU,CAAC;EAEtC,MAAM,yBACJ,qBAAqB,IAAI,uBAAuB,QAAQ,MAAM;EAChE,MAAM,iBAAiB,IAAI,6BACzB,QACA,OAAO,KAAK,2BACZ,OAAO,KAAK,4BACZ,mBACA,YACA,kBACD;AAED,KAAG,QACD,gDAAgD,OAAO,qBACxD;EAED,MAAM,oBAAoB,IAAI,kBAAkB,uBAAuB;EAEvE,MAAM,oCAAoC,KAAK,IAC7C,OAAO,mBAAmB,GAC1B,EACD;EACD,MAAM,yBAAyB,KAAK,IAAI,OAAO,kBAAkB,EAAE;AAEnE,SAAO,IAAI,kBACT,QACA,QACA,OACA,OAAO,QACP,IACA,OACA,IAAI,eACF,QACA,OAAO,KACP,IAAI,YAAY,QAAQ,aAAa,MAAM,EAC3C,OACA,gBAAgB,yBAAyB,GAAG,EAC5C,IACA,yBAEE,qBAAqB,GACjB,oCACA,wBACN,OAAO,oBACP,OACD,EACD,KACA,kBACA,OAAO,IAAI,sBACX,mBACA,gBACA,wBACA,cACD;;CA4BH,MAAM,SAAS,IAAI,OACjB,IACA,QACA,mBA5BqB,cAClB,OACC,IAAI,eACF,GACG,YAAY,aAAa,UAAU,CACnC,YAAY,iBAAiB,GAAG,EACnC,OACA,IACA,YACA,QACA,kBACD,GACH,KAAA,GAGF,eAAe,KAAA,IACX,KAAA,KACC,IAAY,mBACX,IAAI,cACF,QACA,GAAG,YAAY,iBAAiB,GAAG,EACnC,IACA,eACD,EAQP,QACA,kBACD;AAED,yBAAwB,IAAI,OAAO;AAE9B,UAAS,WAAW,OAAO,KAAK,CAAC,SAAS,EAAC,OAAO,MAAK,CAAC,CAAC,CAAC;AAE/D,QAAO,eAAe,IAAI,QAAQ,OAAO;;AAI3C,IAAI,CAAC,mBAAmB,CACjB,iBACH,UAAU,KAAK,aAAa,EAAE,QAAQ,KAAK,GAAG,QAAQ,KAAK,MAAM,EAAE,CAAC,CACrE"}

package/out/zero-cache/src/server/worker-urls.d.ts
@@ -3,6 +3,7 @@ export declare const MAIN_URL: URL;
 export declare const MUTATOR_URL: URL;
 export declare const REAPER_URL: URL;
 export declare const REPLICATOR_URL: URL;
+export declare const SHADOW_SYNCER_URL: URL;
 export declare const SYNCER_URL: URL;
 export declare const WRITE_WORKER_URL: URL;
 //# sourceMappingURL=worker-urls.d.ts.map

package/out/zero-cache/src/server/worker-urls.d.ts.map
@@ -1 +1 @@
-{"version":3,"file":"worker-urls.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/server/worker-urls.ts"],"names":[],"mappings":"AAgBA,eAAO,MAAM,mBAAmB,KAAkC,CAAC;AACnE,eAAO,MAAM,QAAQ,KAAuB,CAAC;AAC7C,eAAO,MAAM,WAAW,KAA0B,CAAC;AACnD,eAAO,MAAM,UAAU,KAAyB,CAAC;AACjD,eAAO,MAAM,cAAc,KAA6B,CAAC;AACzD,eAAO,MAAM,UAAU,KAAyB,CAAC;AACjD,eAAO,MAAM,gBAAgB,KAA+B,CAAC"}
+{"version":3,"file":"worker-urls.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/server/worker-urls.ts"],"names":[],"mappings":"AAgBA,eAAO,MAAM,mBAAmB,KAAkC,CAAC;AACnE,eAAO,MAAM,QAAQ,KAAuB,CAAC;AAC7C,eAAO,MAAM,WAAW,KAA0B,CAAC;AACnD,eAAO,MAAM,UAAU,KAAyB,CAAC;AACjD,eAAO,MAAM,cAAc,KAA6B,CAAC;AACzD,eAAO,MAAM,iBAAiB,KAAgC,CAAC;AAC/D,eAAO,MAAM,UAAU,KAAyB,CAAC;AACjD,eAAO,MAAM,gBAAgB,KAA+B,CAAC"}

package/out/zero-cache/src/server/worker-urls.js
@@ -10,9 +10,10 @@ var MAIN_URL = resolve("./main.ts");
 resolve("./mutator.ts");
 var REAPER_URL = resolve("./reaper.ts");
 var REPLICATOR_URL = resolve("./replicator.ts");
+var SHADOW_SYNCER_URL = resolve("./shadow-syncer.ts");
 var SYNCER_URL = resolve("./syncer.ts");
 var WRITE_WORKER_URL = resolve("./write-worker.ts");
 //#endregion
-export { CHANGE_STREAMER_URL, MAIN_URL, REAPER_URL, REPLICATOR_URL, SYNCER_URL, WRITE_WORKER_URL };
+export { CHANGE_STREAMER_URL, MAIN_URL, REAPER_URL, REPLICATOR_URL, SHADOW_SYNCER_URL, SYNCER_URL, WRITE_WORKER_URL };
 
 //# sourceMappingURL=worker-urls.js.map

package/out/zero-cache/src/server/worker-urls.js.map
@@ -1 +1 @@
-{"version":3,"file":"worker-urls.js","names":[],"sources":["../../../../../zero-cache/src/server/worker-urls.ts"],"sourcesContent":["// This module provides URLs for worker files.\n\nconst tsRe = /\\.ts$/;\n\nfunction resolve(path: string): URL {\n const {url} = import.meta;\n if (url.endsWith('.js')) {\n // When compiled, change .ts to .js\n path = path.replace(tsRe, '.js');\n }\n return new URL(path, url);\n}\n\n// These URLs are part of the build process. See ../../zero/tool/build.ts\n// All these urls must be relative to this file and be located in the same directory.\n\nexport const CHANGE_STREAMER_URL = resolve('./change-streamer.ts');\nexport const MAIN_URL = resolve('./main.ts');\nexport const MUTATOR_URL = resolve('./mutator.ts');\nexport const REAPER_URL = resolve('./reaper.ts');\nexport const REPLICATOR_URL = resolve('./replicator.ts');\nexport const SYNCER_URL = resolve('./syncer.ts');\nexport const WRITE_WORKER_URL = resolve('./write-worker.ts');\n"],"mappings":";AAEA,IAAM,OAAO;AAEb,SAAS,QAAQ,MAAmB;CAClC,MAAM,EAAC,QAAO,OAAO;AACrB,KAAI,IAAI,SAAS,MAAM,CAErB,QAAO,KAAK,QAAQ,MAAM,MAAM;AAElC,QAAO,IAAI,IAAI,MAAM,IAAI;;AAM3B,IAAa,sBAAsB,QAAQ,uBAAuB;AAClE,IAAa,WAAW,QAAQ,YAAY;AACjB,QAAQ,eAAe;AAClD,IAAa,aAAa,QAAQ,cAAc;AAChD,IAAa,iBAAiB,QAAQ,kBAAkB;AACxD,IAAa,aAAa,QAAQ,cAAc;AAChD,IAAa,mBAAmB,QAAQ,oBAAoB"}
+{"version":3,"file":"worker-urls.js","names":[],"sources":["../../../../../zero-cache/src/server/worker-urls.ts"],"sourcesContent":["// This module provides URLs for worker files.\n\nconst tsRe = /\\.ts$/;\n\nfunction resolve(path: string): URL {\n const {url} = import.meta;\n if (url.endsWith('.js')) {\n // When compiled, change .ts to .js\n path = path.replace(tsRe, '.js');\n }\n return new URL(path, url);\n}\n\n// These URLs are part of the build process. See ../../zero/tool/build.ts\n// All these urls must be relative to this file and be located in the same directory.\n\nexport const CHANGE_STREAMER_URL = resolve('./change-streamer.ts');\nexport const MAIN_URL = resolve('./main.ts');\nexport const MUTATOR_URL = resolve('./mutator.ts');\nexport const REAPER_URL = resolve('./reaper.ts');\nexport const REPLICATOR_URL = resolve('./replicator.ts');\nexport const SHADOW_SYNCER_URL = resolve('./shadow-syncer.ts');\nexport const SYNCER_URL = resolve('./syncer.ts');\nexport const WRITE_WORKER_URL = resolve('./write-worker.ts');\n"],"mappings":";AAEA,IAAM,OAAO;AAEb,SAAS,QAAQ,MAAmB;CAClC,MAAM,EAAC,QAAO,OAAO;AACrB,KAAI,IAAI,SAAS,MAAM,CAErB,QAAO,KAAK,QAAQ,MAAM,MAAM;AAElC,QAAO,IAAI,IAAI,MAAM,IAAI;;AAM3B,IAAa,sBAAsB,QAAQ,uBAAuB;AAClE,IAAa,WAAW,QAAQ,YAAY;AACjB,QAAQ,eAAe;AAClD,IAAa,aAAa,QAAQ,cAAc;AAChD,IAAa,iBAAiB,QAAQ,kBAAkB;AACxD,IAAa,oBAAoB,QAAQ,qBAAqB;AAC9D,IAAa,aAAa,QAAQ,cAAc;AAChD,IAAa,mBAAmB,QAAQ,oBAAoB"}
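The resolve() helper embedded in the map's sourcesContent above is what lets the new SHADOW_SYNCER_URL point at the right artifact in both modes: worker URLs are authored against .ts entry points, and when the module itself is running as compiled .js the extension is rewritten. Restated from the embedded source with brief comments:

const tsRe = /\.ts$/;

function resolve(path: string): URL {
  const {url} = import.meta;
  if (url.endsWith('.js')) {
    // Running from the compiled bundle: target the compiled sibling instead.
    path = path.replace(tsRe, '.js');
  }
  // Resolved relative to this module, so worker files must live alongside it.
  return new URL(path, url);
}

const SHADOW_SYNCER_URL = resolve('./shadow-syncer.ts');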

package/out/zero-cache/src/services/change-source/custom/change-source.js
@@ -12,7 +12,7 @@ import { initReplica } from "../common/replica-schema.js";
 import { stream } from "../../../types/streams.js";
 import { ChangeProcessor } from "../../replicator/change-processor.js";
 import { ReplicationStatusPublisher } from "../../replicator/replication-status.js";
-import { WebSocket
+import { WebSocket } from "ws";
 //#region ../zero-cache/src/services/change-source/custom/change-source.ts
 /**
  * Initializes a Custom change source before streaming changes from the
@@ -69,7 +69,7 @@ var CustomChangeSource = class {
 			url.searchParams.set("lastWatermark", clientWatermark);
 			url.searchParams.set("replicaVersion", replicaVersion);
 		}
-		const ws = new WebSocket
+		const ws = new WebSocket(url);
 		const { instream, outstream } = stream(this.#lc, ws, changeStreamMessageSchema, { coalesce: (curr) => curr });
 		return {
 			changes: instream,
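Both hunks above center on ws's WebSocket, whose constructor accepts a URL object as well as a string, which is what #startStream relies on when it passes the assembled upstream URL. A minimal sketch; the endpoint is hypothetical, and the query parameters mirror the ones set in the embedded source:

import {WebSocket} from 'ws';

// Hypothetical change-source endpoint; appID/shardNum/publications mirror
// the searchParams set in #startStream above.
const url = new URL('wss://change-source.example.com/stream');
url.searchParams.set('appID', 'zero');
url.searchParams.set('shardNum', String(0));
url.searchParams.append('publications', 'zero_public');

const ws = new WebSocket(url); // ws accepts string | URL
ws.on('open', () => console.log('change stream connected'));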

package/out/zero-cache/src/services/change-source/custom/change-source.js.map
@@ -1 +1 @@
-{"version":3,"file":"change-source.js","names":["#lc","#upstreamUri","#shard","#replicationConfig","#startStream"],"sources":["../../../../../../../zero-cache/src/services/change-source/custom/change-source.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {WebSocket} from 'ws';\nimport {assert, unreachable} from '../../../../../shared/src/asserts.ts';\nimport {\n stringify,\n type JSONObject,\n} from '../../../../../shared/src/bigint-json.ts';\nimport {deepEqual} from '../../../../../shared/src/json.ts';\nimport type {SchemaValue} from '../../../../../zero-schema/src/table-schema.ts';\nimport {Database} from '../../../../../zqlite/src/db.ts';\nimport {computeZqlSpecs} from '../../../db/lite-tables.ts';\nimport {StatementRunner} from '../../../db/statements.ts';\nimport type {ShardConfig, ShardID} from '../../../types/shards.ts';\nimport {stream} from '../../../types/streams.ts';\nimport {\n AutoResetSignal,\n type ReplicationConfig,\n} from '../../change-streamer/schema/tables.ts';\nimport {ChangeProcessor} from '../../replicator/change-processor.ts';\nimport {ReplicationStatusPublisher} from '../../replicator/replication-status.ts';\nimport {\n createReplicationStateTables,\n getSubscriptionState,\n initReplicationState,\n type SubscriptionState,\n} from '../../replicator/schema/replication-state.ts';\nimport type {ChangeSource, ChangeStream} from '../change-source.ts';\nimport {initReplica} from '../common/replica-schema.ts';\nimport {changeStreamMessageSchema} from '../protocol/current/downstream.ts';\nimport {\n type BackfillRequest,\n type ChangeSourceUpstream,\n} from '../protocol/current/upstream.ts';\n\n/** Server context to store with the initial sync metadata for debugging. */\nexport type ServerContext = JSONObject;\n\n/**\n * Initializes a Custom change source before streaming changes from the\n * corresponding logical replication stream.\n */\nexport async function initializeCustomChangeSource(\n lc: LogContext,\n upstreamURI: string,\n shard: ShardConfig,\n replicaDbFile: string,\n context: ServerContext,\n): Promise<{subscriptionState: SubscriptionState; changeSource: ChangeSource}> {\n await initReplica(\n lc,\n `replica-${shard.appID}-${shard.shardNum}`,\n replicaDbFile,\n (log, tx) => initialSync(log, shard, tx, upstreamURI, context),\n );\n\n const replica = new Database(lc, replicaDbFile);\n const subscriptionState = getSubscriptionState(new StatementRunner(replica));\n replica.close();\n\n if (shard.publications.length) {\n // Verify that the publications match what has been synced.\n const requested = shard.publications.toSorted();\n const replicated = subscriptionState.publications.sort();\n if (!deepEqual(requested, replicated)) {\n throw new Error(\n `Invalid ShardConfig. Requested publications [${requested}] do not match synced publications: [${replicated}]`,\n );\n }\n }\n\n const changeSource = new CustomChangeSource(\n lc,\n upstreamURI,\n shard,\n subscriptionState,\n );\n\n return {subscriptionState, changeSource};\n}\n\nclass CustomChangeSource implements ChangeSource {\n readonly #lc: LogContext;\n readonly #upstreamUri: string;\n readonly #shard: ShardID;\n readonly #replicationConfig: ReplicationConfig;\n\n constructor(\n lc: LogContext,\n upstreamUri: string,\n shard: ShardID,\n replicationConfig: ReplicationConfig,\n ) {\n this.#lc = lc.withContext('component', 'change-source');\n this.#upstreamUri = upstreamUri;\n this.#shard = shard;\n this.#replicationConfig = replicationConfig;\n }\n\n initialSync(): ChangeStream {\n return this.#startStream();\n }\n\n startLagReporter() {\n return null; // Not supported for custom sources\n }\n\n stop(): Promise<void> {\n return Promise.resolve();\n }\n\n startStream(\n clientWatermark: string,\n backfillRequests: BackfillRequest[] = [],\n ): Promise<ChangeStream> {\n if (backfillRequests?.length) {\n throw new Error(\n 'backfill is yet not supported for custom change sources',\n );\n }\n return Promise.resolve(this.#startStream(clientWatermark));\n }\n\n #startStream(clientWatermark?: string): ChangeStream {\n const {publications, replicaVersion} = this.#replicationConfig;\n const {appID, shardNum} = this.#shard;\n const url = new URL(this.#upstreamUri);\n url.searchParams.set('appID', appID);\n url.searchParams.set('shardNum', String(shardNum));\n for (const pub of publications) {\n url.searchParams.append('publications', pub);\n }\n if (clientWatermark) {\n assert(\n replicaVersion.length,\n 'replicaVersion is required when clientWatermark is set',\n );\n url.searchParams.set('lastWatermark', clientWatermark);\n url.searchParams.set('replicaVersion', replicaVersion);\n }\n\n const ws = new WebSocket(url);\n const {instream, outstream} = stream(\n this.#lc,\n ws,\n changeStreamMessageSchema,\n // Upstream acks coalesce. If upstream exhibits back-pressure,\n // only the last ACK is kept / buffered.\n {coalesce: (curr: ChangeSourceUpstream) => curr},\n );\n return {changes: instream, acks: outstream};\n }\n}\n\n/**\n * Initial sync for a custom change source makes a request to the\n * change source endpoint with no `replicaVersion` or `lastWatermark`.\n * The initial transaction returned by the endpoint is treated as\n * the initial sync, and the commit watermark of that transaction\n * becomes the `replicaVersion` of the initialized replica.\n *\n * Note that this is equivalent to how the LSN of the Postgres WAL\n * at initial sync time is the `replicaVersion` (and starting\n * version for all initially-synced rows).\n */\nexport async function initialSync(\n lc: LogContext,\n shard: ShardConfig,\n tx: Database,\n upstreamURI: string,\n context: ServerContext,\n) {\n const {appID: id, publications} = shard;\n const changeSource = new CustomChangeSource(lc, upstreamURI, shard, {\n replicaVersion: '', // ignored for initialSync()\n publications,\n });\n const {changes} = changeSource.initialSync();\n\n createReplicationStateTables(tx);\n const processor = new ChangeProcessor(\n new StatementRunner(tx),\n 'initial-sync',\n (_, err) => {\n throw err;\n },\n );\n\n const statusPublisher = ReplicationStatusPublisher.forRunningTransaction(tx);\n try {\n let num = 0;\n for await (const change of changes) {\n const [tag] = change;\n switch (tag) {\n case 'begin': {\n const {commitWatermark} = change[2];\n lc.info?.(\n `initial sync of shard ${id} at replicaVersion ${commitWatermark}`,\n );\n statusPublisher.publish(\n lc,\n 'Initializing',\n `Copying upstream tables at version ${commitWatermark}`,\n 5000,\n );\n initReplicationState(\n tx,\n publications.toSorted(),\n commitWatermark,\n context,\n false,\n );\n processor.processMessage(lc, change);\n break;\n }\n case 'data':\n processor.processMessage(lc, change);\n if (++num % 1000 === 0) {\n lc.debug?.(`processed ${num} changes`);\n }\n break;\n case 'commit':\n processor.processMessage(lc, change);\n validateInitiallySyncedData(lc, tx, shard);\n lc.info?.(`finished initial-sync of ${num} changes`);\n return;\n\n case 'status':\n break; // Ignored\n // @ts-expect-error: falls through if the tag is not 'reset-required\n case 'control': {\n const {tag, message} = change[1];\n if (tag === 'reset-required') {\n throw new AutoResetSignal(\n message ?? 'auto-reset signaled by change source',\n );\n }\n }\n // falls through\n case 'rollback':\n throw new Error(\n `unexpected message during initial-sync: ${stringify(change)}`,\n );\n default:\n unreachable(change);\n }\n }\n throw new Error(\n `change source ${upstreamURI} closed before initial-sync completed`,\n );\n } catch (e) {\n await statusPublisher.publishAndThrowError(lc, 'Initializing', e);\n } finally {\n statusPublisher.stop();\n }\n}\n\n// Verify that the upstream tables expected by the sync logic\n// have been properly initialized.\nfunction getRequiredTables({\n appID,\n shardNum,\n}: ShardID): Record<string, Record<string, SchemaValue>> {\n return {\n [`${appID}_${shardNum}.clients`]: {\n clientGroupID: {type: 'string'},\n clientID: {type: 'string'},\n lastMutationID: {type: 'number'},\n userID: {type: 'string'},\n },\n [`${appID}_${shardNum}.mutations`]: {\n clientGroupID: {type: 'string'},\n clientID: {type: 'string'},\n mutationID: {type: 'number'},\n mutation: {type: 'json'},\n },\n [`${appID}.permissions`]: {\n permissions: {type: 'json'},\n hash: {type: 'string'},\n },\n };\n}\n\nfunction validateInitiallySyncedData(\n lc: LogContext,\n db: Database,\n shard: ShardID,\n) {\n const tables = computeZqlSpecs(lc, db, {includeBackfillingColumns: true});\n const required = getRequiredTables(shard);\n for (const [name, columns] of Object.entries(required)) {\n const table = tables.get(name)?.zqlSpec;\n if (!table) {\n throw new Error(\n `Upstream is missing the \"${name}\" table. (Found ${[\n ...tables.keys(),\n ]})` +\n `Please ensure that each table has a unique index over one ` +\n `or more non-null columns.`,\n );\n }\n for (const [col, {type}] of Object.entries(columns)) {\n const found = table[col];\n if (!found) {\n throw new Error(\n `Upstream \"${table}\" table is missing the \"${col}\" column`,\n );\n }\n if (found.type !== type) {\n throw new Error(\n `Upstream \"${table}.${col}\" column is a ${found.type} type but must be a ${type} type.`,\n );\n }\n }\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAyCA,eAAsB,6BACpB,IACA,aACA,OACA,eACA,SAC6E;AAC7E,OAAM,YACJ,IACA,WAAW,MAAM,MAAM,GAAG,MAAM,YAChC,gBACC,KAAK,OAAO,YAAY,KAAK,OAAO,IAAI,aAAa,QAAQ,CAC/D;CAED,MAAM,UAAU,IAAI,SAAS,IAAI,cAAc;CAC/C,MAAM,oBAAoB,qBAAqB,IAAI,gBAAgB,QAAQ,CAAC;AAC5E,SAAQ,OAAO;AAEf,KAAI,MAAM,aAAa,QAAQ;EAE7B,MAAM,YAAY,MAAM,aAAa,UAAU;EAC/C,MAAM,aAAa,kBAAkB,aAAa,MAAM;AACxD,MAAI,CAAC,UAAU,WAAW,WAAW,CACnC,OAAM,IAAI,MACR,gDAAgD,UAAU,uCAAuC,WAAW,GAC7G;;AAWL,QAAO;EAAC;EAAmB,cAPN,IAAI,mBACvB,IACA,aACA,OACA,kBACD;EAEuC;;AAG1C,IAAM,qBAAN,MAAiD;CAC/C;CACA;CACA;CACA;CAEA,YACE,IACA,aACA,OACA,mBACA;AACA,QAAA,KAAW,GAAG,YAAY,aAAa,gBAAgB;AACvD,QAAA,cAAoB;AACpB,QAAA,QAAc;AACd,QAAA,oBAA0B;;CAG5B,cAA4B;AAC1B,SAAO,MAAA,aAAmB;;CAG5B,mBAAmB;AACjB,SAAO;;CAGT,OAAsB;AACpB,SAAO,QAAQ,SAAS;;CAG1B,YACE,iBACA,mBAAsC,EAAE,EACjB;AACvB,MAAI,kBAAkB,OACpB,OAAM,IAAI,MACR,0DACD;AAEH,SAAO,QAAQ,QAAQ,MAAA,YAAkB,gBAAgB,CAAC;;CAG5D,aAAa,iBAAwC;EACnD,MAAM,EAAC,cAAc,mBAAkB,MAAA;EACvC,MAAM,EAAC,OAAO,aAAY,MAAA;EAC1B,MAAM,MAAM,IAAI,IAAI,MAAA,YAAkB;AACtC,MAAI,aAAa,IAAI,SAAS,MAAM;AACpC,MAAI,aAAa,IAAI,YAAY,OAAO,SAAS,CAAC;AAClD,OAAK,MAAM,OAAO,aAChB,KAAI,aAAa,OAAO,gBAAgB,IAAI;AAE9C,MAAI,iBAAiB;AACnB,UACE,eAAe,QACf,yDACD;AACD,OAAI,aAAa,IAAI,iBAAiB,gBAAgB;AACtD,OAAI,aAAa,IAAI,kBAAkB,eAAe;;EAGxD,MAAM,KAAK,IAAI,YAAU,IAAI;EAC7B,MAAM,EAAC,UAAU,cAAa,OAC5B,MAAA,IACA,IACA,2BAGA,EAAC,WAAW,SAA+B,MAAK,CACjD;AACD,SAAO;GAAC,SAAS;GAAU,MAAM;GAAU;;;;;;;;;;;;;;AAe/C,eAAsB,YACpB,IACA,OACA,IACA,aACA,SACA;CACA,MAAM,EAAC,OAAO,IAAI,iBAAgB;CAKlC,MAAM,EAAC,YAJc,IAAI,mBAAmB,IAAI,aAAa,OAAO;EAClE,gBAAgB;EAChB;EACD,CAAC,CAC6B,aAAa;AAE5C,8BAA6B,GAAG;CAChC,MAAM,YAAY,IAAI,gBACpB,IAAI,gBAAgB,GAAG,EACvB,iBACC,GAAG,QAAQ;AACV,QAAM;GAET;CAED,MAAM,kBAAkB,2BAA2B,sBAAsB,GAAG;AAC5E,KAAI;EACF,IAAI,MAAM;AACV,aAAW,MAAM,UAAU,SAAS;GAClC,MAAM,CAAC,OAAO;AACd,WAAQ,KAAR;IACE,KAAK,SAAS;KACZ,MAAM,EAAC,oBAAmB,OAAO;AACjC,QAAG,OACD,yBAAyB,GAAG,qBAAqB,kBAClD;AACD,qBAAgB,QACd,IACA,gBACA,sCAAsC,mBACtC,IACD;AACD,0BACE,IACA,aAAa,UAAU,EACvB,iBACA,SACA,MACD;AACD,eAAU,eAAe,IAAI,OAAO;AACpC;;IAEF,KAAK;AACH,eAAU,eAAe,IAAI,OAAO;AACpC,SAAI,EAAE,MAAM,QAAS,EACnB,IAAG,QAAQ,aAAa,IAAI,UAAU;AAExC;IACF,KAAK;AACH,eAAU,eAAe,IAAI,OAAO;AACpC,iCAA4B,IAAI,IAAI,MAAM;AAC1C,QAAG,OAAO,4BAA4B,IAAI,UAAU;AACpD;IAEF,KAAK,SACH;IAEF,KAAK,WAAW;KACd,MAAM,EAAC,KAAK,YAAW,OAAO;AAC9B,SAAI,QAAQ,iBACV,OAAM,IAAI,gBACR,WAAW,uCACZ;;IAIL,KAAK,WACH,OAAM,IAAI,MACR,2CAA2C,UAAU,OAAO,GAC7D;IACH,QACE,aAAY,OAAO;;;AAGzB,QAAM,IAAI,MACR,iBAAiB,YAAY,uCAC9B;UACM,GAAG;AACV,QAAM,gBAAgB,qBAAqB,IAAI,gBAAgB,EAAE;WACzD;AACR,kBAAgB,MAAM;;;AAM1B,SAAS,kBAAkB,EACzB,OACA,YACuD;AACvD,QAAO;GACJ,GAAG,MAAM,GAAG,SAAS,YAAY;GAChC,eAAe,EAAC,MAAM,UAAS;GAC/B,UAAU,EAAC,MAAM,UAAS;GAC1B,gBAAgB,EAAC,MAAM,UAAS;GAChC,QAAQ,EAAC,MAAM,UAAS;GACzB;GACA,GAAG,MAAM,GAAG,SAAS,cAAc;GAClC,eAAe,EAAC,MAAM,UAAS;GAC/B,UAAU,EAAC,MAAM,UAAS;GAC1B,YAAY,EAAC,MAAM,UAAS;GAC5B,UAAU,EAAC,MAAM,QAAO;GACzB;GACA,GAAG,MAAM,gBAAgB;GACxB,aAAa,EAAC,MAAM,QAAO;GAC3B,MAAM,EAAC,MAAM,UAAS;GACvB;EACF;;AAGH,SAAS,4BACP,IACA,IACA,OACA;CACA,MAAM,SAAS,gBAAgB,IAAI,IAAI,EAAC,2BAA2B,MAAK,CAAC;CACzE,MAAM,WAAW,kBAAkB,MAAM;AACzC,MAAK,MAAM,CAAC,MAAM,YAAY,OAAO,QAAQ,SAAS,EAAE;EACtD,MAAM,QAAQ,OAAO,IAAI,KAAK,EAAE;AAChC,MAAI,CAAC,MACH,OAAM,IAAI,MACR,4BAA4B,KAAK,kBAAkB,CACjD,GAAG,OAAO,MAAM,CACjB,CAAC,sFAGH;AAEH,OAAK,MAAM,CAAC,KAAK,EAAC,WAAU,OAAO,QAAQ,QAAQ,EAAE;GACnD,MAAM,QAAQ,MAAM;AACpB,OAAI,CAAC,MACH,OAAM,IAAI,MACR,aAAa,MAAM,0BAA0B,IAAI,UAClD;AAEH,OAAI,MAAM,SAAS,KACjB,OAAM,IAAI,MACR,aAAa,MAAM,GAAG,IAAI,gBAAgB,MAAM,KAAK,sBAAsB,KAAK,QACjF"}
+{"version":3,"file":"change-source.js","names":["#lc","#upstreamUri","#shard","#replicationConfig","#startStream"],"sources":["../../../../../../../zero-cache/src/services/change-source/custom/change-source.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {WebSocket} from 'ws';\nimport {assert, unreachable} from '../../../../../shared/src/asserts.ts';\nimport {\n stringify,\n type JSONObject,\n} from '../../../../../shared/src/bigint-json.ts';\nimport {deepEqual} from '../../../../../shared/src/json.ts';\nimport type {SchemaValue} from '../../../../../zero-schema/src/table-schema.ts';\nimport {Database} from '../../../../../zqlite/src/db.ts';\nimport {computeZqlSpecs} from '../../../db/lite-tables.ts';\nimport {StatementRunner} from '../../../db/statements.ts';\nimport type {ShardConfig, ShardID} from '../../../types/shards.ts';\nimport {stream} from '../../../types/streams.ts';\nimport {\n AutoResetSignal,\n type ReplicationConfig,\n} from '../../change-streamer/schema/tables.ts';\nimport {ChangeProcessor} from '../../replicator/change-processor.ts';\nimport {ReplicationStatusPublisher} from '../../replicator/replication-status.ts';\nimport {\n createReplicationStateTables,\n getSubscriptionState,\n initReplicationState,\n type SubscriptionState,\n} from '../../replicator/schema/replication-state.ts';\nimport type {ChangeSource, ChangeStream} from '../change-source.ts';\nimport {initReplica} from '../common/replica-schema.ts';\nimport {changeStreamMessageSchema} from '../protocol/current/downstream.ts';\nimport {\n type BackfillRequest,\n type ChangeSourceUpstream,\n} from '../protocol/current/upstream.ts';\n\n/** Server context to store with the initial sync metadata for debugging. */\nexport type ServerContext = JSONObject;\n\n/**\n * Initializes a Custom change source before streaming changes from the\n * corresponding logical replication stream.\n */\nexport async function initializeCustomChangeSource(\n lc: LogContext,\n upstreamURI: string,\n shard: ShardConfig,\n replicaDbFile: string,\n context: ServerContext,\n): Promise<{subscriptionState: SubscriptionState; changeSource: ChangeSource}> {\n await initReplica(\n lc,\n `replica-${shard.appID}-${shard.shardNum}`,\n replicaDbFile,\n (log, tx) => initialSync(log, shard, tx, upstreamURI, context),\n );\n\n const replica = new Database(lc, replicaDbFile);\n const subscriptionState = getSubscriptionState(new StatementRunner(replica));\n replica.close();\n\n if (shard.publications.length) {\n // Verify that the publications match what has been synced.\n const requested = shard.publications.toSorted();\n const replicated = subscriptionState.publications.sort();\n if (!deepEqual(requested, replicated)) {\n throw new Error(\n `Invalid ShardConfig. Requested publications [${requested}] do not match synced publications: [${replicated}]`,\n );\n }\n }\n\n const changeSource = new CustomChangeSource(\n lc,\n upstreamURI,\n shard,\n subscriptionState,\n );\n\n return {subscriptionState, changeSource};\n}\n\nclass CustomChangeSource implements ChangeSource {\n readonly #lc: LogContext;\n readonly #upstreamUri: string;\n readonly #shard: ShardID;\n readonly #replicationConfig: ReplicationConfig;\n\n constructor(\n lc: LogContext,\n upstreamUri: string,\n shard: ShardID,\n replicationConfig: ReplicationConfig,\n ) {\n this.#lc = lc.withContext('component', 'change-source');\n this.#upstreamUri = upstreamUri;\n this.#shard = shard;\n this.#replicationConfig = replicationConfig;\n }\n\n initialSync(): ChangeStream {\n return this.#startStream();\n }\n\n startLagReporter() {\n return null; // Not supported for custom sources\n }\n\n stop(): Promise<void> {\n return Promise.resolve();\n }\n\n startStream(\n clientWatermark: string,\n backfillRequests: BackfillRequest[] = [],\n ): Promise<ChangeStream> {\n if (backfillRequests?.length) {\n throw new Error(\n 'backfill is yet not supported for custom change sources',\n );\n }\n return Promise.resolve(this.#startStream(clientWatermark));\n }\n\n #startStream(clientWatermark?: string): ChangeStream {\n const {publications, replicaVersion} = this.#replicationConfig;\n const {appID, shardNum} = this.#shard;\n const url = new URL(this.#upstreamUri);\n url.searchParams.set('appID', appID);\n url.searchParams.set('shardNum', String(shardNum));\n for (const pub of publications) {\n url.searchParams.append('publications', pub);\n }\n if (clientWatermark) {\n assert(\n replicaVersion.length,\n 'replicaVersion is required when clientWatermark is set',\n );\n url.searchParams.set('lastWatermark', clientWatermark);\n url.searchParams.set('replicaVersion', replicaVersion);\n }\n\n const ws = new WebSocket(url);\n const {instream, outstream} = stream(\n this.#lc,\n ws,\n changeStreamMessageSchema,\n // Upstream acks coalesce. If upstream exhibits back-pressure,\n // only the last ACK is kept / buffered.\n {coalesce: (curr: ChangeSourceUpstream) => curr},\n );\n return {changes: instream, acks: outstream};\n }\n}\n\n/**\n * Initial sync for a custom change source makes a request to the\n * change source endpoint with no `replicaVersion` or `lastWatermark`.\n * The initial transaction returned by the endpoint is treated as\n * the initial sync, and the commit watermark of that transaction\n * becomes the `replicaVersion` of the initialized replica.\n *\n * Note that this is equivalent to how the LSN of the Postgres WAL\n * at initial sync time is the `replicaVersion` (and starting\n * version for all initially-synced rows).\n */\nexport async function initialSync(\n lc: LogContext,\n shard: ShardConfig,\n tx: Database,\n upstreamURI: string,\n context: ServerContext,\n) {\n const {appID: id, publications} = shard;\n const changeSource = new CustomChangeSource(lc, upstreamURI, shard, {\n replicaVersion: '', // ignored for initialSync()\n publications,\n });\n const {changes} = changeSource.initialSync();\n\n createReplicationStateTables(tx);\n const processor = new ChangeProcessor(\n new StatementRunner(tx),\n 'initial-sync',\n (_, err) => {\n throw err;\n },\n );\n\n const statusPublisher = ReplicationStatusPublisher.forRunningTransaction(tx);\n try {\n let num = 0;\n for await (const change of changes) {\n const [tag] = change;\n switch (tag) {\n case 'begin': {\n const {commitWatermark} = change[2];\n lc.info?.(\n `initial sync of shard ${id} at replicaVersion ${commitWatermark}`,\n );\n statusPublisher.publish(\n lc,\n 'Initializing',\n `Copying upstream tables at version ${commitWatermark}`,\n 5000,\n );\n initReplicationState(\n tx,\n publications.toSorted(),\n commitWatermark,\n context,\n false,\n );\n processor.processMessage(lc, change);\n break;\n }\n case 'data':\n processor.processMessage(lc, change);\n if (++num % 1000 === 0) {\n lc.debug?.(`processed ${num} changes`);\n }\n break;\n case 'commit':\n processor.processMessage(lc, change);\n validateInitiallySyncedData(lc, tx, shard);\n lc.info?.(`finished initial-sync of ${num} changes`);\n return;\n\n case 'status':\n break; // Ignored\n // @ts-expect-error: falls through if the tag is not 'reset-required\n case 'control': {\n const {tag, message} = change[1];\n if (tag === 'reset-required') {\n throw new AutoResetSignal(\n message ?? 
'auto-reset signaled by change source',\n );\n }\n }\n // falls through\n case 'rollback':\n throw new Error(\n `unexpected message during initial-sync: ${stringify(change)}`,\n );\n default:\n unreachable(change);\n }\n }\n throw new Error(\n `change source ${upstreamURI} closed before initial-sync completed`,\n );\n } catch (e) {\n await statusPublisher.publishAndThrowError(lc, 'Initializing', e);\n } finally {\n statusPublisher.stop();\n }\n}\n\n// Verify that the upstream tables expected by the sync logic\n// have been properly initialized.\nfunction getRequiredTables({\n appID,\n shardNum,\n}: ShardID): Record<string, Record<string, SchemaValue>> {\n return {\n [`${appID}_${shardNum}.clients`]: {\n clientGroupID: {type: 'string'},\n clientID: {type: 'string'},\n lastMutationID: {type: 'number'},\n userID: {type: 'string'},\n },\n [`${appID}_${shardNum}.mutations`]: {\n clientGroupID: {type: 'string'},\n clientID: {type: 'string'},\n mutationID: {type: 'number'},\n mutation: {type: 'json'},\n },\n [`${appID}.permissions`]: {\n permissions: {type: 'json'},\n hash: {type: 'string'},\n },\n };\n}\n\nfunction validateInitiallySyncedData(\n lc: LogContext,\n db: Database,\n shard: ShardID,\n) {\n const tables = computeZqlSpecs(lc, db, {includeBackfillingColumns: true});\n const required = getRequiredTables(shard);\n for (const [name, columns] of Object.entries(required)) {\n const table = tables.get(name)?.zqlSpec;\n if (!table) {\n throw new Error(\n `Upstream is missing the \"${name}\" table. (Found ${[\n ...tables.keys(),\n ]})` +\n `Please ensure that each table has a unique index over one ` +\n `or more non-null columns.`,\n );\n }\n for (const [col, {type}] of Object.entries(columns)) {\n const found = table[col];\n if (!found) {\n throw new Error(\n `Upstream \"${table}\" table is missing the \"${col}\" column`,\n );\n }\n if (found.type !== type) {\n throw new Error(\n `Upstream \"${table}.${col}\" column is a ${found.type} type but must be a ${type} type.`,\n );\n }\n }\n 
}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAyCA,eAAsB,6BACpB,IACA,aACA,OACA,eACA,SAC6E;AAC7E,OAAM,YACJ,IACA,WAAW,MAAM,MAAM,GAAG,MAAM,YAChC,gBACC,KAAK,OAAO,YAAY,KAAK,OAAO,IAAI,aAAa,QAAQ,CAC/D;CAED,MAAM,UAAU,IAAI,SAAS,IAAI,cAAc;CAC/C,MAAM,oBAAoB,qBAAqB,IAAI,gBAAgB,QAAQ,CAAC;AAC5E,SAAQ,OAAO;AAEf,KAAI,MAAM,aAAa,QAAQ;EAE7B,MAAM,YAAY,MAAM,aAAa,UAAU;EAC/C,MAAM,aAAa,kBAAkB,aAAa,MAAM;AACxD,MAAI,CAAC,UAAU,WAAW,WAAW,CACnC,OAAM,IAAI,MACR,gDAAgD,UAAU,uCAAuC,WAAW,GAC7G;;AAWL,QAAO;EAAC;EAAmB,cAPN,IAAI,mBACvB,IACA,aACA,OACA,kBACD;EAEuC;;AAG1C,IAAM,qBAAN,MAAiD;CAC/C;CACA;CACA;CACA;CAEA,YACE,IACA,aACA,OACA,mBACA;AACA,QAAA,KAAW,GAAG,YAAY,aAAa,gBAAgB;AACvD,QAAA,cAAoB;AACpB,QAAA,QAAc;AACd,QAAA,oBAA0B;;CAG5B,cAA4B;AAC1B,SAAO,MAAA,aAAmB;;CAG5B,mBAAmB;AACjB,SAAO;;CAGT,OAAsB;AACpB,SAAO,QAAQ,SAAS;;CAG1B,YACE,iBACA,mBAAsC,EAAE,EACjB;AACvB,MAAI,kBAAkB,OACpB,OAAM,IAAI,MACR,0DACD;AAEH,SAAO,QAAQ,QAAQ,MAAA,YAAkB,gBAAgB,CAAC;;CAG5D,aAAa,iBAAwC;EACnD,MAAM,EAAC,cAAc,mBAAkB,MAAA;EACvC,MAAM,EAAC,OAAO,aAAY,MAAA;EAC1B,MAAM,MAAM,IAAI,IAAI,MAAA,YAAkB;AACtC,MAAI,aAAa,IAAI,SAAS,MAAM;AACpC,MAAI,aAAa,IAAI,YAAY,OAAO,SAAS,CAAC;AAClD,OAAK,MAAM,OAAO,aAChB,KAAI,aAAa,OAAO,gBAAgB,IAAI;AAE9C,MAAI,iBAAiB;AACnB,UACE,eAAe,QACf,yDACD;AACD,OAAI,aAAa,IAAI,iBAAiB,gBAAgB;AACtD,OAAI,aAAa,IAAI,kBAAkB,eAAe;;EAGxD,MAAM,KAAK,IAAI,UAAU,IAAI;EAC7B,MAAM,EAAC,UAAU,cAAa,OAC5B,MAAA,IACA,IACA,2BAGA,EAAC,WAAW,SAA+B,MAAK,CACjD;AACD,SAAO;GAAC,SAAS;GAAU,MAAM;GAAU;;;;;;;;;;;;;;AAe/C,eAAsB,YACpB,IACA,OACA,IACA,aACA,SACA;CACA,MAAM,EAAC,OAAO,IAAI,iBAAgB;CAKlC,MAAM,EAAC,YAJc,IAAI,mBAAmB,IAAI,aAAa,OAAO;EAClE,gBAAgB;EAChB;EACD,CAAC,CAC6B,aAAa;AAE5C,8BAA6B,GAAG;CAChC,MAAM,YAAY,IAAI,gBACpB,IAAI,gBAAgB,GAAG,EACvB,iBACC,GAAG,QAAQ;AACV,QAAM;GAET;CAED,MAAM,kBAAkB,2BAA2B,sBAAsB,GAAG;AAC5E,KAAI;EACF,IAAI,MAAM;AACV,aAAW,MAAM,UAAU,SAAS;GAClC,MAAM,CAAC,OAAO;AACd,WAAQ,KAAR;IACE,KAAK,SAAS;KACZ,MAAM,EAAC,oBAAmB,OAAO;AACjC,QAAG,OACD,yBAAyB,GAAG,qBAAqB,kBAClD;AACD,qBAAgB,QACd,IACA,gBACA,sCAAsC,mBACtC,IACD;AACD,0BACE,IACA,aAAa,UAAU,EACvB,iBACA,SACA,MACD;AACD,eAAU,eAAe,IAAI,OAAO;AACpC;;IAEF,KAAK;AACH,eAAU,eAAe,IAAI,OAAO;AACpC,SAAI,EAAE,MAAM,QAAS,EACnB,IAAG,QAAQ,aAAa,IAAI,UAAU;AAExC;IACF,KAAK;AACH,eAAU,eAAe,IAAI,OAAO;AACpC,iCAA4B,IAAI,IAAI,MAAM;AAC1C,QAAG,OAAO,4BAA4B,IAAI,UAAU;AACpD;IAEF,KAAK,SACH;IAEF,KAAK,WAAW;KACd,MAAM,EAAC,KAAK,YAAW,OAAO;AAC9B,SAAI,QAAQ,iBACV,OAAM,IAAI,gBACR,WAAW,uCACZ;;IAIL,KAAK,WACH,OAAM,IAAI,MACR,2CAA2C,UAAU,OAAO,GAC7D;IACH,QACE,aAAY,OAAO;;;AAGzB,QAAM,IAAI,MACR,iBAAiB,YAAY,uCAC9B;UACM,GAAG;AACV,QAAM,gBAAgB,qBAAqB,IAAI,gBAAgB,EAAE;WACzD;AACR,kBAAgB,MAAM;;;AAM1B,SAAS,kBAAkB,EACzB,OACA,YACuD;AACvD,QAAO;GACJ,GAAG,MAAM,GAAG,SAAS,YAAY;GAChC,eAAe,EAAC,MAAM,UAAS;GAC/B,UAAU,EAAC,MAAM,UAAS;GAC1B,gBAAgB,EAAC,MAAM,UAAS;GAChC,QAAQ,EAAC,MAAM,UAAS;GACzB;GACA,GAAG,MAAM,GAAG,SAAS,cAAc;GAClC,eAAe,EAAC,MAAM,UAAS;GAC/B,UAAU,EAAC,MAAM,UAAS;GAC1B,YAAY,EAAC,MAAM,UAAS;GAC5B,UAAU,EAAC,MAAM,QAAO;GACzB;GACA,GAAG,MAAM,gBAAgB;GACxB,aAAa,EAAC,MAAM,QAAO;GAC3B,MAAM,EAAC,MAAM,UAAS;GACvB;EACF;;AAGH,SAAS,4BACP,IACA,IACA,OACA;CACA,MAAM,SAAS,gBAAgB,IAAI,IAAI,EAAC,2BAA2B,MAAK,CAAC;CACzE,MAAM,WAAW,kBAAkB,MAAM;AACzC,MAAK,MAAM,CAAC,MAAM,YAAY,OAAO,QAAQ,SAAS,EAAE;EACtD,MAAM,QAAQ,OAAO,IAAI,KAAK,EAAE;AAChC,MAAI,CAAC,MACH,OAAM,IAAI,MACR,4BAA4B,KAAK,kBAAkB,CACjD,GAAG,OAAO,MAAM,CACjB,CAAC,sFAGH;AAEH,OAAK,MAAM,CAAC,KAAK,EAAC,WAAU,OAAO,QAAQ,QAAQ,EAAE;GACnD,MAAM,QAAQ,MAAM;AACpB,OAAI,CAAC,MACH,OAAM,IAAI,MACR,aAAa,MAAM,0BAA0B,IAAI,UAClD;AAEH,OAAI,MAAM,SAAS,KACjB,OAAM,IAAI,MACR,aAAa,MAAM,GAAG,IAAI,gBAAgB,MAAM,KAAK,sBAAsB,KAAK,QACjF"}
@@ -6,7 +6,14 @@ type StreamOptions = {
      * The number of bytes at which to flush a batch of rows in a
      * backfill message. Defaults to Node's getDefaultHighWatermark().
      */
-    flushThresholdBytes?: number;
+    flushThresholdBytes?: number | undefined;
+    /**
+     * Use text-format COPY instead of binary COPY.
+     * Binary is faster and handles all types (unknown types are cast to
+     * `::text` in the SELECT). This flag exists as an escape hatch to
+     * revert to the old code path if needed.
+     */
+    textCopy?: boolean | undefined;
 };
 /**
  * Streams a series of `backfill` messages (ending with `backfill-complete`)
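The hunk above widens `flushThresholdBytes` to an explicit `number | undefined` and adds the `textCopy` escape hatch. A minimal TypeScript sketch of the resulting options bag (the shape is taken from the hunk; the call sites and values below are hypothetical):

    // Shape as declared in backfill-stream.d.ts after this change.
    type StreamOptions = {
      flushThresholdBytes?: number | undefined;
      textCopy?: boolean | undefined;
    };

    // Default path: binary COPY, flushing at the 64 KiB chunk size.
    const binaryOpts: StreamOptions = {};

    // Escape hatch: revert to the old text-format COPY code path.
    const legacyOpts: StreamOptions = {textCopy: true, flushThresholdBytes: 64 * 1024};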
@@ -1 +1 @@
-
{"version":3,"file":"backfill-stream.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/change-source/pg/backfill-stream.ts"],"names":[],"mappings":"AAIA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;
+
{"version":3,"file":"backfill-stream.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/change-source/pg/backfill-stream.ts"],"names":[],"mappings":"AAIA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAkBjD,OAAO,KAAK,EACV,iBAAiB,EACjB,eAAe,EAGf,eAAe,EAChB,MAAM,wBAAwB,CAAC;AAahC,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,mBAAmB,CAAC;AAI/C,KAAK,aAAa,GAAG;IACnB;;;OAGG;IACH,mBAAmB,CAAC,EAAE,MAAM,GAAG,SAAS,CAAC;IAEzC;;;;;OAKG;IACH,QAAQ,CAAC,EAAE,OAAO,GAAG,SAAS,CAAC;CAChC,CAAC;AAYF;;;;;GAKG;AACH,wBAAuB,cAAc,CACnC,EAAE,EAAE,UAAU,EACd,WAAW,EAAE,MAAM,EACnB,EAAC,IAAI,EAAE,YAAY,EAAC,EAAE,IAAI,CAAC,OAAO,EAAE,MAAM,GAAG,cAAc,CAAC,EAC5D,EAAE,EAAE,eAAe,EACnB,IAAI,GAAE,aAAkB,GACvB,cAAc,CAAC,eAAe,GAAG,iBAAiB,CAAC,CA8FrD"}
@@ -1,3 +1,4 @@
+import { assert } from "../../../../../shared/src/asserts.js";
 import { parse } from "../../../../../shared/src/valita.js";
 import { equals } from "../../../../../shared/src/set-utils.js";
 import { pgClient } from "../../../types/pg.js";
@@ -5,15 +6,17 @@ import { toStateVersionString } from "./lsn.js";
 import { READONLY } from "../../../db/mode-enum.js";
 import { getPublicationInfo } from "./schema/published.js";
 import { SchemaIncompatibilityError } from "../common/backfill-manager.js";
+import { BinaryCopyParser, hasBinaryDecoder, makeBinaryDecoder, textCastDecoder } from "../../../db/pg-copy-binary.js";
 import { TsvParser } from "../../../db/pg-copy.js";
 import { getTypeParsers } from "../../../db/pg-type-parser.js";
 import { TransactionPool, importSnapshot } from "../../../db/transaction-pool.js";
 import { columnMetadataSchema, tableMetadataSchema } from "./backfill-metadata.js";
-import { createReplicationSlot, makeDownloadStatements } from "./initial-sync.js";
+import { createReplicationSlot, makeBinarySelectExprs, makeDownloadStatements } from "./initial-sync.js";
 import postgres from "postgres";
 import { PG_UNDEFINED_COLUMN, PG_UNDEFINED_TABLE } from "@drdgvhbh/postgres-error-codes";
 //#region ../zero-cache/src/services/change-source/pg/backfill-stream.ts
 var POSTGRES_COPY_CHUNK_SIZE = 64 * 1024;
+var SAMPLE_OR_LIMIT_RE = /\sTABLESAMPLE\s+BERNOULLI\b|\sLIMIT\s+\d/i;
 /**
  * Streams a series of `backfill` messages (ending with `backfill-complete`)
  * at a set watermark (i.e. LSN). The data is retrieved via a COPY stream
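The new `SAMPLE_OR_LIMIT_RE` guards backfill COPY commands against sampling or limiting clauses (the assertion itself appears in a later hunk). A quick TypeScript sketch of what the pattern matches; the example strings are illustrative only:

    const SAMPLE_OR_LIMIT_RE = /\sTABLESAMPLE\s+BERNOULLI\b|\sLIMIT\s+\d/i;

    // Clauses preceded by whitespace match (case-insensitively):
    SAMPLE_OR_LIMIT_RE.test('SELECT * FROM t TABLESAMPLE BERNOULLI (1)'); // true
    SAMPLE_OR_LIMIT_RE.test('select * from t limit 10'); // true

    // A quoted identifier named "limit" is not preceded by bare
    // whitespace-then-keyword, so it does not match:
    SAMPLE_OR_LIMIT_RE.test('SELECT "limit" FROM t'); // false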
@@ -22,20 +25,30 @@ var POSTGRES_COPY_CHUNK_SIZE = 64 * 1024;
  */
 async function* streamBackfill(lc, upstreamURI, { slot, publications }, bf, opts = {}) {
   lc = lc.withContext("component", "backfill").withContext("table", bf.table.name);
-  const { flushThresholdBytes = POSTGRES_COPY_CHUNK_SIZE } = opts;
-  const db = pgClient(lc, upstreamURI, {
-    connection: { ["application_name"]: "backfill-stream" },
-    ["max_lifetime"]: 7200
-  });
+  const { flushThresholdBytes = POSTGRES_COPY_CHUNK_SIZE, textCopy = false } = opts;
+  const db = pgClient(lc, upstreamURI, "backfill-stream", { ["max_lifetime"]: 7200 });
   let tx;
   let watermark;
   try {
     ({tx, watermark} = await createSnapshotTransaction(lc, upstreamURI, db, slot));
     const { tableSpec, backfill } = await validateSchema(tx, publications, bf, watermark);
-    const types = await getTypeParsers(db, { returnJsonAsString: true });
     const { relation, columns } = backfill;
     const cols = [...relation.rowKey.columns, ...columns];
-
+    const stmts = makeDownloadStatements(tableSpec, cols);
+    if (textCopy) {
+      const types = await getTypeParsers(db, { returnJsonAsString: true });
+      yield* stream(lc, tx, backfill, stmts, `COPY (${stmts.select}) TO STDOUT`, new TsvParser(), cols.map((col) => {
+        const parser = types.getTypeParser(tableSpec.columns[col].typeOID);
+        return (text) => parser(text);
+      }), flushThresholdBytes);
+    } else {
+      const binaryStmts = makeDownloadStatements(tableSpec, cols, void 0, void 0, makeBinarySelectExprs(tableSpec, cols));
+      yield* stream(lc, tx, backfill, stmts, `COPY (${binaryStmts.select}) TO STDOUT WITH (FORMAT binary)`, new BinaryCopyParser(), cols.map((col) => {
+        const spec = tableSpec.columns[col];
+        const decoder = hasBinaryDecoder(spec) ? makeBinaryDecoder(spec) : textCastDecoder;
+        return (buf) => decoder(buf);
+      }), flushThresholdBytes);
+    }
   } catch (e) {
     if (e instanceof postgres.PostgresError && (e.code === PG_UNDEFINED_TABLE || e.code === PG_UNDEFINED_COLUMN)) throw new SchemaIncompatibilityError(bf, String(e), { cause: e });
     throw e;
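Both branches above feed the same `stream()` helper; only the COPY command, the chunk parser, and the per-column decoders differ. A hedged sketch of that pairing (the type names below are illustrative, not exports of this package):

    // The contract both branches satisfy: a parser that yields fields
    // from raw COPY chunks, plus one decoder per selected column.
    type FieldParser<T> = {parse(chunk: Buffer): Iterable<T | null>};
    type FieldDecoder<T> = (field: T) => unknown;

    // Text path: TSV fields arrive as strings and are decoded by pg
    // type parsers. Binary path: fields arrive as Buffers; columns
    // without a binary decoder were cast to ::text in the SELECT, so
    // a text-cast fallback can always handle them.
    declare const textPair: [FieldParser<string>, FieldDecoder<string>[]];
    declare const binaryPair: [FieldParser<Buffer>, FieldDecoder<Buffer>[]];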
@@ -44,7 +57,8 @@ async function* streamBackfill(lc, upstreamURI, { slot, publications }, bf, opts
     db.end().catch((e) => lc.warn?.(`error closing backfill connection`, e));
   }
 }
-async function* stream(lc, tx, backfill, { select, getTotalRows, getTotalBytes }, colParsers, flushThresholdBytes) {
+async function* stream(lc, tx, backfill, { getTotalRows, getTotalBytes }, copyCommand, parser, decoders, flushThresholdBytes) {
+  assert(!SAMPLE_OR_LIMIT_RE.test(copyCommand), `backfill COPY must not sample or limit: ${copyCommand}`);
   const start = performance.now();
   const [rows, bytes] = await tx.processReadTask((sql) => Promise.all([sql.unsafe(getTotalRows), sql.unsafe(getTotalBytes)]));
   const status = {
@@ -53,9 +67,8 @@ async function* stream(lc, tx, backfill, { select, getTotalRows, getTotalBytes }
     totalBytes: Number(bytes[0].totalBytes)
   };
   let elapsed = (performance.now() - start).toFixed(3);
-  lc.info?.(`Computed total rows and bytes for: ${select} (${elapsed} ms)`, { status });
-  const copyStream = await tx.processReadTask((sql) => sql.unsafe(`COPY (${select}) TO STDOUT`).readable());
-  const tsvParser = new TsvParser();
+  lc.info?.(`Computed total rows and bytes for: ${copyCommand} (${elapsed} ms)`, { status });
+  const copyStream = await tx.processReadTask((sql) => sql.unsafe(copyCommand).readable());
   let totalBytes = 0;
   let totalMsgs = 0;
   let rowValues = [];
@@ -63,16 +76,16 @@ async function* stream(lc, tx, backfill, { select, getTotalRows, getTotalBytes }
   const logFlushed = () => {
     lc.debug?.(`Flushed ${rowValues.length} rows, ${bufferedBytes} bytes (total: rows=${status.rows}, msgs=${totalMsgs}, bytes=${totalBytes})`);
   };
-  let row = Array.from({ length: colParsers.length });
+  let row = Array.from({ length: decoders.length });
   let col = 0;
   for await (const data of copyStream) {
     const chunk = data;
-    for (const text of tsvParser.parse(chunk)) {
-      row[col] = text === null ? null : colParsers[col](text);
-      if (++col === colParsers.length) {
+    for (const field of parser.parse(chunk)) {
+      row[col] = field === null ? null : decoders[col](field);
+      if (++col === decoders.length) {
         rowValues.push(row);
         status.rows++;
-        row = Array.from({ length: colParsers.length });
+        row = Array.from({ length: decoders.length });
         col = 0;
       }
     }
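The parse loop above is format-agnostic: it fills a fixed-width row column by column and starts a new row every `decoders.length` fields. A self-contained TypeScript restatement (the function name is illustrative):

    function assembleRows<T>(
      fields: Iterable<T | null>,
      decoders: ((field: T) => unknown)[],
    ): unknown[][] {
      const rows: unknown[][] = [];
      let row: unknown[] = Array.from({length: decoders.length});
      let col = 0;
      for (const field of fields) {
        // NULL fields pass through; all others are decoded per column.
        row[col] = field === null ? null : decoders[col](field);
        if (++col === decoders.length) {
          rows.push(row);
          row = Array.from({length: decoders.length});
          col = 0;
        }
      }
      return rows;
    }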
@@ -119,7 +132,7 @@ async function* stream(lc, tx, backfill, { select, getTotalRows, getTotalBytes }
  * LSN.)
  */
 async function createSnapshotTransaction(lc, upstreamURI, db, slotNamePrefix) {
-  const replicationSession = pgClient(lc, upstreamURI, {
+  const replicationSession = pgClient(lc, upstreamURI, "backfill-replication-session", {
     ["fetch_types"]: false,
     connection: { replication: "database" }
   });
@@ -1 +1 @@
-
{"version":3,"file":"backfill-stream.js","names":[],"sources":["../../../../../../../zero-cache/src/services/change-source/pg/backfill-stream.ts"],"sourcesContent":["import {\n PG_UNDEFINED_COLUMN,\n PG_UNDEFINED_TABLE,\n} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport postgres from 'postgres';\nimport {equals} from '../../../../../shared/src/set-utils.ts';\nimport * as v from '../../../../../shared/src/valita.ts';\nimport {READONLY} from '../../../db/mode-enum.ts';\nimport {TsvParser} from '../../../db/pg-copy.ts';\nimport {getTypeParsers, type TypeParser} from '../../../db/pg-type-parser.ts';\nimport type {PublishedTableSpec} from '../../../db/specs.ts';\nimport {importSnapshot, TransactionPool} from '../../../db/transaction-pool.ts';\nimport {pgClient, type PostgresDB} from '../../../types/pg.ts';\nimport {SchemaIncompatibilityError} from '../common/backfill-manager.ts';\nimport type {\n BackfillCompleted,\n BackfillRequest,\n DownloadStatus,\n JSONValue,\n MessageBackfill,\n} from '../protocol/current.ts';\nimport {\n columnMetadataSchema,\n tableMetadataSchema,\n} from './backfill-metadata.ts';\nimport {\n createReplicationSlot,\n makeDownloadStatements,\n type DownloadStatements,\n} from './initial-sync.ts';\nimport {toStateVersionString} from './lsn.ts';\nimport {getPublicationInfo} from './schema/published.ts';\nimport type {Replica} from './schema/shard.ts';\n\ntype BackfillParams = Omit<BackfillCompleted, 'tag'>;\n\ntype StreamOptions = {\n /**\n * The number of bytes at which to flush a batch of rows in a\n * backfill message. Defaults to Node's getDefaultHighWatermark().\n */\n flushThresholdBytes?: number;\n};\n\n// The size of chunks that Postgres sends on COPY stream.\n// This happens to match NodeJS's getDefaultHighWatermark()\n// (for Node v20+).\nconst POSTGRES_COPY_CHUNK_SIZE = 64 * 1024;\n\n/**\n * Streams a series of `backfill` messages (ending with `backfill-complete`)\n * at a set watermark (i.e. LSN). 
The data is retrieved via a COPY stream\n * made at a transaction snapshot corresponding to specific LSN, obtained by\n * creating a short-lived replication slot.\n */\nexport async function* streamBackfill(\n lc: LogContext,\n upstreamURI: string,\n {slot, publications}: Pick<Replica, 'slot' | 'publications'>,\n bf: BackfillRequest,\n opts: StreamOptions = {},\n): AsyncGenerator<MessageBackfill | BackfillCompleted> {\n lc = lc\n .withContext('component', 'backfill')\n .withContext('table', bf.table.name);\n\n const {flushThresholdBytes = POSTGRES_COPY_CHUNK_SIZE} = opts;\n const db = pgClient(lc, upstreamURI, {\n connection: {['application_name']: 'backfill-stream'},\n ['max_lifetime']: 120 * 60, // set a long (2h) limit for COPY streaming\n });\n let tx: TransactionPool | undefined;\n let watermark: string;\n try {\n ({tx, watermark} = await createSnapshotTransaction(\n lc,\n upstreamURI,\n db,\n slot,\n ));\n const {tableSpec, backfill} = await validateSchema(\n tx,\n publications,\n bf,\n watermark,\n );\n const types = await getTypeParsers(db, {returnJsonAsString: true});\n\n // Note: validateSchema ensures that the rowKey and columns are disjoint\n const {relation, columns} = backfill;\n const cols = [...relation.rowKey.columns, ...columns];\n\n yield* stream(\n lc,\n tx,\n backfill,\n makeDownloadStatements(tableSpec, cols),\n cols.map(col => types.getTypeParser(tableSpec.columns[col].typeOID)),\n flushThresholdBytes,\n );\n } catch (e) {\n // Although we make the best effort to validate the schema at the\n // transaction snapshot, certain forms of `ALTER TABLE` are not\n // MVCC safe and not \"frozen\" in the snapshot:\n //\n // https://www.postgresql.org/docs/current/mvcc-caveats.html\n //\n // Handle these errors as schema incompatibility errors rather than\n // unknown runtime errors.\n if (\n e instanceof postgres.PostgresError &&\n (e.code === PG_UNDEFINED_TABLE || e.code === PG_UNDEFINED_COLUMN)\n ) {\n throw new SchemaIncompatibilityError(bf, String(e), {cause: e});\n }\n throw e;\n } finally {\n tx?.setDone();\n // Workaround postgres.js hanging at the end of some COPY commands:\n // https://github.com/porsager/postgres/issues/499\n void db.end().catch(e => lc.warn?.(`error closing backfill connection`, e));\n }\n}\n\nasync function* stream(\n lc: LogContext,\n tx: TransactionPool,\n backfill: BackfillParams,\n {select, getTotalRows, getTotalBytes}: DownloadStatements,\n colParsers: TypeParser[],\n flushThresholdBytes: number,\n): AsyncGenerator<MessageBackfill | BackfillCompleted> {\n const start = performance.now();\n const [rows, bytes] = await tx.processReadTask(sql =>\n Promise.all([\n sql.unsafe<{totalRows: bigint}[]>(getTotalRows),\n sql.unsafe<{totalBytes: bigint}[]>(getTotalBytes),\n ]),\n );\n const status: DownloadStatus = {\n rows: 0,\n totalRows: Number(rows[0].totalRows),\n totalBytes: Number(bytes[0].totalBytes),\n };\n\n let elapsed = (performance.now() - start).toFixed(3);\n lc.info?.(`Computed total rows and bytes for: ${select} (${elapsed} ms)`, {\n status,\n });\n const copyStream = await tx.processReadTask(sql =>\n sql.unsafe(`COPY (${select}) TO STDOUT`).readable(),\n );\n\n const tsvParser = new TsvParser();\n let totalBytes = 0;\n let totalMsgs = 0;\n let rowValues: JSONValue[][] = [];\n let bufferedBytes = 0;\n\n const logFlushed = () => {\n lc.debug?.(\n `Flushed ${rowValues.length} rows, ${bufferedBytes} bytes ` +\n `(total: rows=${status.rows}, msgs=${totalMsgs}, bytes=${totalBytes})`,\n );\n };\n\n // Tracks the row being parsed.\n let row: 
JSONValue[] = Array.from({length: colParsers.length});\n let col = 0;\n\n for await (const data of copyStream) {\n const chunk = data as Buffer;\n for (const text of tsvParser.parse(chunk)) {\n row[col] = text === null ? null : (colParsers[col](text) as JSONValue);\n\n if (++col === colParsers.length) {\n rowValues.push(row);\n status.rows++;\n row = Array.from({length: colParsers.length});\n col = 0;\n }\n }\n bufferedBytes += chunk.byteLength;\n totalBytes += chunk.byteLength;\n\n if (bufferedBytes >= flushThresholdBytes) {\n yield {tag: 'backfill', ...backfill, rowValues, status};\n totalMsgs++;\n logFlushed();\n rowValues = [];\n bufferedBytes = 0;\n }\n }\n\n // Flush the last batch of rows.\n if (rowValues.length > 0) {\n yield {tag: 'backfill', ...backfill, rowValues, status};\n totalMsgs++;\n logFlushed();\n }\n\n yield {tag: 'backfill-completed', ...backfill, status};\n elapsed = (performance.now() - start).toFixed(3);\n lc.info?.(\n `Finished streaming ${status.rows} rows, ${totalMsgs} msgs, ${totalBytes} bytes ` +\n `(${elapsed} ms)`,\n );\n}\n\n/**\n * Creates (and drops) a replication slot in order to obtain a snapshot\n * that corresponds with a specific LSN. Sets the snapshot on the\n * TransactionPool and returns the watermark corresponding to the LSN.\n *\n * (Note that PG's other LSN-related functions are not scoped to a\n * transaction; this is the only way to get set a transaction at a specific\n * LSN.)\n */\nasync function createSnapshotTransaction(\n lc: LogContext,\n upstreamURI: string,\n db: PostgresDB,\n slotNamePrefix: string,\n) {\n const replicationSession = pgClient(lc, upstreamURI, {\n ['fetch_types']: false, // Necessary for the streaming protocol\n connection: {replication: 'database'}, // https://www.postgresql.org/docs/current/protocol-replication.html\n });\n const tempSlot = `${slotNamePrefix}_bf_${Date.now()}`;\n try {\n const {snapshot_name: snapshot, consistent_point: lsn} =\n await createReplicationSlot(lc, replicationSession, tempSlot);\n\n const {init, imported} = importSnapshot(snapshot);\n const tx = new TransactionPool(lc, {mode: READONLY, init}).run(db);\n await imported;\n await replicationSession.unsafe(`DROP_REPLICATION_SLOT \"${tempSlot}\"`);\n\n const watermark = toStateVersionString(lsn);\n lc.info?.(`Opened snapshot transaction at LSN ${lsn} (${watermark})`);\n return {tx, watermark};\n } catch (e) {\n // In the event of a failure, clean up the replication slot if created.\n await replicationSession.unsafe(\n /*sql*/\n `SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots\n WHERE slot_name = '${tempSlot}'`,\n );\n lc.error?.(`Failed to create backfill snapshot`, e);\n throw e;\n } finally {\n await replicationSession.end();\n }\n}\n\nfunction validateSchema(\n tx: TransactionPool,\n publications: string[],\n bf: BackfillRequest,\n watermark: string,\n): Promise<{\n tableSpec: PublishedTableSpec;\n backfill: BackfillParams;\n}> {\n return tx.processReadTask(async sql => {\n const {tables} = await getPublicationInfo(sql, publications);\n const spec = tables.find(\n spec => spec.schema === bf.table.schema && spec.name === bf.table.name,\n );\n if (!spec) {\n throw new SchemaIncompatibilityError(\n bf,\n `Table has been renamed or dropped`,\n );\n }\n const tableMeta = v.parse(bf.table.metadata, tableMetadataSchema);\n if (spec.schemaOID !== tableMeta.schemaOID) {\n throw new SchemaIncompatibilityError(\n bf,\n `Schema no longer corresponds to the original schema`,\n );\n }\n if (spec.oid !== tableMeta.relationOID) {\n 
throw new SchemaIncompatibilityError(\n bf,\n `Table no longer corresponds to the original table`,\n );\n }\n if (\n !equals(\n new Set(Object.keys(tableMeta.rowKey)),\n new Set(spec.replicaIdentityColumns),\n )\n ) {\n throw new SchemaIncompatibilityError(\n bf,\n 'Row key (e.g. PRIMARY KEY or INDEX) has changed',\n );\n }\n const allCols = [\n ...Object.entries(tableMeta.rowKey),\n ...Object.entries(bf.columns),\n ];\n for (const [col, val] of allCols) {\n const colSpec = spec.columns[col];\n if (!colSpec) {\n throw new SchemaIncompatibilityError(\n bf,\n `Column ${col} has been renamed or dropped`,\n );\n }\n const colMeta = v.parse(val, columnMetadataSchema);\n if (colMeta.attNum !== colSpec.pos) {\n throw new SchemaIncompatibilityError(\n bf,\n `Column ${col} no longer corresponds to the original column`,\n );\n }\n }\n const backfill: BackfillParams = {\n relation: {\n schema: bf.table.schema,\n name: bf.table.name,\n rowKey: {columns: Object.keys(tableMeta.rowKey)},\n },\n columns: Object.keys(bf.columns).filter(\n col => !(col in tableMeta.rowKey),\n ),\n watermark,\n };\n return {tableSpec: spec, backfill};\n });\n}\n"],"mappings":";;;;;;;;;;;;;;;AAgDA,IAAM,2BAA2B,KAAK;;;;;;;AAQtC,gBAAuB,eACrB,IACA,aACA,EAAC,MAAM,gBACP,IACA,OAAsB,EAAE,EAC6B;AACrD,MAAK,GACF,YAAY,aAAa,WAAW,CACpC,YAAY,SAAS,GAAG,MAAM,KAAK;CAEtC,MAAM,EAAC,sBAAsB,6BAA4B;CACzD,MAAM,KAAK,SAAS,IAAI,aAAa;EACnC,YAAY,GAAE,qBAAqB,mBAAkB;GACpD,iBAAiB;EACnB,CAAC;CACF,IAAI;CACJ,IAAI;AACJ,KAAI;AACF,GAAC,CAAC,IAAI,aAAa,MAAM,0BACvB,IACA,aACA,IACA,KACD;EACD,MAAM,EAAC,WAAW,aAAY,MAAM,eAClC,IACA,cACA,IACA,UACD;EACD,MAAM,QAAQ,MAAM,eAAe,IAAI,EAAC,oBAAoB,MAAK,CAAC;EAGlE,MAAM,EAAC,UAAU,YAAW;EAC5B,MAAM,OAAO,CAAC,GAAG,SAAS,OAAO,SAAS,GAAG,QAAQ;AAErD,SAAO,OACL,IACA,IACA,UACA,uBAAuB,WAAW,KAAK,EACvC,KAAK,KAAI,QAAO,MAAM,cAAc,UAAU,QAAQ,KAAK,QAAQ,CAAC,EACpE,oBACD;UACM,GAAG;AASV,MACE,aAAa,SAAS,kBACrB,EAAE,SAAS,sBAAsB,EAAE,SAAS,qBAE7C,OAAM,IAAI,2BAA2B,IAAI,OAAO,EAAE,EAAE,EAAC,OAAO,GAAE,CAAC;AAEjE,QAAM;WACE;AACR,MAAI,SAAS;AAGR,KAAG,KAAK,CAAC,OAAM,MAAK,GAAG,OAAO,qCAAqC,EAAE,CAAC;;;AAI/E,gBAAgB,OACd,IACA,IACA,UACA,EAAC,QAAQ,cAAc,iBACvB,YACA,qBACqD;CACrD,MAAM,QAAQ,YAAY,KAAK;CAC/B,MAAM,CAAC,MAAM,SAAS,MAAM,GAAG,iBAAgB,QAC7C,QAAQ,IAAI,CACV,IAAI,OAA8B,aAAa,EAC/C,IAAI,OAA+B,cAAc,CAClD,CAAC,CACH;CACD,MAAM,SAAyB;EAC7B,MAAM;EACN,WAAW,OAAO,KAAK,GAAG,UAAU;EACpC,YAAY,OAAO,MAAM,GAAG,WAAW;EACxC;CAED,IAAI,WAAW,YAAY,KAAK,GAAG,OAAO,QAAQ,EAAE;AACpD,IAAG,OAAO,sCAAsC,OAAO,IAAI,QAAQ,OAAO,EACxE,QACD,CAAC;CACF,MAAM,aAAa,MAAM,GAAG,iBAAgB,QAC1C,IAAI,OAAO,SAAS,OAAO,aAAa,CAAC,UAAU,CACpD;CAED,MAAM,YAAY,IAAI,WAAW;CACjC,IAAI,aAAa;CACjB,IAAI,YAAY;CAChB,IAAI,YAA2B,EAAE;CACjC,IAAI,gBAAgB;CAEpB,MAAM,mBAAmB;AACvB,KAAG,QACD,WAAW,UAAU,OAAO,SAAS,cAAc,sBACjC,OAAO,KAAK,SAAS,UAAU,UAAU,WAAW,GACvE;;CAIH,IAAI,MAAmB,MAAM,KAAK,EAAC,QAAQ,WAAW,QAAO,CAAC;CAC9D,IAAI,MAAM;AAEV,YAAW,MAAM,QAAQ,YAAY;EACnC,MAAM,QAAQ;AACd,OAAK,MAAM,QAAQ,UAAU,MAAM,MAAM,EAAE;AACzC,OAAI,OAAO,SAAS,OAAO,OAAQ,WAAW,KAAK,KAAK;AAExD,OAAI,EAAE,QAAQ,WAAW,QAAQ;AAC/B,cAAU,KAAK,IAAI;AACnB,WAAO;AACP,UAAM,MAAM,KAAK,EAAC,QAAQ,WAAW,QAAO,CAAC;AAC7C,UAAM;;;AAGV,mBAAiB,MAAM;AACvB,gBAAc,MAAM;AAEpB,MAAI,iBAAiB,qBAAqB;AACxC,SAAM;IAAC,KAAK;IAAY,GAAG;IAAU;IAAW;IAAO;AACvD;AACA,eAAY;AACZ,eAAY,EAAE;AACd,mBAAgB;;;AAKpB,KAAI,UAAU,SAAS,GAAG;AACxB,QAAM;GAAC,KAAK;GAAY,GAAG;GAAU;GAAW;GAAO;AACvD;AACA,cAAY;;AAGd,OAAM;EAAC,KAAK;EAAsB,GAAG;EAAU;EAAO;AACtD,YAAW,YAAY,KAAK,GAAG,OAAO,QAAQ,EAAE;AAChD,IAAG,OACD,sBAAsB,OAAO,KAAK,SAAS,UAAU,SAAS,WAAW,UACnE,QAAQ,MACf;;;;;;;;;;;AAYH,eAAe,0BACb,IACA,aACA,IACA,gBACA;CACA,MAAM,qBAAqB,SAAS,IAAI,aAAa;GAClD,gBAAgB;EACjB,YAAY,EAAC,a
AAa,YAAW;EACtC,CAAC;CACF,MAAM,WAAW,GAAG,eAAe,MAAM,KAAK,KAAK;AACnD,KAAI;EACF,MAAM,EAAC,eAAe,UAAU,kBAAkB,QAChD,MAAM,sBAAsB,IAAI,oBAAoB,SAAS;EAE/D,MAAM,EAAC,MAAM,aAAY,eAAe,SAAS;EACjD,MAAM,KAAK,IAAI,gBAAgB,IAAI;GAAC,MAAM;GAAU;GAAK,CAAC,CAAC,IAAI,GAAG;AAClE,QAAM;AACN,QAAM,mBAAmB,OAAO,0BAA0B,SAAS,GAAG;EAEtE,MAAM,YAAY,qBAAqB,IAAI;AAC3C,KAAG,OAAO,sCAAsC,IAAI,IAAI,UAAU,GAAG;AACrE,SAAO;GAAC;GAAI;GAAU;UACf,GAAG;AAEV,QAAM,mBAAmB,OAEvB;8BACwB,SAAS,GAClC;AACD,KAAG,QAAQ,sCAAsC,EAAE;AACnD,QAAM;WACE;AACR,QAAM,mBAAmB,KAAK;;;AAIlC,SAAS,eACP,IACA,cACA,IACA,WAIC;AACD,QAAO,GAAG,gBAAgB,OAAM,QAAO;EACrC,MAAM,EAAC,WAAU,MAAM,mBAAmB,KAAK,aAAa;EAC5D,MAAM,OAAO,OAAO,MAClB,SAAQ,KAAK,WAAW,GAAG,MAAM,UAAU,KAAK,SAAS,GAAG,MAAM,KACnE;AACD,MAAI,CAAC,KACH,OAAM,IAAI,2BACR,IACA,oCACD;EAEH,MAAM,YAAY,MAAQ,GAAG,MAAM,UAAU,oBAAoB;AACjE,MAAI,KAAK,cAAc,UAAU,UAC/B,OAAM,IAAI,2BACR,IACA,sDACD;AAEH,MAAI,KAAK,QAAQ,UAAU,YACzB,OAAM,IAAI,2BACR,IACA,oDACD;AAEH,MACE,CAAC,OACC,IAAI,IAAI,OAAO,KAAK,UAAU,OAAO,CAAC,EACtC,IAAI,IAAI,KAAK,uBAAuB,CACrC,CAED,OAAM,IAAI,2BACR,IACA,kDACD;EAEH,MAAM,UAAU,CACd,GAAG,OAAO,QAAQ,UAAU,OAAO,EACnC,GAAG,OAAO,QAAQ,GAAG,QAAQ,CAC9B;AACD,OAAK,MAAM,CAAC,KAAK,QAAQ,SAAS;GAChC,MAAM,UAAU,KAAK,QAAQ;AAC7B,OAAI,CAAC,QACH,OAAM,IAAI,2BACR,IACA,UAAU,IAAI,8BACf;AAGH,OADgB,MAAQ,KAAK,qBAAqB,CACtC,WAAW,QAAQ,IAC7B,OAAM,IAAI,2BACR,IACA,UAAU,IAAI,+CACf;;AAcL,SAAO;GAAC,WAAW;GAAM,UAXQ;IAC/B,UAAU;KACR,QAAQ,GAAG,MAAM;KACjB,MAAM,GAAG,MAAM;KACf,QAAQ,EAAC,SAAS,OAAO,KAAK,UAAU,OAAO,EAAC;KACjD;IACD,SAAS,OAAO,KAAK,GAAG,QAAQ,CAAC,QAC/B,QAAO,EAAE,OAAO,UAAU,QAC3B;IACD;IACD;GACiC;GAClC"}
+
{"version":3,"file":"backfill-stream.js","names":[],"sources":["../../../../../../../zero-cache/src/services/change-source/pg/backfill-stream.ts"],"sourcesContent":["import {\n PG_UNDEFINED_COLUMN,\n PG_UNDEFINED_TABLE,\n} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport postgres from 'postgres';\nimport {assert} from '../../../../../shared/src/asserts.ts';\nimport {equals} from '../../../../../shared/src/set-utils.ts';\nimport * as v from '../../../../../shared/src/valita.ts';\nimport {READONLY} from '../../../db/mode-enum.ts';\nimport {\n BinaryCopyParser,\n hasBinaryDecoder,\n makeBinaryDecoder,\n textCastDecoder,\n} from '../../../db/pg-copy-binary.ts';\nimport {TsvParser} from '../../../db/pg-copy.ts';\nimport {getTypeParsers} from '../../../db/pg-type-parser.ts';\nimport type {PublishedTableSpec} from '../../../db/specs.ts';\nimport {importSnapshot, TransactionPool} from '../../../db/transaction-pool.ts';\nimport {pgClient, type PostgresDB} from '../../../types/pg.ts';\nimport {SchemaIncompatibilityError} from '../common/backfill-manager.ts';\nimport type {\n BackfillCompleted,\n BackfillRequest,\n DownloadStatus,\n JSONValue,\n MessageBackfill,\n} from '../protocol/current.ts';\nimport {\n columnMetadataSchema,\n tableMetadataSchema,\n} from './backfill-metadata.ts';\nimport {\n createReplicationSlot,\n makeBinarySelectExprs,\n makeDownloadStatements,\n type DownloadStatements,\n} from './initial-sync.ts';\nimport {toStateVersionString} from './lsn.ts';\nimport {getPublicationInfo} from './schema/published.ts';\nimport type {Replica} from './schema/shard.ts';\n\ntype BackfillParams = Omit<BackfillCompleted, 'tag'>;\n\ntype StreamOptions = {\n /**\n * The number of bytes at which to flush a batch of rows in a\n * backfill message. Defaults to Node's getDefaultHighWatermark().\n */\n flushThresholdBytes?: number | undefined;\n\n /**\n * Use text-format COPY instead of binary COPY.\n * Binary is faster and handles all types (unknown types are cast to\n * `::text` in the SELECT). This flag exists as an escape hatch to\n * revert to the old code path if needed.\n */\n textCopy?: boolean | undefined;\n};\n\n// The size of chunks that Postgres sends on COPY stream.\n// This happens to match NodeJS's getDefaultHighWatermark()\n// (for Node v20+).\nconst POSTGRES_COPY_CHUNK_SIZE = 64 * 1024;\n\n// Matches the exact clauses emitted by makeDownloadStatements; quoted\n// identifiers like \"limit\" won't match because they lack the surrounding\n// whitespace.\nconst SAMPLE_OR_LIMIT_RE = /\\sTABLESAMPLE\\s+BERNOULLI\\b|\\sLIMIT\\s+\\d/i;\n\n/**\n * Streams a series of `backfill` messages (ending with `backfill-complete`)\n * at a set watermark (i.e. LSN). 
The data is retrieved via a COPY stream\n * made at a transaction snapshot corresponding to specific LSN, obtained by\n * creating a short-lived replication slot.\n */\nexport async function* streamBackfill(\n lc: LogContext,\n upstreamURI: string,\n {slot, publications}: Pick<Replica, 'slot' | 'publications'>,\n bf: BackfillRequest,\n opts: StreamOptions = {},\n): AsyncGenerator<MessageBackfill | BackfillCompleted> {\n lc = lc\n .withContext('component', 'backfill')\n .withContext('table', bf.table.name);\n\n const {flushThresholdBytes = POSTGRES_COPY_CHUNK_SIZE, textCopy = false} =\n opts;\n const db = pgClient(lc, upstreamURI, 'backfill-stream', {\n ['max_lifetime']: 120 * 60, // set a long (2h) limit for COPY streaming\n });\n let tx: TransactionPool | undefined;\n let watermark: string;\n try {\n ({tx, watermark} = await createSnapshotTransaction(\n lc,\n upstreamURI,\n db,\n slot,\n ));\n const {tableSpec, backfill} = await validateSchema(\n tx,\n publications,\n bf,\n watermark,\n );\n\n // Note: validateSchema ensures that the rowKey and columns are disjoint\n const {relation, columns} = backfill;\n const cols = [...relation.rowKey.columns, ...columns];\n const stmts = makeDownloadStatements(tableSpec, cols);\n\n if (textCopy) {\n const types = await getTypeParsers(db, {returnJsonAsString: true});\n yield* stream(\n lc,\n tx,\n backfill,\n stmts,\n `COPY (${stmts.select}) TO STDOUT`,\n new TsvParser(),\n cols.map(col => {\n const parser = types.getTypeParser(tableSpec.columns[col].typeOID);\n return (text: string) => parser(text) as JSONValue;\n }),\n flushThresholdBytes,\n );\n } else {\n const binaryStmts = makeDownloadStatements(\n tableSpec,\n cols,\n undefined,\n undefined,\n makeBinarySelectExprs(tableSpec, cols),\n );\n\n yield* stream(\n lc,\n tx,\n backfill,\n stmts,\n `COPY (${binaryStmts.select}) TO STDOUT WITH (FORMAT binary)`,\n new BinaryCopyParser(),\n cols.map(col => {\n const spec = tableSpec.columns[col];\n const decoder = hasBinaryDecoder(spec)\n ? 
makeBinaryDecoder(spec)\n : textCastDecoder;\n return (buf: Buffer) => decoder(buf) as unknown as JSONValue;\n }),\n flushThresholdBytes,\n );\n }\n } catch (e) {\n // Although we make the best effort to validate the schema at the\n // transaction snapshot, certain forms of `ALTER TABLE` are not\n // MVCC safe and not \"frozen\" in the snapshot:\n //\n // https://www.postgresql.org/docs/current/mvcc-caveats.html\n //\n // Handle these errors as schema incompatibility errors rather than\n // unknown runtime errors.\n if (\n e instanceof postgres.PostgresError &&\n (e.code === PG_UNDEFINED_TABLE || e.code === PG_UNDEFINED_COLUMN)\n ) {\n throw new SchemaIncompatibilityError(bf, String(e), {cause: e});\n }\n throw e;\n } finally {\n tx?.setDone();\n // Workaround postgres.js hanging at the end of some COPY commands:\n // https://github.com/porsager/postgres/issues/499\n void db.end().catch(e => lc.warn?.(`error closing backfill connection`, e));\n }\n}\n\nasync function* stream<T>(\n lc: LogContext,\n tx: TransactionPool,\n backfill: BackfillParams,\n {\n getTotalRows,\n getTotalBytes,\n }: Pick<DownloadStatements, 'getTotalRows' | 'getTotalBytes'>,\n copyCommand: string,\n parser: {parse(chunk: Buffer): Iterable<T | null>},\n decoders: ((field: T) => JSONValue)[],\n flushThresholdBytes: number,\n): AsyncGenerator<MessageBackfill | BackfillCompleted> {\n // Backfill must read every row: TABLESAMPLE / LIMIT are reserved for shadow\n // sync and must never appear in a backfill COPY.\n assert(\n !SAMPLE_OR_LIMIT_RE.test(copyCommand),\n `backfill COPY must not sample or limit: ${copyCommand}`,\n );\n const start = performance.now();\n const [rows, bytes] = await tx.processReadTask(sql =>\n Promise.all([\n sql.unsafe<{totalRows: bigint}[]>(getTotalRows),\n sql.unsafe<{totalBytes: bigint}[]>(getTotalBytes),\n ]),\n );\n const status: DownloadStatus = {\n rows: 0,\n totalRows: Number(rows[0].totalRows),\n totalBytes: Number(bytes[0].totalBytes),\n };\n\n let elapsed = (performance.now() - start).toFixed(3);\n lc.info?.(\n `Computed total rows and bytes for: ${copyCommand} (${elapsed} ms)`,\n {\n status,\n },\n );\n const copyStream = await tx.processReadTask(sql =>\n sql.unsafe(copyCommand).readable(),\n );\n\n let totalBytes = 0;\n let totalMsgs = 0;\n let rowValues: JSONValue[][] = [];\n let bufferedBytes = 0;\n\n const logFlushed = () => {\n lc.debug?.(\n `Flushed ${rowValues.length} rows, ${bufferedBytes} bytes ` +\n `(total: rows=${status.rows}, msgs=${totalMsgs}, bytes=${totalBytes})`,\n );\n };\n\n // Tracks the row being parsed.\n let row: JSONValue[] = Array.from({length: decoders.length});\n let col = 0;\n\n for await (const data of copyStream) {\n const chunk = data as Buffer;\n for (const field of parser.parse(chunk)) {\n row[col] = field === null ? 
null : decoders[col](field);\n\n if (++col === decoders.length) {\n rowValues.push(row);\n status.rows++;\n row = Array.from({length: decoders.length});\n col = 0;\n }\n }\n bufferedBytes += chunk.byteLength;\n totalBytes += chunk.byteLength;\n\n if (bufferedBytes >= flushThresholdBytes) {\n yield {tag: 'backfill', ...backfill, rowValues, status};\n totalMsgs++;\n logFlushed();\n rowValues = [];\n bufferedBytes = 0;\n }\n }\n\n // Flush the last batch of rows.\n if (rowValues.length > 0) {\n yield {tag: 'backfill', ...backfill, rowValues, status};\n totalMsgs++;\n logFlushed();\n }\n\n yield {tag: 'backfill-completed', ...backfill, status};\n elapsed = (performance.now() - start).toFixed(3);\n lc.info?.(\n `Finished streaming ${status.rows} rows, ${totalMsgs} msgs, ${totalBytes} bytes ` +\n `(${elapsed} ms)`,\n );\n}\n\n/**\n * Creates (and drops) a replication slot in order to obtain a snapshot\n * that corresponds with a specific LSN. Sets the snapshot on the\n * TransactionPool and returns the watermark corresponding to the LSN.\n *\n * (Note that PG's other LSN-related functions are not scoped to a\n * transaction; this is the only way to get set a transaction at a specific\n * LSN.)\n */\nasync function createSnapshotTransaction(\n lc: LogContext,\n upstreamURI: string,\n db: PostgresDB,\n slotNamePrefix: string,\n) {\n const replicationSession = pgClient(\n lc,\n upstreamURI,\n 'backfill-replication-session',\n {\n ['fetch_types']: false, // Necessary for the streaming protocol\n connection: {replication: 'database'}, // https://www.postgresql.org/docs/current/protocol-replication.html\n },\n );\n const tempSlot = `${slotNamePrefix}_bf_${Date.now()}`;\n try {\n const {snapshot_name: snapshot, consistent_point: lsn} =\n await createReplicationSlot(lc, replicationSession, tempSlot);\n\n const {init, imported} = importSnapshot(snapshot);\n const tx = new TransactionPool(lc, {mode: READONLY, init}).run(db);\n await imported;\n await replicationSession.unsafe(`DROP_REPLICATION_SLOT \"${tempSlot}\"`);\n\n const watermark = toStateVersionString(lsn);\n lc.info?.(`Opened snapshot transaction at LSN ${lsn} (${watermark})`);\n return {tx, watermark};\n } catch (e) {\n // In the event of a failure, clean up the replication slot if created.\n await replicationSession.unsafe(\n /*sql*/\n `SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots\n WHERE slot_name = '${tempSlot}'`,\n );\n lc.error?.(`Failed to create backfill snapshot`, e);\n throw e;\n } finally {\n await replicationSession.end();\n }\n}\n\nfunction validateSchema(\n tx: TransactionPool,\n publications: string[],\n bf: BackfillRequest,\n watermark: string,\n): Promise<{\n tableSpec: PublishedTableSpec;\n backfill: BackfillParams;\n}> {\n return tx.processReadTask(async sql => {\n const {tables} = await getPublicationInfo(sql, publications);\n const spec = tables.find(\n spec => spec.schema === bf.table.schema && spec.name === bf.table.name,\n );\n if (!spec) {\n throw new SchemaIncompatibilityError(\n bf,\n `Table has been renamed or dropped`,\n );\n }\n const tableMeta = v.parse(bf.table.metadata, tableMetadataSchema);\n if (spec.schemaOID !== tableMeta.schemaOID) {\n throw new SchemaIncompatibilityError(\n bf,\n `Schema no longer corresponds to the original schema`,\n );\n }\n if (spec.oid !== tableMeta.relationOID) {\n throw new SchemaIncompatibilityError(\n bf,\n `Table no longer corresponds to the original table`,\n );\n }\n if (\n !equals(\n new Set(Object.keys(tableMeta.rowKey)),\n new 
Set(spec.replicaIdentityColumns),\n )\n ) {\n throw new SchemaIncompatibilityError(\n bf,\n 'Row key (e.g. PRIMARY KEY or INDEX) has changed',\n );\n }\n const allCols = [\n ...Object.entries(tableMeta.rowKey),\n ...Object.entries(bf.columns),\n ];\n for (const [col, val] of allCols) {\n const colSpec = spec.columns[col];\n if (!colSpec) {\n throw new SchemaIncompatibilityError(\n bf,\n `Column ${col} has been renamed or dropped`,\n );\n }\n const colMeta = v.parse(val, columnMetadataSchema);\n if (colMeta.attNum !== colSpec.pos) {\n throw new SchemaIncompatibilityError(\n bf,\n `Column ${col} no longer corresponds to the original column`,\n );\n }\n }\n const backfill: BackfillParams = {\n relation: {\n schema: bf.table.schema,\n name: bf.table.name,\n rowKey: {columns: Object.keys(tableMeta.rowKey)},\n },\n columns: Object.keys(bf.columns).filter(\n col => !(col in tableMeta.rowKey),\n ),\n watermark,\n };\n return {tableSpec: spec, backfill};\n });\n}\n"],"mappings":";;;;;;;;;;;;;;;;;AAgEA,IAAM,2BAA2B,KAAK;AAKtC,IAAM,qBAAqB;;;;;;;AAQ3B,gBAAuB,eACrB,IACA,aACA,EAAC,MAAM,gBACP,IACA,OAAsB,EAAE,EAC6B;AACrD,MAAK,GACF,YAAY,aAAa,WAAW,CACpC,YAAY,SAAS,GAAG,MAAM,KAAK;CAEtC,MAAM,EAAC,sBAAsB,0BAA0B,WAAW,UAChE;CACF,MAAM,KAAK,SAAS,IAAI,aAAa,mBAAmB,GACrD,iBAAiB,MACnB,CAAC;CACF,IAAI;CACJ,IAAI;AACJ,KAAI;AACF,GAAC,CAAC,IAAI,aAAa,MAAM,0BACvB,IACA,aACA,IACA,KACD;EACD,MAAM,EAAC,WAAW,aAAY,MAAM,eAClC,IACA,cACA,IACA,UACD;EAGD,MAAM,EAAC,UAAU,YAAW;EAC5B,MAAM,OAAO,CAAC,GAAG,SAAS,OAAO,SAAS,GAAG,QAAQ;EACrD,MAAM,QAAQ,uBAAuB,WAAW,KAAK;AAErD,MAAI,UAAU;GACZ,MAAM,QAAQ,MAAM,eAAe,IAAI,EAAC,oBAAoB,MAAK,CAAC;AAClE,UAAO,OACL,IACA,IACA,UACA,OACA,SAAS,MAAM,OAAO,cACtB,IAAI,WAAW,EACf,KAAK,KAAI,QAAO;IACd,MAAM,SAAS,MAAM,cAAc,UAAU,QAAQ,KAAK,QAAQ;AAClE,YAAQ,SAAiB,OAAO,KAAK;KACrC,EACF,oBACD;SACI;GACL,MAAM,cAAc,uBAClB,WACA,MACA,KAAA,GACA,KAAA,GACA,sBAAsB,WAAW,KAAK,CACvC;AAED,UAAO,OACL,IACA,IACA,UACA,OACA,SAAS,YAAY,OAAO,mCAC5B,IAAI,kBAAkB,EACtB,KAAK,KAAI,QAAO;IACd,MAAM,OAAO,UAAU,QAAQ;IAC/B,MAAM,UAAU,iBAAiB,KAAK,GAClC,kBAAkB,KAAK,GACvB;AACJ,YAAQ,QAAgB,QAAQ,IAAI;KACpC,EACF,oBACD;;UAEI,GAAG;AASV,MACE,aAAa,SAAS,kBACrB,EAAE,SAAS,sBAAsB,EAAE,SAAS,qBAE7C,OAAM,IAAI,2BAA2B,IAAI,OAAO,EAAE,EAAE,EAAC,OAAO,GAAE,CAAC;AAEjE,QAAM;WACE;AACR,MAAI,SAAS;AAGR,KAAG,KAAK,CAAC,OAAM,MAAK,GAAG,OAAO,qCAAqC,EAAE,CAAC;;;AAI/E,gBAAgB,OACd,IACA,IACA,UACA,EACE,cACA,iBAEF,aACA,QACA,UACA,qBACqD;AAGrD,QACE,CAAC,mBAAmB,KAAK,YAAY,EACrC,2CAA2C,cAC5C;CACD,MAAM,QAAQ,YAAY,KAAK;CAC/B,MAAM,CAAC,MAAM,SAAS,MAAM,GAAG,iBAAgB,QAC7C,QAAQ,IAAI,CACV,IAAI,OAA8B,aAAa,EAC/C,IAAI,OAA+B,cAAc,CAClD,CAAC,CACH;CACD,MAAM,SAAyB;EAC7B,MAAM;EACN,WAAW,OAAO,KAAK,GAAG,UAAU;EACpC,YAAY,OAAO,MAAM,GAAG,WAAW;EACxC;CAED,IAAI,WAAW,YAAY,KAAK,GAAG,OAAO,QAAQ,EAAE;AACpD,IAAG,OACD,sCAAsC,YAAY,IAAI,QAAQ,OAC9D,EACE,QACD,CACF;CACD,MAAM,aAAa,MAAM,GAAG,iBAAgB,QAC1C,IAAI,OAAO,YAAY,CAAC,UAAU,CACnC;CAED,IAAI,aAAa;CACjB,IAAI,YAAY;CAChB,IAAI,YAA2B,EAAE;CACjC,IAAI,gBAAgB;CAEpB,MAAM,mBAAmB;AACvB,KAAG,QACD,WAAW,UAAU,OAAO,SAAS,cAAc,sBACjC,OAAO,KAAK,SAAS,UAAU,UAAU,WAAW,GACvE;;CAIH,IAAI,MAAmB,MAAM,KAAK,EAAC,QAAQ,SAAS,QAAO,CAAC;CAC5D,IAAI,MAAM;AAEV,YAAW,MAAM,QAAQ,YAAY;EACnC,MAAM,QAAQ;AACd,OAAK,MAAM,SAAS,OAAO,MAAM,MAAM,EAAE;AACvC,OAAI,OAAO,UAAU,OAAO,OAAO,SAAS,KAAK,MAAM;AAEvD,OAAI,EAAE,QAAQ,SAAS,QAAQ;AAC7B,cAAU,KAAK,IAAI;AACnB,WAAO;AACP,UAAM,MAAM,KAAK,EAAC,QAAQ,SAAS,QAAO,CAAC;AAC3C,UAAM;;;AAGV,mBAAiB,MAAM;AACvB,gBAAc,MAAM;AAEpB,MAAI,iBAAiB,qBAAqB;AACxC,SAAM;IAAC,KAAK;IAAY,GAAG;IAAU;IAAW;IAAO;AACvD;AACA,eAAY;AACZ,eAAY,EAAE;AACd,mBAAgB;;;AAKpB,KAAI,UAAU,SAAS,GAAG;AACxB,QAAM;GAAC,KAAK;GAAY,GAAG;GAAU;GAAW;GAAO;AACvD;AACA,cAAY;;AAGd,OAAM
;EAAC,KAAK;EAAsB,GAAG;EAAU;EAAO;AACtD,YAAW,YAAY,KAAK,GAAG,OAAO,QAAQ,EAAE;AAChD,IAAG,OACD,sBAAsB,OAAO,KAAK,SAAS,UAAU,SAAS,WAAW,UACnE,QAAQ,MACf;;;;;;;;;;;AAYH,eAAe,0BACb,IACA,aACA,IACA,gBACA;CACA,MAAM,qBAAqB,SACzB,IACA,aACA,gCACA;GACG,gBAAgB;EACjB,YAAY,EAAC,aAAa,YAAW;EACtC,CACF;CACD,MAAM,WAAW,GAAG,eAAe,MAAM,KAAK,KAAK;AACnD,KAAI;EACF,MAAM,EAAC,eAAe,UAAU,kBAAkB,QAChD,MAAM,sBAAsB,IAAI,oBAAoB,SAAS;EAE/D,MAAM,EAAC,MAAM,aAAY,eAAe,SAAS;EACjD,MAAM,KAAK,IAAI,gBAAgB,IAAI;GAAC,MAAM;GAAU;GAAK,CAAC,CAAC,IAAI,GAAG;AAClE,QAAM;AACN,QAAM,mBAAmB,OAAO,0BAA0B,SAAS,GAAG;EAEtE,MAAM,YAAY,qBAAqB,IAAI;AAC3C,KAAG,OAAO,sCAAsC,IAAI,IAAI,UAAU,GAAG;AACrE,SAAO;GAAC;GAAI;GAAU;UACf,GAAG;AAEV,QAAM,mBAAmB,OAEvB;8BACwB,SAAS,GAClC;AACD,KAAG,QAAQ,sCAAsC,EAAE;AACnD,QAAM;WACE;AACR,QAAM,mBAAmB,KAAK;;;AAIlC,SAAS,eACP,IACA,cACA,IACA,WAIC;AACD,QAAO,GAAG,gBAAgB,OAAM,QAAO;EACrC,MAAM,EAAC,WAAU,MAAM,mBAAmB,KAAK,aAAa;EAC5D,MAAM,OAAO,OAAO,MAClB,SAAQ,KAAK,WAAW,GAAG,MAAM,UAAU,KAAK,SAAS,GAAG,MAAM,KACnE;AACD,MAAI,CAAC,KACH,OAAM,IAAI,2BACR,IACA,oCACD;EAEH,MAAM,YAAY,MAAQ,GAAG,MAAM,UAAU,oBAAoB;AACjE,MAAI,KAAK,cAAc,UAAU,UAC/B,OAAM,IAAI,2BACR,IACA,sDACD;AAEH,MAAI,KAAK,QAAQ,UAAU,YACzB,OAAM,IAAI,2BACR,IACA,oDACD;AAEH,MACE,CAAC,OACC,IAAI,IAAI,OAAO,KAAK,UAAU,OAAO,CAAC,EACtC,IAAI,IAAI,KAAK,uBAAuB,CACrC,CAED,OAAM,IAAI,2BACR,IACA,kDACD;EAEH,MAAM,UAAU,CACd,GAAG,OAAO,QAAQ,UAAU,OAAO,EACnC,GAAG,OAAO,QAAQ,GAAG,QAAQ,CAC9B;AACD,OAAK,MAAM,CAAC,KAAK,QAAQ,SAAS;GAChC,MAAM,UAAU,KAAK,QAAQ;AAC7B,OAAI,CAAC,QACH,OAAM,IAAI,2BACR,IACA,UAAU,IAAI,8BACf;AAGH,OADgB,MAAQ,KAAK,qBAAqB,CACtC,WAAW,QAAQ,IAC7B,OAAM,IAAI,2BACR,IACA,UAAU,IAAI,+CACf;;AAcL,SAAO;GAAC,WAAW;GAAM,UAXQ;IAC/B,UAAU;KACR,QAAQ,GAAG,MAAM;KACjB,MAAM,GAAG,MAAM;KACf,QAAQ,EAAC,SAAS,OAAO,KAAK,UAAU,OAAO,EAAC;KACjD;IACD,SAAS,OAAO,KAAK,GAAG,QAAQ,CAAC,QAC/B,QAAO,EAAE,OAAO,UAAU,QAC3B;IACD;IACD;GACiC;GAClC"}
@@ -1 +1 @@
-
{"version":3,"file":"change-source.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/change-source/pg/change-source.ts"],"names":[],"mappings":"AAKA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAejD,OAAO,KAAK,CAAC,MAAM,qCAAqC,CAAC;AAOzD,OAAO,KAAK,EAGV,kBAAkB,EACnB,MAAM,sBAAsB,CAAC;AAE9B,OAAO,EAAC,KAAK,WAAW,EAAC,MAAM,gCAAgC,CAAC;AAGhE,OAAO,EAEL,KAAK,WAAW,EAEjB,MAAM,0BAA0B,CAAC;AAKlC,OAAO,KAAK,EAAC,IAAI,EAAC,MAAM,2BAA2B,CAAC;AAEpD,OAAO,EAEL,KAAK,iBAAiB,EAEvB,MAAM,8CAA8C,CAAC;AACtD,OAAO,KAAK,EAAC,YAAY,EAAe,MAAM,qBAAqB,CAAC;AAEpE,OAAO,EAEL,KAAK,QAAQ,EACd,MAAM,wCAAwC,CAAC;AAchD,OAAO,KAAK,EAEV,mBAAmB,EAEpB,MAAM,mCAAmC,CAAC;AAG3C,OAAO,EAEL,KAAK,kBAAkB,EACvB,KAAK,aAAa,EACnB,MAAM,mBAAmB,CAAC;AAC3B,OAAO,KAAK,EAGV,eAAe,IAAI,gBAAgB,EACpC,MAAM,yCAAyC,CAAC;AAuBjD;;;;GAIG;AACH,wBAAsB,8BAA8B,CAClD,EAAE,EAAE,UAAU,EACd,WAAW,EAAE,MAAM,EACnB,KAAK,EAAE,WAAW,EAClB,aAAa,EAAE,MAAM,EACrB,WAAW,EAAE,kBAAkB,EAC/B,OAAO,EAAE,aAAa,EACtB,mBAAmB,SAAI,GACtB,OAAO,CAAC;IAAC,iBAAiB,EAAE,iBAAiB,CAAC;IAAC,YAAY,EAAE,YAAY,CAAA;CAAC,CAAC,
+
{"version":3,"file":"change-source.d.ts","sourceRoot":"","sources":["../../../../../../../zero-cache/src/services/change-source/pg/change-source.ts"],"names":[],"mappings":"AAKA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAejD,OAAO,KAAK,CAAC,MAAM,qCAAqC,CAAC;AAOzD,OAAO,KAAK,EAGV,kBAAkB,EACnB,MAAM,sBAAsB,CAAC;AAE9B,OAAO,EAAC,KAAK,WAAW,EAAC,MAAM,gCAAgC,CAAC;AAGhE,OAAO,EAEL,KAAK,WAAW,EAEjB,MAAM,0BAA0B,CAAC;AAKlC,OAAO,KAAK,EAAC,IAAI,EAAC,MAAM,2BAA2B,CAAC;AAEpD,OAAO,EAEL,KAAK,iBAAiB,EAEvB,MAAM,8CAA8C,CAAC;AACtD,OAAO,KAAK,EAAC,YAAY,EAAe,MAAM,qBAAqB,CAAC;AAEpE,OAAO,EAEL,KAAK,QAAQ,EACd,MAAM,wCAAwC,CAAC;AAchD,OAAO,KAAK,EAEV,mBAAmB,EAEpB,MAAM,mCAAmC,CAAC;AAG3C,OAAO,EAEL,KAAK,kBAAkB,EACvB,KAAK,aAAa,EACnB,MAAM,mBAAmB,CAAC;AAC3B,OAAO,KAAK,EAGV,eAAe,IAAI,gBAAgB,EACpC,MAAM,yCAAyC,CAAC;AAuBjD;;;;GAIG;AACH,wBAAsB,8BAA8B,CAClD,EAAE,EAAE,UAAU,EACd,WAAW,EAAE,MAAM,EACnB,KAAK,EAAE,WAAW,EAClB,aAAa,EAAE,MAAM,EACrB,WAAW,EAAE,kBAAkB,EAC/B,OAAO,EAAE,aAAa,EACtB,mBAAmB,SAAI,GACtB,OAAO,CAAC;IAAC,iBAAiB,EAAE,iBAAiB,CAAC;IAAC,YAAY,EAAE,YAAY,CAAA;CAAC,CAAC,CAuC7E;AA4cD,qBAAa,KAAM,YAAW,QAAQ;;gBAIxB,IAAI,EAAE,IAAI,CAAC,MAAM,CAAC;IAI9B,QAAQ,CAAC,MAAM,EAAE,mBAAmB,GAAG,IAAI;IAgC3C,GAAG,CAAC,SAAS,EAAE,WAAW;CAoB3B;AAED,QAAA,MAAM,eAAe;;;;aAInB,CAAC;AAEH,MAAM,MAAM,SAAS,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,eAAe,CAAC,CAAC;AAgxBxD,wBAAgB,iBAAiB,CAAC,CAAC,EAAE,kBAAkB,EAAE,CAAC,EAAE,gBAAgB,WAwB3E"}