@rocicorp/zero 0.26.0 → 0.26.1-canary.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/out/analyze-query/src/run-ast.d.ts.map +1 -1
- package/out/analyze-query/src/run-ast.js +4 -1
- package/out/analyze-query/src/run-ast.js.map +1 -1
- package/out/replicache/src/btree/node.js +4 -4
- package/out/replicache/src/btree/node.js.map +1 -1
- package/out/replicache/src/btree/write.js +2 -2
- package/out/replicache/src/btree/write.js.map +1 -1
- package/out/replicache/src/dag/gc.js +5 -2
- package/out/replicache/src/dag/gc.js.map +1 -1
- package/out/replicache/src/db/write.d.ts.map +1 -1
- package/out/replicache/src/db/write.js +21 -6
- package/out/replicache/src/db/write.js.map +1 -1
- package/out/replicache/src/error-responses.d.ts.map +1 -1
- package/out/replicache/src/error-responses.js +4 -1
- package/out/replicache/src/error-responses.js.map +1 -1
- package/out/replicache/src/persist/clients.d.ts.map +1 -1
- package/out/replicache/src/persist/clients.js +4 -1
- package/out/replicache/src/persist/clients.js.map +1 -1
- package/out/replicache/src/persist/collect-idb-databases.d.ts.map +1 -1
- package/out/replicache/src/persist/collect-idb-databases.js +2 -1
- package/out/replicache/src/persist/collect-idb-databases.js.map +1 -1
- package/out/replicache/src/persist/idb-databases-store.d.ts.map +1 -1
- package/out/replicache/src/persist/idb-databases-store.js +4 -1
- package/out/replicache/src/persist/idb-databases-store.js.map +1 -1
- package/out/replicache/src/process-scheduler.js +4 -1
- package/out/replicache/src/process-scheduler.js.map +1 -1
- package/out/replicache/src/replicache-impl.js +2 -2
- package/out/replicache/src/replicache-impl.js.map +1 -1
- package/out/replicache/src/subscriptions.d.ts.map +1 -1
- package/out/replicache/src/subscriptions.js +5 -2
- package/out/replicache/src/subscriptions.js.map +1 -1
- package/out/replicache/src/sync/diff.d.ts.map +1 -1
- package/out/replicache/src/sync/diff.js +4 -1
- package/out/replicache/src/sync/diff.js.map +1 -1
- package/out/replicache/src/sync/pull.d.ts.map +1 -1
- package/out/replicache/src/sync/pull.js +4 -1
- package/out/replicache/src/sync/pull.js.map +1 -1
- package/out/replicache/src/sync/push.d.ts.map +1 -1
- package/out/replicache/src/sync/push.js +5 -2
- package/out/replicache/src/sync/push.js.map +1 -1
- package/out/shared/src/asserts.d.ts +1 -1
- package/out/shared/src/asserts.d.ts.map +1 -1
- package/out/shared/src/asserts.js +1 -1
- package/out/shared/src/asserts.js.map +1 -1
- package/out/z2s/src/compiler.d.ts.map +1 -1
- package/out/z2s/src/compiler.js +8 -2
- package/out/z2s/src/compiler.js.map +1 -1
- package/out/zero/package.json.js +1 -1
- package/out/zero-cache/src/db/transaction-pool.d.ts.map +1 -1
- package/out/zero-cache/src/db/transaction-pool.js +17 -11
- package/out/zero-cache/src/db/transaction-pool.js.map +1 -1
- package/out/zero-cache/src/observability/events.d.ts.map +1 -1
- package/out/zero-cache/src/observability/events.js +28 -9
- package/out/zero-cache/src/observability/events.js.map +1 -1
- package/out/zero-cache/src/services/analyze.js +1 -0
- package/out/zero-cache/src/services/analyze.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/backfill-stream.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/backfill-stream.js +29 -14
- package/out/zero-cache/src/services/change-source/pg/backfill-stream.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts +6 -1
- package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/initial-sync.js +69 -25
- package/out/zero-cache/src/services/change-source/pg/initial-sync.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/ddl.js +6 -1
- package/out/zero-cache/src/services/change-source/pg/schema/ddl.js.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/init.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/pg/schema/init.js +12 -8
- package/out/zero-cache/src/services/change-source/pg/schema/init.js.map +1 -1
- package/out/zero-cache/src/services/change-source/protocol/current/data.d.ts +26 -0
- package/out/zero-cache/src/services/change-source/protocol/current/data.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/protocol/current/data.js +15 -3
- package/out/zero-cache/src/services/change-source/protocol/current/data.js.map +1 -1
- package/out/zero-cache/src/services/change-source/protocol/current/downstream.d.ts +30 -0
- package/out/zero-cache/src/services/change-source/protocol/current/downstream.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-source/protocol/current.js +2 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.js +8 -2
- package/out/zero-cache/src/services/change-streamer/change-streamer-service.js.map +1 -1
- package/out/zero-cache/src/services/change-streamer/change-streamer.d.ts +10 -0
- package/out/zero-cache/src/services/change-streamer/change-streamer.d.ts.map +1 -1
- package/out/zero-cache/src/services/replicator/change-processor.d.ts +2 -0
- package/out/zero-cache/src/services/replicator/change-processor.d.ts.map +1 -1
- package/out/zero-cache/src/services/replicator/change-processor.js +8 -6
- package/out/zero-cache/src/services/replicator/change-processor.js.map +1 -1
- package/out/zero-cache/src/services/replicator/incremental-sync.d.ts.map +1 -1
- package/out/zero-cache/src/services/replicator/incremental-sync.js +39 -1
- package/out/zero-cache/src/services/replicator/incremental-sync.js.map +1 -1
- package/out/zero-cache/src/services/replicator/replication-status.d.ts +4 -3
- package/out/zero-cache/src/services/replicator/replication-status.d.ts.map +1 -1
- package/out/zero-cache/src/services/replicator/replication-status.js +25 -10
- package/out/zero-cache/src/services/replicator/replication-status.js.map +1 -1
- package/out/zero-cache/src/services/run-ast.d.ts.map +1 -1
- package/out/zero-cache/src/services/run-ast.js +22 -2
- package/out/zero-cache/src/services/run-ast.js.map +1 -1
- package/out/zero-cache/src/services/running-state.d.ts +1 -0
- package/out/zero-cache/src/services/running-state.d.ts.map +1 -1
- package/out/zero-cache/src/services/running-state.js +4 -0
- package/out/zero-cache/src/services/running-state.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/cvr.js +8 -2
- package/out/zero-cache/src/services/view-syncer/cvr.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.js +10 -1
- package/out/zero-cache/src/services/view-syncer/pipeline-driver.js.map +1 -1
- package/out/zero-cache/src/services/view-syncer/snapshotter.d.ts +1 -1
- package/out/zero-cache/src/services/view-syncer/snapshotter.d.ts.map +1 -1
- package/out/zero-cache/src/services/view-syncer/snapshotter.js +15 -7
- package/out/zero-cache/src/services/view-syncer/snapshotter.js.map +1 -1
- package/out/zero-cache/src/types/subscription.d.ts +3 -1
- package/out/zero-cache/src/types/subscription.d.ts.map +1 -1
- package/out/zero-cache/src/types/subscription.js +29 -9
- package/out/zero-cache/src/types/subscription.js.map +1 -1
- package/out/zero-client/src/client/http-string.js.map +1 -1
- package/out/zero-client/src/client/version.js +1 -1
- package/out/zero-client/src/client/zero.js.map +1 -1
- package/out/zero-events/src/status.d.ts +8 -0
- package/out/zero-events/src/status.d.ts.map +1 -1
- package/out/zero-schema/src/permissions.d.ts.map +1 -1
- package/out/zero-schema/src/permissions.js +4 -1
- package/out/zero-schema/src/permissions.js.map +1 -1
- package/out/zero-server/src/process-mutations.d.ts.map +1 -1
- package/out/zero-server/src/process-mutations.js +13 -19
- package/out/zero-server/src/process-mutations.js.map +1 -1
- package/out/zql/src/builder/filter.d.ts.map +1 -1
- package/out/zql/src/builder/filter.js +5 -2
- package/out/zql/src/builder/filter.js.map +1 -1
- package/out/zql/src/ivm/constraint.js.map +1 -1
- package/package.json +1 -1
package/out/zero-cache/src/services/change-source/pg/initial-sync.js

@@ -108,7 +108,9 @@ async function initialSync(lc, shard, tx, upstreamURI, syncOptions, context) {
     const numWorkers = platform() === "win32" ? numTables : Math.min(tableCopyWorkers, numTables);
     const copyPool = pgClient(lc, upstreamURI, {
       max: numWorkers,
-      connection: { ["application_name"]: "initial-sync-copy-worker" }
+      connection: { ["application_name"]: "initial-sync-copy-worker" },
+      ["max_lifetime"]: 120 * 60
+      // set a long (2h) limit for COPY streaming
     });
     const copiers = startTableCopyWorkers(
       lc,
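Context for the new pool option: the `postgres` import and option names in the embedded source suggest `pgClient` wraps the postgres.js client, whose `max_lifetime` is expressed in seconds. A minimal sketch of the equivalent standalone configuration, assuming the postgres.js API; the URI and pool size below are placeholders:

```ts
// Sketch only: how the copy pool's options map onto postgres.js
// (https://github.com/porsager/postgres). URI and `max` are placeholders.
import postgres from "postgres";

const copyPool = postgres("postgres://user:pass@host:5432/db", {
  max: 4, // one connection per table-copy worker
  connection: {["application_name"]: "initial-sync-copy-worker"},
  ["max_lifetime"]: 120 * 60, // seconds: lets a COPY stream run for up to 2h
});
```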
@@ -119,21 +121,30 @@ async function initialSync(lc, shard, tx, upstreamURI, syncOptions, context) {
     );
     try {
       createLiteTables(tx, tables, initialVersion);
+      const downloads = await Promise.all(
+        tables.map(
+          (spec) => copiers.processReadTask(
+            (db, lc2) => getInitialDownloadState(lc2, db, spec)
+          )
+        )
+      );
       statusPublisher.publish(
         lc,
         "Initializing",
         `Copying ${numTables} upstream tables at version ${initialVersion}`,
-        5e3
+        5e3,
+        () => ({ downloadStatus: downloads.map(({ status }) => status) })
       );
       void copyProfiler?.start();
       const rowCounts = await Promise.all(
-        tables.map(
+        downloads.map(
           (table) => copiers.processReadTask(
             (db, lc2) => copy(lc2, table, copyPool, db, tx)
           )
         )
       );
       void copyProfiler?.stopAndDispose(lc, "initial-copy");
+      copiers.setDone();
       const total = rowCounts.reduce(
         (acc, curr) => ({
           rows: acc.rows + curr.rows,
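The reshaped try block makes two passes over the worker pool: one to compute per-table download state, then one to run the actual copies, with `copiers.setDone()` now issued right after the copies rather than in the `finally` block (see the next hunk). The new fifth argument to `publish` is a callback, so each periodic status publish re-reads the mutable per-table counters. An illustrative sketch of that callback pattern; `publishEvery` and its types are hypothetical stand-ins, not the real `ReplicationStatusPublisher` API:

```ts
// Illustrative only: a periodic publisher that re-invokes `extra` on every
// tick, so counters mutated elsewhere (e.g. status.rows in copy()'s flush
// loop) are read fresh each time rather than captured once.
type DownloadStatus = {
  table: string;
  columns: string[];
  rows: number;
  totalRows: number;
  totalBytes: number;
};

function publishEvery(
  intervalMs: number,
  message: string,
  extra: () => {downloadStatus: DownloadStatus[]},
): NodeJS.Timeout {
  return setInterval(() => {
    console.log(message, JSON.stringify(extra()));
  }, intervalMs);
}
```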
@@ -164,12 +175,7 @@ async function initialSync(lc, shard, tx, upstreamURI, syncOptions, context) {
         `Synced ${total.rows.toLocaleString()} rows of ${numTables} tables in ${publications} up to ${lsn} (flush: ${total.flushTime.toFixed(3)}, index: ${index.toFixed(3)}, total: ${elapsed.toFixed(3)} ms)`
       );
     } finally {
-      copiers.setDone();
-      if (platform() === "win32") {
-        void copyPool.end().catch((e) => lc.warn?.(`Error closing copyPool`, e));
-      } else {
-        await copyPool.end();
-      }
+      void copyPool.end().catch((e) => lc.warn?.(`Error closing copyPool`, e));
     }
   } catch (e) {
     lc.warn?.(`dropping replication slot ${slotName}`, e);
@@ -285,20 +291,58 @@ const INSERT_BATCH_SIZE = 50;
 const MB = 1024 * 1024;
 const MAX_BUFFERED_ROWS = 1e4;
 const BUFFERED_SIZE_THRESHOLD = 8 * MB;
-function makeSelectPublishedStmt(table, columns) {
+function makeDownloadStatements(table, cols) {
   const filterConditions = Object.values(table.publications).map(({ rowFilter }) => rowFilter).filter((f) => !!f);
-  return (
+  const where = filterConditions.length === 0 ? "" : (
     /*sql*/
-    `
-    SELECT ${columns.map(id).join(",")} FROM ${id(table.schema)}.${id(table.name)}` + (filterConditions.length === 0 ? "" : (
-      /*sql*/
-      ` WHERE ${filterConditions.join(" OR ")}`
-    ))
+    `WHERE ${filterConditions.join(" OR ")}`
+  );
+  const fromTable = (
+    /*sql*/
+    `FROM ${id(table.schema)}.${id(table.name)} ${where}`
   );
+  const totalBytes = `(${cols.map((col) => `SUM(COALESCE(pg_column_size(${id(col)}), 0))`).join(" + ")})`;
+  const stmts = {
+    select: (
+      /*sql*/
+      `SELECT ${cols.map(id).join(",")} ${fromTable}`
+    ),
+    getTotalRows: (
+      /*sql*/
+      `SELECT COUNT(*) AS "totalRows" ${fromTable}`
+    ),
+    getTotalBytes: (
+      /*sql*/
+      `SELECT ${totalBytes} AS "totalBytes" ${fromTable}`
+    )
+  };
+  return stmts;
+}
+async function getInitialDownloadState(lc, sql, spec) {
+  const start = performance.now();
+  const table = liteTableName(spec);
+  const columns = Object.keys(spec.columns);
+  const stmts = makeDownloadStatements(spec, columns);
+  const rowsResult = sql.unsafe(stmts.getTotalRows).execute();
+  const bytesResult = sql.unsafe(stmts.getTotalBytes).execute();
+  const state = {
+    spec,
+    status: {
+      table,
+      columns,
+      rows: 0,
+      totalRows: Number((await rowsResult)[0].totalRows),
+      totalBytes: Number((await bytesResult)[0].totalBytes)
+    }
+  };
+  const elapsed = (performance.now() - start).toFixed(3);
+  lc.info?.(`Computed initial download state for ${table} (${elapsed} ms)`, {
+    state: state.status
+  });
+  return state;
 }
-async function copy(lc, table, dbClient, from, to) {
+async function copy(lc, { spec: table, status }, dbClient, from, to) {
   const start = performance.now();
-  let rows = 0;
   let flushTime = 0;
   const tableName = liteTableName(table);
   const orderedColumns = Object.entries(table.columns);
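Worked example: for a hypothetical published table `public.issue` with columns `id` and `title` and a publication row filter of `owner_id = 42`, and assuming `id()` double-quotes identifiers, the templates above expand to:

```ts
// Hand-expanded from makeDownloadStatements() for a hypothetical table
// "public"."issue" (columns: id, title; row filter: owner_id = 42).
const select =
  `SELECT "id","title" FROM "public"."issue" WHERE owner_id = 42`;
const getTotalRows =
  `SELECT COUNT(*) AS "totalRows" FROM "public"."issue" WHERE owner_id = 42`;
const getTotalBytes =
  `SELECT (SUM(COALESCE(pg_column_size("id"), 0)) + SUM(COALESCE(pg_column_size("title"), 0))) ` +
  `AS "totalBytes" FROM "public"."issue" WHERE owner_id = 42`;
```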
@@ -315,7 +359,7 @@ async function copy(lc, table, dbClient, from, to) {
   const insertBatchStmt = to.prepare(
     insertSql + `,${valuesSql}`.repeat(INSERT_BATCH_SIZE - 1)
   );
-  const selectStmt = makeSelectPublishedStmt(table, columnNames);
+  const { select } = makeDownloadStatements(table, columnNames);
   const valuesPerRow = columnSpecs.length;
   const valuesPerBatch = valuesPerRow * INSERT_BATCH_SIZE;
   const pendingValues = Array.from({
@@ -338,14 +382,14 @@ async function copy(lc, table, dbClient, from, to) {
       pendingValues[i] = void 0;
     }
     pendingSize = 0;
-    rows += flushedRows;
+    status.rows += flushedRows;
     const elapsed2 = performance.now() - start2;
     flushTime += elapsed2;
     lc.debug?.(
       `flushed ${flushedRows} ${tableName} rows (${flushedSize} bytes) in ${elapsed2.toFixed(3)} ms`
     );
   }
-  lc.info?.(`Starting copy stream of ${tableName}:`, selectStmt);
+  lc.info?.(`Starting copy stream of ${tableName}:`, select);
   const pgParsers = await getTypeParsers(dbClient, { returnJsonAsString: true });
   const parsers = columnSpecs.map((c) => {
     const pgParse = pgParsers.getTypeParser(c.typeOID);
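The switch from a local `rows` counter to `status.rows` is what makes the live `downloadStatus` reporting work: `getInitialDownloadState` and `copy` share one status object per table, so the flush loop's increments are immediately visible to the publish callback. A stripped-down sketch of that sharing; the types are assumptions inferred from the shapes built above:

```ts
// Stripped-down sketch: one mutable status object per table, written by the
// copier's flush loop and read by the status reporter. Field names follow
// the diff above; DownloadState here is an assumed shape, not the real type.
type DownloadState = {
  status: {table: string; rows: number; totalRows: number};
};

function formatProgress(downloads: DownloadState[]): string {
  return downloads
    .map(({status}) => `${status.table}: ${status.rows}/${status.totalRows} rows`)
    .join(", ");
}
```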
@@ -358,7 +402,7 @@ async function copy(lc, table, dbClient, from, to) {
   const tsvParser = new TsvParser();
   let col = 0;
   await pipeline(
-    await from.unsafe(`COPY (${selectStmt}) TO STDOUT`).readable(),
+    await from.unsafe(`COPY (${select}) TO STDOUT`).readable(),
     new Writable({
       highWaterMark: BUFFERED_SIZE_THRESHOLD,
       write(chunk, _encoding, callback) {
@@ -390,14 +434,14 @@ async function copy(lc, table, dbClient, from, to) {
   );
   const elapsed = performance.now() - start;
   lc.info?.(
-    `Finished copying ${rows} rows into ${tableName} (flush: ${flushTime.toFixed(3)} ms) (total: ${elapsed.toFixed(3)} ms) `
+    `Finished copying ${status.rows} rows into ${tableName} (flush: ${flushTime.toFixed(3)} ms) (total: ${elapsed.toFixed(3)} ms) `
   );
-  return { rows, flushTime };
+  return { rows: status.rows, flushTime };
 }
 export {
   INSERT_BATCH_SIZE,
   createReplicationSlot,
   initialSync,
-  makeSelectPublishedStmt
+  makeDownloadStatements
 };
 //# sourceMappingURL=initial-sync.js.map
AAAA,IAErG,UAAA;AAEE,WAAK,SAAS,MAAM,MAAM,OAAK,GAAG,OAAO,0BAA0B,CAAC,CAAC;AAAA,IACvE;AAAA,EACF,SAAS,GAAG;AAIV,OAAG,OAAO,6BAA6B,QAAQ,IAAI,CAAC;AACpD,UAAM;AAAA;AAAA,4BAEkB,QAAQ;AAAA,MAC9B,MAAM,CAAAC,OAAK,GAAG,OAAO,mCAAmC,QAAQ,IAAIA,EAAC,CAAC;AACxE,UAAM,gBAAgB,qBAAqB,IAAI,gBAAgB,CAAC;AAAA,EAClE,UAAA;AACE,oBAAgB,KAAA;AAChB,UAAM,mBAAmB,IAAA;AACzB,UAAM,IAAI,IAAA;AAAA,EACZ;AACF;AAEA,eAAe,oBAAoB,KAAiB;AAClD,QAAM,EAAC,UAAU,QAAA,KACf,MAAM;AAAA;AAAA;AAAA,KAIN,CAAC;AAEH,MAAI,aAAa,WAAW;AAC1B,UAAM,IAAI;AAAA,MACR,uEAAuE,QAAQ;AAAA,IAAA;AAAA,EAEnF;AACA,MAAI,UAAU,MAAQ;AACpB,UAAM,IAAI;AAAA,MACR,sDAAsD,OAAO;AAAA,IAAA;AAAA,EAEjE;AACF;AAEA,eAAe,sBACb,IACA,KACA,OACA,WAAW,MACwB;AACnC,QAAM,EAAC,UAAU,KAAA,IAAQ,IAAI;AAC7B,KAAG,OAAO,oCAAoC,QAAQ,IAAI,IAAI,EAAE;AAEhE,QAAM,kBAAkB,IAAI,KAAK,KAAK;AACtC,QAAM,EAAC,aAAA,IAAgB,MAAM,uBAAuB,KAAK,KAAK;AAE9D,MAAI,UAAU;AACZ,QAAI,QAAQ;AACZ,UAAM,0BAA0B,aAAa;AAAA,MAC3C,CAAA,MAAK,CAAC,EAAE,WAAW,GAAG;AAAA,IAAA;AAExB,UAAM,SAAS,MAAM;AAAA,4DACmC,IAAI,YAAY,CAAC;AAAA,QACrE,OAAA;AACJ,QAAI,OAAO,WAAW,aAAa,QAAQ;AACzC,SAAG;AAAA,QACD,iCAAiC,YAAY,mBACvC,OAAO,MAAM;AAAA,MAAA;AAAA,IAEvB,WACE,CAAC,OAAO,IAAI,IAAI,MAAM,YAAY,GAAG,IAAI,IAAI,uBAAuB,CAAC,GACrE;AACA,SAAG;AAAA,QACD,2BAA2B,MAAM,YAAY,uCAC1B,uBAAuB;AAAA,MAAA;AAAA,IAE9C,OAAO;AACL,cAAQ;AAAA,IACV;AACA,QAAI,CAAC,OAAO;AACV,YAAM,IAAI,OAAO,UAAU,MAAM,OAAO,MAAM,QAAQ,CAAC;AACvD,aAAO,sBAAsB,IAAI,KAAK,OAAO,KAAK;AAAA,IACpD;AAAA,EACF;AACA,SAAO,EAAC,aAAA;AACV;AAEA,SAAS,sBACP,IACA,IACA,UACA,YACA,WACiB;AACjB,QAAM,EAAC,KAAA,IAAQ,eAAe,QAAQ;AACtC,QAAM,eAAe,IAAI;AAAA,IACvB;AAAA,IACAF;AAAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAEF,eAAa,IAAI,EAAE;AAEnB,KAAG,OAAO,WAAW,UAAU,oBAAoB,SAAS,SAAS;AAErE,MAAI,SAAS,QAAQ,SAAS,IAAI,IAAI,IAAI;AACxC,OAAG;AAAA,MACD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAAA;AAAA,EAOJ;AACA,SAAO;AACT;AAaA,eAAsB,sBACpB,IACA,SACA,UAC0B;AAC1B,QAAM,QACJ,MAAM,QAAQ;AAAA;AAAA,IACJ,4BAA4B,QAAQ;AAAA,EAAA,GAE9C,CAAC;AACH,KAAG,OAAO,4BAA4B,QAAQ,IAAI,IAAI;AACtD,SAAO;AACT;AAEA,SAAS,iBACP,IACA,QACA,gBACA;AAGA,QAAM,iBAAiB,KAAK,oBAAoB,YAAY,EAAE,CAAC;AAC/D,aAAW,KAAK,QAAQ;AACtB,OAAG,KAAK,yBAAyB,kBAAkB,GAAG,cAAc,CAAC,CAAC;AACtE,UAAM,YAAY,cAAc,CAAC;AACjC,eAAW,CAAC,SAAS,OAAO,KAAK,OAAO,QAAQ,EAAE,OAAO,GAAG;AAC1D,qBAAe,OAAO,WAAW,SAAS,OAAO;AAAA,IACnD;AAAA,EACF;AACF;AAEA,SAAS,kBAAkB,IAAc,SAAsB;AAC7D,aAAW,SAAS,SAAS;AAC3B,OAAG,KAAK,yBAAyB,uBAAuB,KAAK,CAAC,CAAC;AAAA,EACjE;AACF;AAMO,MAAM,oBAAoB;AAEjC,MAAM,KAAK,OAAO;AAClB,MAAM,oBAAoB;AAC1B,MAAM,0BAA0B,IAAI;AAQ7B,SAAS,uBACd,OACA,MACoB;AACpB,QAAM,mBAAmB,OAAO,OAAO,MAAM,YAAY,EACtD,IAAI,CAAC,EAAC,UAAA,MAAe,SAAS,EAC9B,OAAO,CAAA,MAAK,CAAC,CAAC,CAAC;AAClB,QAAM,QACJ,iBAAiB,WAAW,IACxB;AAAA;AAAA,IACQ,SAAS,iBAAiB,KAAK,MAAM,CAAC;AAAA;AACpD,QAAM;AAAA;AAAA,IAAoB,QAAQ,GAAG,MAAM,MAAM,CAAC,IAAI,GAAG,MAAM,IAAI,CAAC,IAAI,KAAK;AAAA;AAC7E,QAAM,aAAa,IAAI,KAAK,IAAI,CAAA,QAAO,+BAA+B,GAAG,GAAG,CAAC,QAAQ,EAAE,KAAK,KAAK,CAAC;AAClG,QAAM,QAAQ;AAAA,IACZ;AAAA;AAAA,MAAgB,UAAU,KAAK,IAAI,EAAE,EAAE,KAAK,GAAG,CAAC,IAAI,SAAS;AAAA;AAAA,IAC7D;AAAA;AAAA,MAAsB,kCAAkC,SAAS;AAAA;AAAA,IACjE;AAAA;AAAA,MAAuB,UAAU,UAAU,oBAAoB,SAAS;AAAA;AAAA,EAAA;AAE1E,SAAO;AACT;AAOA,eAAe,wBACb,IACA,KACA,MACwB;AACxB,QAAM,QAAQ,YAAY,IAAA;AAC1B,QAAM,QAAQ,cAAc,IAAI;AAChC,QAAM,UAAU,OAAO,KAAK,KAAK,OAAO;AACxC,QAAM,QAAQ,uBAAuB,MAAM,OAAO;AAClD,QAAM,aAAa,IAChB,OAA8B,MAAM,YAAY,EAChD,QAAA;AACH,QAAM,cAAc,IACjB,OAA+B,MAAM,aAAa,EAClD,QAAA;AAEH,QAAM,QAAuB;AAAA,IAC3B;AAAA,IACA,QAAQ;AAAA,MACN;AAAA,MACA;AAAA,MACA,MAAM;AAAA,MACN,WAAW,QAAQ,MAAM,YAAY,CAAC,EAAE,SAAS;AAAA,MACjD,YAAY,QAAQ,MAAM,aAAa,CAAC,EAAE,UAAU;AAAA,IAAA;AAAA,EACtD;AAEF,QAAM,WAAW,YAAY,IAAA,IAAQ,OAAO,QAAQ,CAAC;AACrD,KAAG,OAAO,uCAAuC,KAAK,KAAK,OAA
O,QAAQ;AAAA,IACxE,OAAO,MAAM;AAAA,EAAA,CACd;AACD,SAAO;AACT;AAEA,eAAe,KACb,IACA,EAAC,MAAM,OAAO,UACd,UACA,MACA,IACA;AACA,QAAM,QAAQ,YAAY,IAAA;AAC1B,MAAI,YAAY;AAEhB,QAAM,YAAY,cAAc,KAAK;AACrC,QAAM,iBAAiB,OAAO,QAAQ,MAAM,OAAO;AAEnD,QAAM,cAAc,eAAe,IAAI,CAAC,CAAC,CAAC,MAAM,CAAC;AACjD,QAAM,cAAc,eAAe,IAAI,CAAC,CAAC,OAAO,IAAI,MAAM,IAAI;AAC9D,QAAM,mBAAmB,YAAY,IAAI,CAAA,MAAK,GAAG,CAAC,CAAC,EAAE,KAAK,GAAG;AAG7D,QAAM,YACJ,YAAY,SAAS,IAAI,IAAI,KAAK,OAAO,YAAY,SAAS,CAAC,CAAC,OAAO;AACzE,QAAM;AAAA;AAAA,IAAoB;AAAA,mBACT,SAAS,MAAM,gBAAgB,YAAY,SAAS;AAAA;AACrE,QAAM,aAAa,GAAG,QAAQ,SAAS;AAEvC,QAAM,kBAAkB,GAAG;AAAA,IACzB,YAAY,IAAI,SAAS,GAAG,OAAO,oBAAoB,CAAC;AAAA,EAAA;AAG1D,QAAM,EAAC,OAAA,IAAU,uBAAuB,OAAO,WAAW;AAC1D,QAAM,eAAe,YAAY;AACjC,QAAM,iBAAiB,eAAe;AAGtC,QAAM,gBAAiC,MAAM,KAAK;AAAA,IAChD,QAAQ,oBAAoB;AAAA,EAAA,CAC7B;AACD,MAAI,cAAc;AAClB,MAAI,cAAc;AAElB,WAAS,QAAQ;AACf,UAAMG,SAAQ,YAAY,IAAA;AAC1B,UAAM,cAAc;AACpB,UAAM,cAAc;AAEpB,QAAI,IAAI;AACR,WAAO,cAAc,mBAAmB,eAAe,mBAAmB;AACxE,sBAAgB,IAAI,cAAc,MAAM,GAAI,KAAK,cAAe,CAAC;AAAA,IACnE;AAEA,WAAO,cAAc,GAAG,eAAe;AACrC,iBAAW,IAAI,cAAc,MAAM,GAAI,KAAK,YAAa,CAAC;AAAA,IAC5D;AACA,aAAS,IAAI,GAAG,IAAI,aAAa,KAAK;AAGpC,oBAAc,CAAC,IAAI;AAAA,IACrB;AACA,kBAAc;AACd,WAAO,QAAQ;AAEf,UAAMC,WAAU,YAAY,IAAA,IAAQD;AACpC,iBAAaC;AACb,OAAG;AAAA,MACD,WAAW,WAAW,IAAI,SAAS,UAAU,WAAW,cAAcA,SAAQ,QAAQ,CAAC,CAAC;AAAA,IAAA;AAAA,EAE5F;AAEA,KAAG,OAAO,2BAA2B,SAAS,KAAK,MAAM;AACzD,QAAM,YAAY,MAAM,eAAe,UAAU,EAAC,oBAAoB,MAAK;AAC3E,QAAM,UAAU,YAAY,IAAI,CAAA,MAAK;AACnC,UAAM,UAAU,UAAU,cAAc,EAAE,OAAO;AACjD,WAAO,CAAC,QACN;AAAA,MACE,QAAQ,GAAG;AAAA,MACX,EAAE;AAAA,MACF;AAAA,IAAA;AAAA,EAEN,CAAC;AAED,QAAM,YAAY,IAAI,UAAA;AACtB,MAAI,MAAM;AAEV,QAAM;AAAA,IACJ,MAAM,KAAK,OAAO,SAAS,MAAM,aAAa,EAAE,SAAA;AAAA,IAChD,IAAI,SAAS;AAAA,MACX,eAAe;AAAA,MAEf,MACE,OACA,WACA,UACA;AACA,YAAI;AACF,qBAAW,QAAQ,UAAU,MAAM,KAAK,GAAG;AACzC,2BAAe,SAAS,OAAO,IAAI,KAAK;AACxC,0BAAc,cAAc,eAAe,GAAG,IAC5C,SAAS,OAAO,OAAO,QAAQ,GAAG,EAAE,IAAI;AAE1C,gBAAI,EAAE,QAAQ,QAAQ,QAAQ;AAC5B,oBAAM;AACN,kBACE,EAAE,eAAe,oBAAoB,gBACrC,eAAe,yBACf;AACA,sBAAA;AAAA,cACF;AAAA,YACF;AAAA,UACF;AACA,mBAAA;AAAA,QACF,SAAS,GAAG;AACV,mBAAS,aAAa,QAAQ,IAAI,IAAI,MAAM,OAAO,CAAC,CAAC,CAAC;AAAA,QACxD;AAAA,MACF;AAAA,MAEA,OAAO,CAAC,aAAsC;AAC5C,YAAI;AACF,gBAAA;AACA,mBAAA;AAAA,QACF,SAAS,GAAG;AACV,mBAAS,aAAa,QAAQ,IAAI,IAAI,MAAM,OAAO,CAAC,CAAC,CAAC;AAAA,QACxD;AAAA,MACF;AAAA,IAAA,CACD;AAAA,EAAA;AAGH,QAAM,UAAU,YAAY,IAAA,IAAQ;AACpC,KAAG;AAAA,IACD,oBAAoB,OAAO,IAAI,cAAc,SAAS,YACzC,UAAU,QAAQ,CAAC,CAAC,gBAAgB,QAAQ,QAAQ,CAAC,CAAC;AAAA,EAAA;AAErE,SAAO,EAAC,MAAM,OAAO,MAAM,UAAA;AAC7B;"}
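
The backfill-stream source embedded in the map above streams `COPY ... TO STDOUT` output into SQLite, buffering parsed values into a flat preallocated array and flushing with a prepared multi-row INSERT (batches of `INSERT_BATCH_SIZE = 50`), then single-row INSERTs for the remainder. A minimal standalone sketch of that flush pattern — `Stmt` is a hypothetical stand-in for a better-sqlite3-style prepared statement, not the package's type:

```ts
// Sketch only; mirrors the flush() logic in the backfill-stream source above.
interface Stmt {
  run(params: unknown[]): void;
}

const INSERT_BATCH_SIZE = 50; // the empirically chosen "sweet spot"

function flushBuffered(
  pendingValues: unknown[], // flat, row-major buffer of bind values
  pendingRows: number, // complete rows currently buffered
  valuesPerRow: number,
  insertBatchStmt: Stmt, // INSERT ... VALUES (?,...),(?,...) x INSERT_BATCH_SIZE
  insertStmt: Stmt, // INSERT ... VALUES (?,...)
): void {
  const valuesPerBatch = valuesPerRow * INSERT_BATCH_SIZE;
  let l = 0;
  // Full batches: one multi-row statement per INSERT_BATCH_SIZE rows.
  // Note: strictly greater, matching the source -- a buffer of exactly
  // INSERT_BATCH_SIZE rows falls through to single-row inserts.
  for (; pendingRows > INSERT_BATCH_SIZE; pendingRows -= INSERT_BATCH_SIZE) {
    insertBatchStmt.run(pendingValues.slice(l, (l += valuesPerBatch)));
  }
  // Remaining rows are inserted individually.
  for (; pendingRows > 0; pendingRows--) {
    insertStmt.run(pendingValues.slice(l, (l += valuesPerRow)));
  }
}
```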
@@ -1 +1 @@
-
{"version":3,"file":"ddl.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/ddl.ts"],"names":[],"mappings":"AAEA,OAAO,KAAK,CAAC,MAAM,wCAAwC,CAAC;AAC5D,OAAO,EAAiB,KAAK,WAAW,EAAC,MAAM,6BAA6B,CAAC;AAc7E,eAAO,MAAM,gBAAgB,IAAI,CAAC;AAQlC,eAAO,MAAM,cAAc;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;aAGzB,CAAC;AASH,eAAO,MAAM,mBAAmB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;aAE9B,CAAC;AAEH,MAAM,MAAM,aAAa,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,mBAAmB,CAAC,CAAC;AAEhE;;;;;;;;;;;GAWG;AACH,eAAO,MAAM,oBAAoB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;aAG/B,CAAC;AAEH,MAAM,MAAM,cAAc,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,oBAAoB,CAAC,CAAC;AAElE,eAAO,MAAM,sBAAsB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;eAGlC,CAAC;AAEF,MAAM,MAAM,gBAAgB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,sBAAsB,CAAC,CAAC;
+
{"version":3,"file":"ddl.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/ddl.ts"],"names":[],"mappings":"AAEA,OAAO,KAAK,CAAC,MAAM,wCAAwC,CAAC;AAC5D,OAAO,EAAiB,KAAK,WAAW,EAAC,MAAM,6BAA6B,CAAC;AAc7E,eAAO,MAAM,gBAAgB,IAAI,CAAC;AAQlC,eAAO,MAAM,cAAc;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;aAGzB,CAAC;AASH,eAAO,MAAM,mBAAmB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;aAE9B,CAAC;AAEH,MAAM,MAAM,aAAa,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,mBAAmB,CAAC,CAAC;AAEhE;;;;;;;;;;;GAWG;AACH,eAAO,MAAM,oBAAoB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;aAG/B,CAAC;AAEH,MAAM,MAAM,cAAc,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,oBAAoB,CAAC,CAAC;AAElE,eAAO,MAAM,sBAAsB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;eAGlC,CAAC;AAEF,MAAM,MAAM,gBAAgB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,sBAAsB,CAAC,CAAC;AAmMtE,eAAO,MAAM,IAAI,2HAQP,CAAC;AAEX,wBAAgB,4BAA4B,CAAC,KAAK,EAAE,WAAW,UA0C9D;AAGD,wBAAgB,0BAA0B,CACxC,KAAK,EAAE,MAAM,EACb,OAAO,EAAE,MAAM,GAAG,MAAM,UAgBzB"}
@@ -110,7 +110,12 @@ BEGIN
 
   -- Filter DDL updates that are not relevant to the shard (i.e. publications) when possible.
 
-  IF target.object_type = 'table' OR target.object_type = 'table column' THEN
+  -- Note: ALTER TABLE statements may *remove* the table from the set of published
+  -- tables, and there is no way to determine if the table "used to be" in the
+  -- set. Thus, all ALTER TABLE statements must produce a ddl update, similar to
+  -- any DROP * statement.
+  IF (target.object_type = 'table' AND tag != 'ALTER TABLE')
+     OR target.object_type = 'table column' THEN
     SELECT ns.nspname AS "schema", c.relname AS "name" FROM pg_class AS c
     JOIN pg_namespace AS ns ON c.relnamespace = ns.oid
     JOIN pg_publication_tables AS pb ON pb.schemaname = ns.nspname AND pb.tablename = c.relname
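
Paraphrasing the changed guard as a pure predicate (TypeScript here is illustrative only; the real check runs inside the plpgsql `emit_ddl_end` function above):

```ts
// Illustrative paraphrase of the new guard; not shipped code.
// 'table' targets may be filtered by publication membership only when the
// tag is not ALTER TABLE, since an ALTER TABLE may have just removed the
// table from the published set, leaving no way to observe the old membership.
function eligibleForPublicationFilter(
  objectType: string | null,
  tag: string,
): boolean {
  return (
    (objectType === 'table' && tag !== 'ALTER TABLE') ||
    objectType === 'table column'
  );
}

// Before this diff the condition was simply
//   objectType === 'table' || objectType === 'table column'
// so an ALTER TABLE that dropped a table from a publication matched no
// publication row and was ignored, never emitting a ddlUpdate.
```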
@@ -1 +1 @@
-
{"version":3,"file":"ddl.js","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/ddl.ts"],"sourcesContent":["import {literal as lit} from 'pg-format';\nimport {assert} from '../../../../../../shared/src/asserts.ts';\nimport * as v from '../../../../../../shared/src/valita.ts';\nimport {upstreamSchema, type ShardConfig} from '../../../../types/shards.ts';\nimport {id} from '../../../../types/sql.ts';\nimport {\n indexDefinitionsQuery,\n publishedSchema,\n publishedTableQuery,\n} from './published.ts';\n\n// Sent in the 'version' tag of \"ddlStart\" and \"ddlUpdate\" event messages.\n// This is used to ensure that the message constructed in the upstream\n// Trigger function is compatible with the code processing it in the zero-cache.\n//\n// Increment this when changing the format of the contents of the \"ddl\" events.\n// This will allow old / incompatible code to detect the change and abort.\nexport const PROTOCOL_VERSION = 1;\n\nconst triggerEvent = v.object({\n context: v.object({query: v.string()}).rest(v.string()),\n});\n\n// All DDL events contain a snapshot of the current tables and indexes that\n// are published / relevant to the shard.\nexport const ddlEventSchema = triggerEvent.extend({\n version: v.literal(PROTOCOL_VERSION),\n schema: publishedSchema,\n});\n\n// The `ddlStart` message is computed before every DDL event, regardless of\n// whether the subsequent event affects the shard. Downstream processing should\n// capture the contained schema information in order to determine the schema\n// changes necessary to apply a subsequent `ddlUpdate` message. Note that a\n// `ddlUpdate` message may not follow, as updates determined to be irrelevant\n// to the shard will not result in a message. However, all `ddlUpdate` messages\n// are guaranteed to be preceded by a `ddlStart` message.\nexport const ddlStartEventSchema = ddlEventSchema.extend({\n type: v.literal('ddlStart'),\n});\n\nexport type DdlStartEvent = v.Infer<typeof ddlStartEventSchema>;\n\n/**\n * The {@link DdlUpdateEvent} contains an updated schema resulting from\n * a particular ddl event. The event type provides information\n * (i.e. constraints) on the difference from the schema of the preceding\n * {@link DdlStartEvent}.\n *\n * Note that in almost all cases (the exception being `CREATE` events),\n * it is possible that there is no relevant difference between the\n * ddl-start schema and the ddl-update schema, as many aspects of the\n * schema (e.g. 
column constraints) are not relevant to downstream\n * replication.\n */\nexport const ddlUpdateEventSchema = ddlEventSchema.extend({\n type: v.literal('ddlUpdate'),\n event: v.object({tag: v.string()}),\n});\n\nexport type DdlUpdateEvent = v.Infer<typeof ddlUpdateEventSchema>;\n\nexport const replicationEventSchema = v.union(\n ddlStartEventSchema,\n ddlUpdateEventSchema,\n);\n\nexport type ReplicationEvent = v.Infer<typeof replicationEventSchema>;\n\n// Creates a function that appends `_{shard-num}` to the input and\n// quotes the result to be a valid identifier.\nfunction append(shardNum: number) {\n return (name: string) => id(name + '_' + String(shardNum));\n}\n\n/**\n * Event trigger functions contain the core logic that are invoked by triggers.\n *\n * Note that although many of these functions can theoretically be parameterized and\n * shared across shards, it is advantageous to keep the functions in each shard\n * isolated from each other in order to avoid the complexity of shared-function\n * versioning.\n *\n * In a sense, shards (and their triggers and functions) should be thought of as\n * execution environments that can be updated at different schedules. If per-shard\n * triggers called into shared functions, we would have to consider versioning the\n * functions when changing their behavior, backwards compatibility, removal of\n * unused versions, etc. (not unlike versioning of npm packages).\n *\n * Instead, we opt for the simplicity and isolation of having each shard\n * completely own (and maintain) the entirety of its trigger/function stack.\n */\nfunction createEventFunctionStatements(shard: ShardConfig) {\n const {appID, shardNum, publications} = shard;\n const schema = id(upstreamSchema(shard)); // e.g. \"{APP_ID}_{SHARD_ID}\"\n return /*sql*/ `\nCREATE SCHEMA IF NOT EXISTS ${schema};\n\nCREATE OR REPLACE FUNCTION ${schema}.get_trigger_context()\nRETURNS record AS $$\nDECLARE\n result record;\nBEGIN\n SELECT current_query() AS \"query\" into result;\n RETURN result;\nEND\n$$ LANGUAGE plpgsql;\n\n\nCREATE OR REPLACE FUNCTION ${schema}.notice_ignore(tag TEXT, target record)\nRETURNS void AS $$\nBEGIN\n RAISE NOTICE 'zero(%) ignoring % %', ${lit(shardNum)}, tag, row_to_json(target);\nEND\n$$ LANGUAGE plpgsql;\n\n\nCREATE OR REPLACE FUNCTION ${schema}.schema_specs()\nRETURNS TEXT AS $$\nDECLARE\n tables record;\n indexes record;\nBEGIN\n ${publishedTableQuery(publications)} INTO tables;\n ${indexDefinitionsQuery(publications)} INTO indexes;\n RETURN json_build_object(\n 'tables', tables.tables,\n 'indexes', indexes.indexes\n );\nEND\n$$ LANGUAGE plpgsql;\n\n\nCREATE OR REPLACE FUNCTION ${schema}.emit_ddl_start()\nRETURNS event_trigger AS $$\nDECLARE\n schema_specs TEXT;\n message TEXT;\nBEGIN\n SELECT ${schema}.schema_specs() INTO schema_specs;\n\n SELECT json_build_object(\n 'type', 'ddlStart',\n 'version', ${PROTOCOL_VERSION},\n 'schema', schema_specs::json,\n 'context', ${schema}.get_trigger_context()\n ) INTO message;\n\n PERFORM pg_logical_emit_message(true, ${lit(\n `${appID}/${shardNum}`,\n )}, message);\nEND\n$$ LANGUAGE plpgsql;\n\n\nCREATE OR REPLACE FUNCTION ${schema}.emit_ddl_end(tag TEXT)\nRETURNS void AS $$\nDECLARE\n publications TEXT[];\n target RECORD;\n relevant RECORD;\n schema_specs TEXT;\n message TEXT;\n event TEXT;\nBEGIN\n publications := ARRAY[${lit(publications)}];\n\n SELECT objid, object_type, object_identity \n FROM pg_event_trigger_ddl_commands() \n LIMIT 1 INTO target;\n\n -- Filter DDL updates that are not relevant to the shard (i.e. 
publications) when possible.\n\n IF target.object_type = 'table' OR target.object_type = 'table column' THEN\n SELECT ns.nspname AS \"schema\", c.relname AS \"name\" FROM pg_class AS c\n JOIN pg_namespace AS ns ON c.relnamespace = ns.oid\n JOIN pg_publication_tables AS pb ON pb.schemaname = ns.nspname AND pb.tablename = c.relname\n WHERE c.oid = target.objid AND pb.pubname = ANY (publications)\n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(tag, target);\n RETURN;\n END IF;\n\n ELSIF target.object_type = 'index' THEN\n SELECT ns.nspname AS \"schema\", c.relname AS \"name\" FROM pg_class AS c\n JOIN pg_namespace AS ns ON c.relnamespace = ns.oid\n JOIN pg_indexes as ind ON ind.schemaname = ns.nspname AND ind.indexname = c.relname\n JOIN pg_publication_tables AS pb ON pb.schemaname = ns.nspname AND pb.tablename = ind.tablename\n WHERE c.oid = target.objid AND pb.pubname = ANY (publications)\n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(tag, target);\n RETURN;\n END IF;\n\n ELSIF target.object_type = 'publication relation' THEN\n SELECT pb.pubname FROM pg_publication_rel AS rel\n JOIN pg_publication AS pb ON pb.oid = rel.prpubid\n WHERE rel.oid = target.objid AND pb.pubname = ANY (publications) \n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(tag, target);\n RETURN;\n END IF;\n\n ELSIF target.object_type = 'publication namespace' THEN\n SELECT pb.pubname FROM pg_publication_namespace AS ns\n JOIN pg_publication AS pb ON pb.oid = ns.pnpubid\n WHERE ns.oid = target.objid AND pb.pubname = ANY (publications) \n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(tag, target);\n RETURN;\n END IF;\n\n ELSIF target.object_type = 'schema' THEN\n SELECT ns.nspname AS \"schema\", c.relname AS \"name\" FROM pg_class AS c\n JOIN pg_namespace AS ns ON c.relnamespace = ns.oid\n JOIN pg_publication_tables AS pb ON pb.schemaname = ns.nspname AND pb.tablename = c.relname\n WHERE ns.oid = target.objid AND pb.pubname = ANY (publications)\n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(tag, target);\n RETURN;\n END IF;\n\n -- no-op CREATE IF NOT EXIST statements\n ELSIF tag LIKE 'CREATE %' AND target.object_type IS NULL THEN\n PERFORM ${schema}.notice_ignore(tag, target);\n RETURN;\n END IF;\n\n RAISE INFO 'Creating ddlUpdate for % %', tag, row_to_json(target);\n\n -- Construct and emit the DdlUpdateEvent message.\n SELECT json_build_object('tag', tag) INTO event;\n \n SELECT ${schema}.schema_specs() INTO schema_specs;\n\n SELECT json_build_object(\n 'type', 'ddlUpdate',\n 'version', ${PROTOCOL_VERSION},\n 'schema', schema_specs::json,\n 'event', event::json,\n 'context', ${schema}.get_trigger_context()\n ) INTO message;\n\n PERFORM pg_logical_emit_message(true, ${lit(\n `${appID}/${shardNum}`,\n )}, message);\nEND\n$$ LANGUAGE plpgsql;\n`;\n}\n\n// Exported for testing.\nexport const TAGS = [\n 'CREATE TABLE',\n 'ALTER TABLE',\n 'CREATE INDEX',\n 'DROP TABLE',\n 'DROP INDEX',\n 'ALTER PUBLICATION',\n 'ALTER SCHEMA',\n] as const;\n\nexport function createEventTriggerStatements(shard: ShardConfig) {\n // Better to assert here than get a cryptic syntax error from Postgres.\n assert(shard.publications.length, `shard publications must be non-empty`);\n\n // Unlike functions, which are namespaced in shard-specific schemas,\n // EVENT TRIGGER names are in the global namespace and thus must include\n // the appID and shardNum.\n const {appID, shardNum} = shard;\n const sharded 
= append(shardNum);\n const schema = id(upstreamSchema(shard));\n\n const triggers = [\n dropEventTriggerStatements(shard.appID, shard.shardNum),\n createEventFunctionStatements(shard),\n ];\n\n // A single ddl_command_start trigger covering all relevant tags.\n triggers.push(/*sql*/ `\nCREATE EVENT TRIGGER ${sharded(`${appID}_ddl_start`)}\n ON ddl_command_start\n WHEN TAG IN (${lit(TAGS)})\n EXECUTE PROCEDURE ${schema}.emit_ddl_start();\n`);\n\n // A per-tag ddl_command_end trigger that dispatches to ${schema}.emit_ddl_end(tag)\n for (const tag of TAGS) {\n const tagID = tag.toLowerCase().replace(' ', '_');\n triggers.push(/*sql*/ `\nCREATE OR REPLACE FUNCTION ${schema}.emit_${tagID}() \nRETURNS event_trigger AS $$\nBEGIN\n PERFORM ${schema}.emit_ddl_end(${lit(tag)});\nEND\n$$ LANGUAGE plpgsql;\n\nCREATE EVENT TRIGGER ${sharded(`${appID}_${tagID}`)}\n ON ddl_command_end\n WHEN TAG IN (${lit(tag)})\n EXECUTE PROCEDURE ${schema}.emit_${tagID}();\n`);\n }\n return triggers.join('');\n}\n\n// Exported for testing.\nexport function dropEventTriggerStatements(\n appID: string,\n shardID: string | number,\n) {\n const stmts: string[] = [];\n // A single ddl_command_start trigger covering all relevant tags.\n stmts.push(/*sql*/ `\n DROP EVENT TRIGGER IF EXISTS ${id(`${appID}_ddl_start_${shardID}`)};\n `);\n\n // A per-tag ddl_command_end trigger that dispatches to ${schema}.emit_ddl_end(tag)\n for (const tag of TAGS) {\n const tagID = tag.toLowerCase().replace(' ', '_');\n stmts.push(/*sql*/ `\n DROP EVENT TRIGGER IF EXISTS ${id(`${appID}_${tagID}_${shardID}`)};\n `);\n }\n return stmts.join('');\n}\n"],"names":["v.object","v.string","v.literal","v.union","lit"],"mappings":";;;;;;;AAiBO,MAAM,mBAAmB;AAEhC,MAAM,eAAeA,OAAS;AAAA,EAC5B,SAASA,OAAS,EAAC,OAAOC,OAAE,GAAS,EAAE,KAAKA,QAAU;AACxD,CAAC;AAIM,MAAM,iBAAiB,aAAa,OAAO;AAAA,EAChD,SAASC,QAAU,gBAAgB;AAAA,EACnC,QAAQ;AACV,CAAC;AASM,MAAM,sBAAsB,eAAe,OAAO;AAAA,EACvD,MAAMA,QAAU,UAAU;AAC5B,CAAC;AAgBM,MAAM,uBAAuB,eAAe,OAAO;AAAA,EACxD,MAAMA,QAAU,WAAW;AAAA,EAC3B,OAAOF,OAAS,EAAC,KAAKC,OAAE,GAAS;AACnC,CAAC;AAIM,MAAM,yBAAyBE;AAAAA,EACpC;AAAA,EACA;AACF;AAMA,SAAS,OAAO,UAAkB;AAChC,SAAO,CAAC,SAAiB,GAAG,OAAO,MAAM,OAAO,QAAQ,CAAC;AAC3D;AAmBA,SAAS,8BAA8B,OAAoB;AACzD,QAAM,EAAC,OAAO,UAAU,aAAA,IAAgB;AACxC,QAAM,SAAS,GAAG,eAAe,KAAK,CAAC;AACvC;AAAA;AAAA,IAAe;AAAA,8BACa,MAAM;AAAA;AAAA,6BAEP,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,6BAWN,MAAM;AAAA;AAAA;AAAA,yCAGMC,UAAI,QAAQ,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA,6BAKzB,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAM/B,oBAAoB,YAAY,CAAC;AAAA,IACjC,sBAAsB,YAAY,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,6BASV,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,WAMxB,MAAM;AAAA;AAAA;AAAA;AAAA,iBAIA,gBAAgB;AAAA;AAAA,iBAEhB,MAAM;AAAA;AAAA;AAAA,0CAGmBA;AAAAA,MACtC,GAAG,KAAK,IAAI,QAAQ;AAAA,IAAA,CACrB;AAAA;AAAA;AAAA;AAAA;AAAA,6BAK0B,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,0BAUTA,UAAI,YAAY,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAe3B,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAYN,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAUN,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAUN,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAWN,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,cAMR,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,WAST,MAAM;AAAA;AAAA;AAAA;AAAA,iBAIA,gBAAgB;AAAA;AAAA;AAAA,iBAGhB,MAAM;AAAA;AAAA;AAAA,0CAGmBA;AAAAA,MACtC,GAAG,KAAK,IAAI,QAAQ;AAAA,IAAA,CACrB;AAAA;AAAA;AAAA;AAAA;AAIH;AAGO,MAAM,OAAO;AAAA,EAClB;AAAA,EACA;AAAA,EACA;AAAA,EACA;A
AAA,EACA;AAAA,EACA;AAAA,EACA;AACF;AAEO,SAAS,6BAA6B,OAAoB;AAE/D,SAAO,MAAM,aAAa,QAAQ,sCAAsC;AAKxE,QAAM,EAAC,OAAO,SAAA,IAAY;AAC1B,QAAM,UAAU,OAAO,QAAQ;AAC/B,QAAM,SAAS,GAAG,eAAe,KAAK,CAAC;AAEvC,QAAM,WAAW;AAAA,IACf,2BAA2B,MAAM,OAAO,MAAM,QAAQ;AAAA,IACtD,8BAA8B,KAAK;AAAA,EAAA;AAIrC,WAAS;AAAA;AAAA,IAAa;AAAA,uBACD,QAAQ,GAAG,KAAK,YAAY,CAAC;AAAA;AAAA,iBAEnCA,UAAI,IAAI,CAAC;AAAA,sBACJ,MAAM;AAAA;AAAA,EAAA;AAI1B,aAAW,OAAO,MAAM;AACtB,UAAM,QAAQ,IAAI,YAAA,EAAc,QAAQ,KAAK,GAAG;AAChD,aAAS;AAAA;AAAA,MAAa;AAAA,6BACG,MAAM,SAAS,KAAK;AAAA;AAAA;AAAA,YAGrC,MAAM,iBAAiBA,UAAI,GAAG,CAAC;AAAA;AAAA;AAAA;AAAA,uBAIpB,QAAQ,GAAG,KAAK,IAAI,KAAK,EAAE,CAAC;AAAA;AAAA,iBAElCA,UAAI,GAAG,CAAC;AAAA,sBACH,MAAM,SAAS,KAAK;AAAA;AAAA,IAAA;AAAA,EAExC;AACA,SAAO,SAAS,KAAK,EAAE;AACzB;AAGO,SAAS,2BACd,OACA,SACA;AACA,QAAM,QAAkB,CAAA;AAExB,QAAM;AAAA;AAAA,IAAa;AAAA,mCACc,GAAG,GAAG,KAAK,cAAc,OAAO,EAAE,CAAC;AAAA;AAAA,EAAA;AAIpE,aAAW,OAAO,MAAM;AACtB,UAAM,QAAQ,IAAI,YAAA,EAAc,QAAQ,KAAK,GAAG;AAChD,UAAM;AAAA;AAAA,MAAa;AAAA,qCACc,GAAG,GAAG,KAAK,IAAI,KAAK,IAAI,OAAO,EAAE,CAAC;AAAA;AAAA,IAAA;AAAA,EAErE;AACA,SAAO,MAAM,KAAK,EAAE;AACtB;"}
+
{"version":3,"file":"ddl.js","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/ddl.ts"],"sourcesContent":["import {literal as lit} from 'pg-format';\nimport {assert} from '../../../../../../shared/src/asserts.ts';\nimport * as v from '../../../../../../shared/src/valita.ts';\nimport {upstreamSchema, type ShardConfig} from '../../../../types/shards.ts';\nimport {id} from '../../../../types/sql.ts';\nimport {\n indexDefinitionsQuery,\n publishedSchema,\n publishedTableQuery,\n} from './published.ts';\n\n// Sent in the 'version' tag of \"ddlStart\" and \"ddlUpdate\" event messages.\n// This is used to ensure that the message constructed in the upstream\n// Trigger function is compatible with the code processing it in the zero-cache.\n//\n// Increment this when changing the format of the contents of the \"ddl\" events.\n// This will allow old / incompatible code to detect the change and abort.\nexport const PROTOCOL_VERSION = 1;\n\nconst triggerEvent = v.object({\n context: v.object({query: v.string()}).rest(v.string()),\n});\n\n// All DDL events contain a snapshot of the current tables and indexes that\n// are published / relevant to the shard.\nexport const ddlEventSchema = triggerEvent.extend({\n version: v.literal(PROTOCOL_VERSION),\n schema: publishedSchema,\n});\n\n// The `ddlStart` message is computed before every DDL event, regardless of\n// whether the subsequent event affects the shard. Downstream processing should\n// capture the contained schema information in order to determine the schema\n// changes necessary to apply a subsequent `ddlUpdate` message. Note that a\n// `ddlUpdate` message may not follow, as updates determined to be irrelevant\n// to the shard will not result in a message. However, all `ddlUpdate` messages\n// are guaranteed to be preceded by a `ddlStart` message.\nexport const ddlStartEventSchema = ddlEventSchema.extend({\n type: v.literal('ddlStart'),\n});\n\nexport type DdlStartEvent = v.Infer<typeof ddlStartEventSchema>;\n\n/**\n * The {@link DdlUpdateEvent} contains an updated schema resulting from\n * a particular ddl event. The event type provides information\n * (i.e. constraints) on the difference from the schema of the preceding\n * {@link DdlStartEvent}.\n *\n * Note that in almost all cases (the exception being `CREATE` events),\n * it is possible that there is no relevant difference between the\n * ddl-start schema and the ddl-update schema, as many aspects of the\n * schema (e.g. 
column constraints) are not relevant to downstream\n * replication.\n */\nexport const ddlUpdateEventSchema = ddlEventSchema.extend({\n type: v.literal('ddlUpdate'),\n event: v.object({tag: v.string()}),\n});\n\nexport type DdlUpdateEvent = v.Infer<typeof ddlUpdateEventSchema>;\n\nexport const replicationEventSchema = v.union(\n ddlStartEventSchema,\n ddlUpdateEventSchema,\n);\n\nexport type ReplicationEvent = v.Infer<typeof replicationEventSchema>;\n\n// Creates a function that appends `_{shard-num}` to the input and\n// quotes the result to be a valid identifier.\nfunction append(shardNum: number) {\n return (name: string) => id(name + '_' + String(shardNum));\n}\n\n/**\n * Event trigger functions contain the core logic that are invoked by triggers.\n *\n * Note that although many of these functions can theoretically be parameterized and\n * shared across shards, it is advantageous to keep the functions in each shard\n * isolated from each other in order to avoid the complexity of shared-function\n * versioning.\n *\n * In a sense, shards (and their triggers and functions) should be thought of as\n * execution environments that can be updated at different schedules. If per-shard\n * triggers called into shared functions, we would have to consider versioning the\n * functions when changing their behavior, backwards compatibility, removal of\n * unused versions, etc. (not unlike versioning of npm packages).\n *\n * Instead, we opt for the simplicity and isolation of having each shard\n * completely own (and maintain) the entirety of its trigger/function stack.\n */\nfunction createEventFunctionStatements(shard: ShardConfig) {\n const {appID, shardNum, publications} = shard;\n const schema = id(upstreamSchema(shard)); // e.g. \"{APP_ID}_{SHARD_ID}\"\n return /*sql*/ `\nCREATE SCHEMA IF NOT EXISTS ${schema};\n\nCREATE OR REPLACE FUNCTION ${schema}.get_trigger_context()\nRETURNS record AS $$\nDECLARE\n result record;\nBEGIN\n SELECT current_query() AS \"query\" into result;\n RETURN result;\nEND\n$$ LANGUAGE plpgsql;\n\n\nCREATE OR REPLACE FUNCTION ${schema}.notice_ignore(tag TEXT, target record)\nRETURNS void AS $$\nBEGIN\n RAISE NOTICE 'zero(%) ignoring % %', ${lit(shardNum)}, tag, row_to_json(target);\nEND\n$$ LANGUAGE plpgsql;\n\n\nCREATE OR REPLACE FUNCTION ${schema}.schema_specs()\nRETURNS TEXT AS $$\nDECLARE\n tables record;\n indexes record;\nBEGIN\n ${publishedTableQuery(publications)} INTO tables;\n ${indexDefinitionsQuery(publications)} INTO indexes;\n RETURN json_build_object(\n 'tables', tables.tables,\n 'indexes', indexes.indexes\n );\nEND\n$$ LANGUAGE plpgsql;\n\n\nCREATE OR REPLACE FUNCTION ${schema}.emit_ddl_start()\nRETURNS event_trigger AS $$\nDECLARE\n schema_specs TEXT;\n message TEXT;\nBEGIN\n SELECT ${schema}.schema_specs() INTO schema_specs;\n\n SELECT json_build_object(\n 'type', 'ddlStart',\n 'version', ${PROTOCOL_VERSION},\n 'schema', schema_specs::json,\n 'context', ${schema}.get_trigger_context()\n ) INTO message;\n\n PERFORM pg_logical_emit_message(true, ${lit(\n `${appID}/${shardNum}`,\n )}, message);\nEND\n$$ LANGUAGE plpgsql;\n\n\nCREATE OR REPLACE FUNCTION ${schema}.emit_ddl_end(tag TEXT)\nRETURNS void AS $$\nDECLARE\n publications TEXT[];\n target RECORD;\n relevant RECORD;\n schema_specs TEXT;\n message TEXT;\n event TEXT;\nBEGIN\n publications := ARRAY[${lit(publications)}];\n\n SELECT objid, object_type, object_identity \n FROM pg_event_trigger_ddl_commands() \n LIMIT 1 INTO target;\n\n -- Filter DDL updates that are not relevant to the shard (i.e. 
publications) when possible.\n\n -- Note: ALTER TABLE statements may *remove* the table from the set of published\n -- tables, and there is no way to determine if the table \"used to be\" in the\n -- set. Thus, all ALTER TABLE statements must produce a ddl update, similar to\n -- any DROP * statement.\n IF (target.object_type = 'table' AND tag != 'ALTER TABLE') \n OR target.object_type = 'table column' THEN\n SELECT ns.nspname AS \"schema\", c.relname AS \"name\" FROM pg_class AS c\n JOIN pg_namespace AS ns ON c.relnamespace = ns.oid\n JOIN pg_publication_tables AS pb ON pb.schemaname = ns.nspname AND pb.tablename = c.relname\n WHERE c.oid = target.objid AND pb.pubname = ANY (publications)\n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(tag, target);\n RETURN;\n END IF;\n\n ELSIF target.object_type = 'index' THEN\n SELECT ns.nspname AS \"schema\", c.relname AS \"name\" FROM pg_class AS c\n JOIN pg_namespace AS ns ON c.relnamespace = ns.oid\n JOIN pg_indexes as ind ON ind.schemaname = ns.nspname AND ind.indexname = c.relname\n JOIN pg_publication_tables AS pb ON pb.schemaname = ns.nspname AND pb.tablename = ind.tablename\n WHERE c.oid = target.objid AND pb.pubname = ANY (publications)\n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(tag, target);\n RETURN;\n END IF;\n\n ELSIF target.object_type = 'publication relation' THEN\n SELECT pb.pubname FROM pg_publication_rel AS rel\n JOIN pg_publication AS pb ON pb.oid = rel.prpubid\n WHERE rel.oid = target.objid AND pb.pubname = ANY (publications) \n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(tag, target);\n RETURN;\n END IF;\n\n ELSIF target.object_type = 'publication namespace' THEN\n SELECT pb.pubname FROM pg_publication_namespace AS ns\n JOIN pg_publication AS pb ON pb.oid = ns.pnpubid\n WHERE ns.oid = target.objid AND pb.pubname = ANY (publications) \n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(tag, target);\n RETURN;\n END IF;\n\n ELSIF target.object_type = 'schema' THEN\n SELECT ns.nspname AS \"schema\", c.relname AS \"name\" FROM pg_class AS c\n JOIN pg_namespace AS ns ON c.relnamespace = ns.oid\n JOIN pg_publication_tables AS pb ON pb.schemaname = ns.nspname AND pb.tablename = c.relname\n WHERE ns.oid = target.objid AND pb.pubname = ANY (publications)\n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(tag, target);\n RETURN;\n END IF;\n\n -- no-op CREATE IF NOT EXIST statements\n ELSIF tag LIKE 'CREATE %' AND target.object_type IS NULL THEN\n PERFORM ${schema}.notice_ignore(tag, target);\n RETURN;\n END IF;\n\n RAISE INFO 'Creating ddlUpdate for % %', tag, row_to_json(target);\n\n -- Construct and emit the DdlUpdateEvent message.\n SELECT json_build_object('tag', tag) INTO event;\n \n SELECT ${schema}.schema_specs() INTO schema_specs;\n\n SELECT json_build_object(\n 'type', 'ddlUpdate',\n 'version', ${PROTOCOL_VERSION},\n 'schema', schema_specs::json,\n 'event', event::json,\n 'context', ${schema}.get_trigger_context()\n ) INTO message;\n\n PERFORM pg_logical_emit_message(true, ${lit(\n `${appID}/${shardNum}`,\n )}, message);\nEND\n$$ LANGUAGE plpgsql;\n`;\n}\n\n// Exported for testing.\nexport const TAGS = [\n 'CREATE TABLE',\n 'ALTER TABLE',\n 'CREATE INDEX',\n 'DROP TABLE',\n 'DROP INDEX',\n 'ALTER PUBLICATION',\n 'ALTER SCHEMA',\n] as const;\n\nexport function createEventTriggerStatements(shard: ShardConfig) {\n // Better to assert here than get a cryptic syntax error from Postgres.\n 
assert(shard.publications.length, `shard publications must be non-empty`);\n\n // Unlike functions, which are namespaced in shard-specific schemas,\n // EVENT TRIGGER names are in the global namespace and thus must include\n // the appID and shardNum.\n const {appID, shardNum} = shard;\n const sharded = append(shardNum);\n const schema = id(upstreamSchema(shard));\n\n const triggers = [\n dropEventTriggerStatements(shard.appID, shard.shardNum),\n createEventFunctionStatements(shard),\n ];\n\n // A single ddl_command_start trigger covering all relevant tags.\n triggers.push(/*sql*/ `\nCREATE EVENT TRIGGER ${sharded(`${appID}_ddl_start`)}\n ON ddl_command_start\n WHEN TAG IN (${lit(TAGS)})\n EXECUTE PROCEDURE ${schema}.emit_ddl_start();\n`);\n\n // A per-tag ddl_command_end trigger that dispatches to ${schema}.emit_ddl_end(tag)\n for (const tag of TAGS) {\n const tagID = tag.toLowerCase().replace(' ', '_');\n triggers.push(/*sql*/ `\nCREATE OR REPLACE FUNCTION ${schema}.emit_${tagID}() \nRETURNS event_trigger AS $$\nBEGIN\n PERFORM ${schema}.emit_ddl_end(${lit(tag)});\nEND\n$$ LANGUAGE plpgsql;\n\nCREATE EVENT TRIGGER ${sharded(`${appID}_${tagID}`)}\n ON ddl_command_end\n WHEN TAG IN (${lit(tag)})\n EXECUTE PROCEDURE ${schema}.emit_${tagID}();\n`);\n }\n return triggers.join('');\n}\n\n// Exported for testing.\nexport function dropEventTriggerStatements(\n appID: string,\n shardID: string | number,\n) {\n const stmts: string[] = [];\n // A single ddl_command_start trigger covering all relevant tags.\n stmts.push(/*sql*/ `\n DROP EVENT TRIGGER IF EXISTS ${id(`${appID}_ddl_start_${shardID}`)};\n `);\n\n // A per-tag ddl_command_end trigger that dispatches to ${schema}.emit_ddl_end(tag)\n for (const tag of TAGS) {\n const tagID = tag.toLowerCase().replace(' ', '_');\n stmts.push(/*sql*/ `\n DROP EVENT TRIGGER IF EXISTS ${id(`${appID}_${tagID}_${shardID}`)};\n `);\n }\n return 
stmts.join('');\n}\n"],"names":["v.object","v.string","v.literal","v.union","lit"],"mappings":";;;;;;;AAiBO,MAAM,mBAAmB;AAEhC,MAAM,eAAeA,OAAS;AAAA,EAC5B,SAASA,OAAS,EAAC,OAAOC,OAAE,GAAS,EAAE,KAAKA,QAAU;AACxD,CAAC;AAIM,MAAM,iBAAiB,aAAa,OAAO;AAAA,EAChD,SAASC,QAAU,gBAAgB;AAAA,EACnC,QAAQ;AACV,CAAC;AASM,MAAM,sBAAsB,eAAe,OAAO;AAAA,EACvD,MAAMA,QAAU,UAAU;AAC5B,CAAC;AAgBM,MAAM,uBAAuB,eAAe,OAAO;AAAA,EACxD,MAAMA,QAAU,WAAW;AAAA,EAC3B,OAAOF,OAAS,EAAC,KAAKC,OAAE,GAAS;AACnC,CAAC;AAIM,MAAM,yBAAyBE;AAAAA,EACpC;AAAA,EACA;AACF;AAMA,SAAS,OAAO,UAAkB;AAChC,SAAO,CAAC,SAAiB,GAAG,OAAO,MAAM,OAAO,QAAQ,CAAC;AAC3D;AAmBA,SAAS,8BAA8B,OAAoB;AACzD,QAAM,EAAC,OAAO,UAAU,aAAA,IAAgB;AACxC,QAAM,SAAS,GAAG,eAAe,KAAK,CAAC;AACvC;AAAA;AAAA,IAAe;AAAA,8BACa,MAAM;AAAA;AAAA,6BAEP,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,6BAWN,MAAM;AAAA;AAAA;AAAA,yCAGMC,UAAI,QAAQ,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA,6BAKzB,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAM/B,oBAAoB,YAAY,CAAC;AAAA,IACjC,sBAAsB,YAAY,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,6BASV,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,WAMxB,MAAM;AAAA;AAAA;AAAA;AAAA,iBAIA,gBAAgB;AAAA;AAAA,iBAEhB,MAAM;AAAA;AAAA;AAAA,0CAGmBA;AAAAA,MACtC,GAAG,KAAK,IAAI,QAAQ;AAAA,IAAA,CACrB;AAAA;AAAA;AAAA;AAAA;AAAA,6BAK0B,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,0BAUTA,UAAI,YAAY,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAoB3B,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAYN,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAUN,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAUN,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAWN,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,cAMR,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,WAST,MAAM;AAAA;AAAA;AAAA;AAAA,iBAIA,gBAAgB;AAAA;AAAA;AAAA,iBAGhB,MAAM;AAAA;AAAA;AAAA,0CAGmBA;AAAAA,MACtC,GAAG,KAAK,IAAI,QAAQ;AAAA,IAAA,CACrB;AAAA;AAAA;AAAA;AAAA;AAIH;AAGO,MAAM,OAAO;AAAA,EAClB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF;AAEO,SAAS,6BAA6B,OAAoB;AAE/D,SAAO,MAAM,aAAa,QAAQ,sCAAsC;AAKxE,QAAM,EAAC,OAAO,SAAA,IAAY;AAC1B,QAAM,UAAU,OAAO,QAAQ;AAC/B,QAAM,SAAS,GAAG,eAAe,KAAK,CAAC;AAEvC,QAAM,WAAW;AAAA,IACf,2BAA2B,MAAM,OAAO,MAAM,QAAQ;AAAA,IACtD,8BAA8B,KAAK;AAAA,EAAA;AAIrC,WAAS;AAAA;AAAA,IAAa;AAAA,uBACD,QAAQ,GAAG,KAAK,YAAY,CAAC;AAAA;AAAA,iBAEnCA,UAAI,IAAI,CAAC;AAAA,sBACJ,MAAM;AAAA;AAAA,EAAA;AAI1B,aAAW,OAAO,MAAM;AACtB,UAAM,QAAQ,IAAI,YAAA,EAAc,QAAQ,KAAK,GAAG;AAChD,aAAS;AAAA;AAAA,MAAa;AAAA,6BACG,MAAM,SAAS,KAAK;AAAA;AAAA;AAAA,YAGrC,MAAM,iBAAiBA,UAAI,GAAG,CAAC;AAAA;AAAA;AAAA;AAAA,uBAIpB,QAAQ,GAAG,KAAK,IAAI,KAAK,EAAE,CAAC;AAAA;AAAA,iBAElCA,UAAI,GAAG,CAAC;AAAA,sBACH,MAAM,SAAS,KAAK;AAAA;AAAA,IAAA;AAAA,EAExC;AACA,SAAO,SAAS,KAAK,EAAE;AACzB;AAGO,SAAS,2BACd,OACA,SACA;AACA,QAAM,QAAkB,CAAA;AAExB,QAAM;AAAA;AAAA,IAAa;AAAA,mCACc,GAAG,GAAG,KAAK,cAAc,OAAO,EAAE,CAAC;AAAA;AAAA,EAAA;AAIpE,aAAW,OAAO,MAAM;AACtB,UAAM,QAAQ,IAAI,YAAA,EAAc,QAAQ,KAAK,GAAG;AAChD,UAAM;AAAA;AAAA,MAAa;AAAA,qCACc,GAAG,GAAG,KAAK,IAAI,KAAK,IAAI,OAAO,EAAE,CAAC;AAAA;AAAA,IAAA;AAAA,EAErE;AACA,SAAO,MAAM,KAAK,EAAE;AACtB;"}
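
The `ddl.ts` source carried in this map also shows the per-tag dispatch: one `emit_<tag>` function and one globally named EVENT TRIGGER per command tag. A sketch of that naming scheme, matching the source's `append`/`tagID` logic (standalone and illustrative only):

```ts
// EVENT TRIGGER names are global in Postgres, so the appID and shardNum
// must both be baked into the name.
const TAGS = [
  'CREATE TABLE',
  'ALTER TABLE',
  'CREATE INDEX',
  'DROP TABLE',
  'DROP INDEX',
  'ALTER PUBLICATION',
  'ALTER SCHEMA',
] as const;

function eventTriggerName(appID: string, tag: string, shardNum: number): string {
  // replace(' ', '_') swaps only the first space; every tag has exactly one.
  const tagID = tag.toLowerCase().replace(' ', '_');
  return `${appID}_${tagID}_${shardNum}`;
}

// All per-tag triggers for app "zero", shard 0:
const names = TAGS.map(tag => eventTriggerName('zero', tag, 0));
// -> ['zero_create_table_0', 'zero_alter_table_0', ...]
```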
@@ -1 +1 @@
-
{"version":3,"file":"init.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/init.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AASjD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,yBAAyB,CAAC;AACxD,OAAO,EAAiB,KAAK,WAAW,EAAC,MAAM,6BAA6B,CAAC;AAa7E;;GAEG;AACH,wBAAsB,iBAAiB,CACrC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,WAAW,GACjB,OAAO,CAAC,IAAI,CAAC,CAgBf;AAED;;GAEG;AACH,wBAAsB,iBAAiB,CACrC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,WAAW,EAClB,cAAc,EAAE,MAAM,GACrB,OAAO,CAAC,IAAI,CAAC,CAqBf;
+
{"version":3,"file":"init.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/init.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AASjD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,yBAAyB,CAAC;AACxD,OAAO,EAAiB,KAAK,WAAW,EAAC,MAAM,6BAA6B,CAAC;AAa7E;;GAEG;AACH,wBAAsB,iBAAiB,CACrC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,WAAW,GACjB,OAAO,CAAC,IAAI,CAAC,CAgBf;AAED;;GAEG;AACH,wBAAsB,iBAAiB,CACrC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,WAAW,EAClB,cAAc,EAAE,MAAM,GACrB,OAAO,CAAC,IAAI,CAAC,CAqBf;AA+KD,wBAAsB,uBAAuB,CAC3C,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,WAAW,iBAYnB"}
@@ -178,14 +178,8 @@ function getIncrementalMigrations(shard, replicaVersion) {
       }
     },
     // Upgrade DDL trigger to log more info to PG logs.
-    14: {
-      migrateSchema: async (lc, sql) => {
-        const [{ publications }] = await sql`
-          SELECT publications FROM ${sql(shardConfigTable)}`;
-        await setupTriggers(lc, sql, { ...shard, publications });
-        lc.info?.(`Upgraded DDL event triggers`);
-      }
-    },
+    // (subsumed by v16)
+    14: {},
     // Add initialSyncContext column to replicas table.
     15: {
       migrateSchema: async (_, sql) => {
@@ -195,6 +189,16 @@ function getIncrementalMigrations(shard, replicaVersion) {
           ADD COLUMN "subscriberContext" JSON
         `;
       }
+    },
+    // Upgrade DDL trigger to fire on all ALTER TABLE statements
+    // to catch the *removal* of a table from the published set.
+    16: {
+      migrateSchema: async (lc, sql) => {
+        const [{ publications }] = await sql`
+          SELECT publications FROM ${sql(shardConfigTable)}`;
+        await setupTriggers(lc, sql, { ...shard, publications });
+        lc.info?.(`Upgraded DDL event triggers`);
+      }
     }
   };
 }
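
Taken together, the two hunks replace the v14 trigger upgrade with an empty stub and re-run the trigger setup as v16, which installs the new ALTER TABLE behavior. A simplified sketch of the resulting map shape — the types and the `sql`/`setupTriggers` declarations below are stand-ins, not the package's own:

```ts
// Sketch only; illustrative stand-ins for the package's migration types.
type LogCtx = {info?: (msg: string) => void};
declare const sqlClient: any; // stand-in for the postgres.js tagged-template client
declare function setupTriggers(
  lc: LogCtx,
  sql: typeof sqlClient,
  shard: Record<string, unknown>,
): Promise<void>;
declare const shard: Record<string, unknown>;
declare const shardConfigTable: string;

const incrementalMigrations = {
  // ...v4 through v13 elided...
  // Upgrade DDL trigger to log more info to PG logs.
  // (subsumed by v16)
  14: {},
  // v15 (initialSyncContext / subscriberContext columns) is unchanged.
  // Upgrade DDL trigger to fire on all ALTER TABLE statements
  // to catch the *removal* of a table from the published set.
  16: {
    migrateSchema: async (lc: LogCtx, sql: typeof sqlClient) => {
      const [{publications}] = await sql`
        SELECT publications FROM ${sql(shardConfigTable)}`;
      await setupTriggers(lc, sql, {...shard, publications});
      lc.info?.(`Upgraded DDL event triggers`);
    },
  },
};
```

Keeping `14: {}` preserves the version sequence for shards that already ran the old v14, while any shard below v16 picks up the new triggers through the v16 step.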
@@ -1 +1 @@
-
{"version":3,"file":"init.js","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/init.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {assert} from '../../../../../../shared/src/asserts.ts';\nimport * as v from '../../../../../../shared/src/valita.ts';\nimport {\n getVersionHistory,\n runSchemaMigrations,\n type IncrementalMigrationMap,\n type Migration,\n} from '../../../../db/migration.ts';\nimport type {PostgresDB} from '../../../../types/pg.ts';\nimport {upstreamSchema, type ShardConfig} from '../../../../types/shards.ts';\nimport {id} from '../../../../types/sql.ts';\nimport {AutoResetSignal} from '../../../change-streamer/schema/tables.ts';\nimport {decommissionShard} from '../decommission.ts';\nimport {publishedSchema} from './published.ts';\nimport {\n getMutationsTableDefinition,\n legacyReplicationSlot,\n metadataPublicationName,\n setupTablesAndReplication,\n setupTriggers,\n} from './shard.ts';\n\n/**\n * Ensures that a shard is set up for initial sync.\n */\nexport async function ensureShardSchema(\n lc: LogContext,\n db: PostgresDB,\n shard: ShardConfig,\n): Promise<void> {\n const initialSetup: Migration = {\n migrateSchema: (lc, tx) => setupTablesAndReplication(lc, tx, shard),\n minSafeVersion: 1,\n };\n await runSchemaMigrations(\n lc,\n `upstream-shard-${shard.appID}`,\n upstreamSchema(shard),\n db,\n initialSetup,\n // The incremental migration of any existing replicas will be replaced by\n // the incoming replica being synced, so the replicaVersion here is\n // unnecessary.\n getIncrementalMigrations(shard, 'obsolete'),\n );\n}\n\n/**\n * Updates the schema for an existing shard.\n */\nexport async function updateShardSchema(\n lc: LogContext,\n db: PostgresDB,\n shard: ShardConfig,\n replicaVersion: string,\n): Promise<void> {\n await runSchemaMigrations(\n lc,\n `upstream-shard-${shard.appID}`,\n upstreamSchema(shard),\n db,\n {\n // If the expected existing shard is absent, throw an\n // AutoResetSignal to backtrack and initial sync.\n migrateSchema: () => {\n throw new AutoResetSignal(\n `upstream shard ${upstreamSchema(shard)} is not initialized`,\n );\n },\n },\n getIncrementalMigrations(shard, replicaVersion),\n );\n\n // The decommission check is run in updateShardSchema so that it happens\n // after initial sync, and not when the shard schema is initially set up.\n await decommissionLegacyShard(lc, db, shard);\n}\n\nfunction getIncrementalMigrations(\n shard: ShardConfig,\n replicaVersion?: string,\n): IncrementalMigrationMap {\n const shardConfigTable = `${upstreamSchema(shard)}.shardConfig`;\n\n return {\n 4: {\n migrateSchema: () => {\n throw new AutoResetSignal('resetting to upgrade shard schema');\n },\n minSafeVersion: 3,\n },\n\n // v5 changes the upstream schema organization from \"zero_{SHARD_ID}\" to\n // the \"{APP_ID}_0\". An incremental migration indicates that the previous\n // SHARD_ID was \"0\" and the new APP_ID is \"zero\" (i.e. the default values\n // for those options). In this case, the upstream format is identical, and\n // no migration is necessary. 
However, the version is bumped to v5 to\n // indicate that it was created with the {APP_ID} configuration and should\n // not be decommissioned as a legacy shard.\n 5: {},\n\n 6: {\n migrateSchema: async (lc, sql) => {\n assert(\n replicaVersion,\n `replicaVersion is always passed for incremental migrations`,\n );\n await Promise.all([\n sql`\n ALTER TABLE ${sql(shardConfigTable)} ADD \"replicaVersion\" TEXT`,\n sql`\n UPDATE ${sql(shardConfigTable)} SET ${sql({replicaVersion})}`,\n ]);\n lc.info?.(\n `Recorded replicaVersion ${replicaVersion} in upstream shardConfig`,\n );\n },\n },\n\n // Updates the DDL event trigger protocol to v2, and adds support for\n // ALTER SCHEMA x RENAME TO y\n 7: {\n migrateSchema: async (lc, sql) => {\n const [{publications}] = await sql<{publications: string[]}[]>`\n SELECT publications FROM ${sql(shardConfigTable)}`;\n await setupTriggers(lc, sql, {...shard, publications});\n lc.info?.(`Upgraded to v2 event triggers`);\n },\n },\n\n // Adds support for non-disruptive resyncs, which tracks multiple\n // replicas with different slot names.\n 8: {\n migrateSchema: async (lc, sql) => {\n const legacyShardConfigSchema = v.object({\n replicaVersion: v.string().nullable(),\n initialSchema: publishedSchema.nullable(),\n });\n const result = await sql`\n SELECT \"replicaVersion\", \"initialSchema\" FROM ${sql(shardConfigTable)}`;\n assert(\n result.length === 1,\n () => `Expected exactly one shardConfig row, got ${result.length}`,\n );\n const {replicaVersion, initialSchema} = v.parse(\n result[0],\n legacyShardConfigSchema,\n 'passthrough',\n );\n\n await Promise.all([\n sql`\n CREATE TABLE ${sql(upstreamSchema(shard))}.replicas (\n \"slot\" TEXT PRIMARY KEY,\n \"version\" TEXT NOT NULL,\n \"initialSchema\" JSON NOT NULL\n );\n `,\n sql`\n INSERT INTO ${sql(upstreamSchema(shard))}.replicas ${sql({\n slot: legacyReplicationSlot(shard),\n version: replicaVersion,\n initialSchema,\n })}\n `,\n sql`\n ALTER TABLE ${sql(shardConfigTable)} DROP \"replicaVersion\", DROP \"initialSchema\"\n `,\n ]);\n lc.info?.(`Upgraded schema to support non-disruptive resyncs`);\n },\n },\n\n // Fixes field ordering of compound indexes. This incremental migration\n // only fixes indexes resulting from new schema changes. A full resync is\n // required to fix existing indexes.\n //\n // The migration has been subsumed by the identical logic for migrating\n // to v12 (i.e. a trigger upgrade).\n 9: {},\n\n // Adds the `mutations` table used to track mutation results.\n 10: {\n migrateSchema: async (lc, sql) => {\n await sql.unsafe(/*sql*/ `\n ${getMutationsTableDefinition(upstreamSchema(shard))}\n ALTER PUBLICATION ${id(metadataPublicationName(shard.appID, shard.shardNum))} ADD TABLE ${id(upstreamSchema(shard))}.\"mutations\";\n `);\n lc.info?.('Upgraded schema with new mutations table');\n },\n },\n\n // Formerly dropped the schemaVersions table, but restored in the v13\n // migration for rollback safety.\n 11: {},\n\n // Upgrade DDL trigger to query schemaOID, needed information for auto-backfill.\n // (subsumed by v14)\n 12: {},\n\n // Recreates the legacy schemaVersions table that was prematurely dropped\n // in the (former) v11 migration. 
It needs to remain present for at least one\n // release in order to be rollback safe.\n //\n // TODO: Drop the table once a release that no longer reads the table has\n // been rolled out.\n 13: {\n migrateSchema: async (_, sql) => {\n await sql`\n CREATE TABLE IF NOT EXISTS ${sql(upstreamSchema(shard))}.\"schemaVersions\" (\n \"minSupportedVersion\" INT4,\n \"maxSupportedVersion\" INT4,\n \"lock\" BOOL PRIMARY KEY DEFAULT true CHECK (lock)\n );`;\n await sql`\n INSERT INTO ${sql(upstreamSchema(shard))}.\"schemaVersions\" \n (\"lock\", \"minSupportedVersion\", \"maxSupportedVersion\")\n VALUES (true, 1, 1)\n ON CONFLICT DO NOTHING;\n `;\n },\n },\n\n // Upgrade DDL trigger to log more info to PG logs.\n 14: {\n migrateSchema: async (lc, sql) => {\n const [{publications}] = await sql<{publications: string[]}[]>`\n SELECT publications FROM ${sql(shardConfigTable)}`;\n await setupTriggers(lc, sql, {...shard, publications});\n lc.info?.(`Upgraded DDL event triggers`);\n },\n },\n\n // Add initialSyncContext column to replicas table.\n 15: {\n migrateSchema: async (_, sql) => {\n await sql`\n ALTER TABLE ${sql(upstreamSchema(shard))}.replicas\n ADD COLUMN \"initialSyncContext\" JSON,\n ADD COLUMN \"subscriberContext\" JSON\n `;\n },\n },\n };\n}\n\nexport async function decommissionLegacyShard(\n lc: LogContext,\n db: PostgresDB,\n shard: ShardConfig,\n) {\n if (shard.appID !== 'zero') {\n // When migration from non-default shard ids, e.g. \"zero_prod\" => \"prod_0\",\n // clean up the old \"zero_prod\" shard if it is pre-v5. Note that the v5\n // check is important to guard against cleaning up a **new** \"zero_0\" app\n // that coexists with the current App (with app-id === \"0\").\n const versionHistory = await getVersionHistory(db, `zero_${shard.appID}`);\n if (versionHistory !== null && versionHistory.schemaVersion < 5) {\n await decommissionShard(lc, db, 'zero', shard.appID);\n }\n 
}\n}\n"],"names":["lc","v.object","v.string","replicaVersion","v.parse"],"mappings":";;;;;;;;;;AA0BA,eAAsB,kBACpB,IACA,IACA,OACe;AACf,QAAM,eAA0B;AAAA,IAC9B,eAAe,CAACA,KAAI,OAAO,0BAA0BA,KAAI,IAAI,KAAK;AAAA,IAClE,gBAAgB;AAAA,EAAA;AAElB,QAAM;AAAA,IACJ;AAAA,IACA,kBAAkB,MAAM,KAAK;AAAA,IAC7B,eAAe,KAAK;AAAA,IACpB;AAAA,IACA;AAAA;AAAA;AAAA;AAAA,IAIA,yBAAyB,OAAO,UAAU;AAAA,EAAA;AAE9C;AAKA,eAAsB,kBACpB,IACA,IACA,OACA,gBACe;AACf,QAAM;AAAA,IACJ;AAAA,IACA,kBAAkB,MAAM,KAAK;AAAA,IAC7B,eAAe,KAAK;AAAA,IACpB;AAAA,IACA;AAAA;AAAA;AAAA,MAGE,eAAe,MAAM;AACnB,cAAM,IAAI;AAAA,UACR,kBAAkB,eAAe,KAAK,CAAC;AAAA,QAAA;AAAA,MAE3C;AAAA,IAAA;AAAA,IAEF,yBAAyB,OAAO,cAAc;AAAA,EAAA;AAKhD,QAAM,wBAAwB,IAAI,IAAI,KAAK;AAC7C;AAEA,SAAS,yBACP,OACA,gBACyB;AACzB,QAAM,mBAAmB,GAAG,eAAe,KAAK,CAAC;AAEjD,SAAO;AAAA,IACL,GAAG;AAAA,MACD,eAAe,MAAM;AACnB,cAAM,IAAI,gBAAgB,mCAAmC;AAAA,MAC/D;AAAA,MACA,gBAAgB;AAAA,IAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAUlB,GAAG,CAAA;AAAA,IAEH,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC;AAAA,UACE;AAAA,UACA;AAAA,QAAA;AAEF,cAAM,QAAQ,IAAI;AAAA,UAChB;AAAA,wBACc,IAAI,gBAAgB,CAAC;AAAA,UACnC;AAAA,mBACS,IAAI,gBAAgB,CAAC,QAAQ,IAAI,EAAC,eAAA,CAAe,CAAC;AAAA,QAAA,CAC5D;AACD,WAAG;AAAA,UACD,2BAA2B,cAAc;AAAA,QAAA;AAAA,MAE7C;AAAA,IAAA;AAAA;AAAA;AAAA,IAKF,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,CAAC,EAAC,cAAa,IAAI,MAAM;AAAA,qCACF,IAAI,gBAAgB,CAAC;AAClD,cAAM,cAAc,IAAI,KAAK,EAAC,GAAG,OAAO,cAAa;AACrD,WAAG,OAAO,+BAA+B;AAAA,MAC3C;AAAA,IAAA;AAAA;AAAA;AAAA,IAKF,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,0BAA0BC,OAAS;AAAA,UACvC,gBAAgBC,OAAE,EAAS,SAAA;AAAA,UAC3B,eAAe,gBAAgB,SAAA;AAAA,QAAS,CACzC;AACD,cAAM,SAAS,MAAM;AAAA,0DAC6B,IAAI,gBAAgB,CAAC;AACvE;AAAA,UACE,OAAO,WAAW;AAAA,UAClB,MAAM,6CAA6C,OAAO,MAAM;AAAA,QAAA;AAElE,cAAM,EAAC,gBAAAC,iBAAgB,cAAA,IAAiBC;AAAAA,UACtC,OAAO,CAAC;AAAA,UACR;AAAA,UACA;AAAA,QAAA;AAGF,cAAM,QAAQ,IAAI;AAAA,UAChB;AAAA,yBACe,IAAI,eAAe,KAAK,CAAC,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,UAMzC;AAAA,wBACc,IAAI,eAAe,KAAK,CAAC,CAAC,aAAa,IAAI;AAAA,YACvD,MAAM,sBAAsB,KAAK;AAAA,YACjC,SAASD;AAAAA,YACT;AAAA,UAAA,CACD,CAAC;AAAA;AAAA,UAEF;AAAA,wBACc,IAAI,gBAAgB,CAAC;AAAA;AAAA,QAAA,CAEpC;AACD,WAAG,OAAO,mDAAmD;AAAA,MAC/D;AAAA,IAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IASF,GAAG,CAAA;AAAA;AAAA,IAGH,IAAI;AAAA,MACF,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,IAAI;AAAA;AAAA,UAAe;AAAA,YACrB,4BAA4B,eAAe,KAAK,CAAC,CAAC;AAAA,8BAChC,GAAG,wBAAwB,MAAM,OAAO,MAAM,QAAQ,CAAC,CAAC,cAAc,GAAG,eAAe,KAAK,CAAC,CAAC;AAAA;AAAA,QAAA;AAErH,WAAG,OAAO,0CAA0C;AAAA,MACtD;AAAA,IAAA;AAAA;AAAA;AAAA,IAKF,IAAI,CAAA;AAAA;AAAA;AAAA,IAIJ,IAAI,CAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAQJ,IAAI;AAAA,MACF,eAAe,OAAO,GAAG,QAAQ;AAC/B,cAAM;AAAA,uCACyB,IAAI,eAAe,KAAK,CAAC,CAAC;AAAA;AAAA;AAAA;AAAA;AAKzD,cAAM;AAAA,wBACU,IAAI,eAAe,KAAK,CAAC,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA,MAK5C;AAAA,IAAA;AAAA;AAAA,IAIF,IAAI;AAAA,MACF,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,CAAC,EAAC,cAAa,IAAI,MAAM;AAAA,qCACF,IAAI,gBAAgB,CAAC;AAClD,cAAM,cAAc,IAAI,KAAK,EAAC,GAAG,OAAO,cAAa;AACrD,WAAG,OAAO,6BAA6B;AAAA,MACzC;AAAA,IAAA;AAAA;AAAA,IAIF,IAAI;AAAA,MACF,eAAe,OAAO,GAAG,QAAQ;AAC/B,cAAM;AAAA,wBACU,IAAI,eAAe,KAAK,CAAC,CAAC;AAAA;AAAA;AAAA;AAAA,MAI5C;AAAA,IAAA;AAAA,EACF;AAEJ;AAEA,eAAsB,wBACpB,IACA,IACA,OACA;AACA,MAAI,MAAM,UAAU,QAAQ;AAK1B,UAAM,iBAAiB,MAAM,kBAAkB,IAAI,QAAQ,MAAM,KAAK,EAAE;AACxE,QAAI,mBAAmB,QAAQ,eAAe,gBAAgB,GAAG;AAC/D,YAAM,kBAAkB,IAAI,IAAI,QAAQ,MAAM,KAAK;AAAA,IACrD;AAAA,EACF;AACF;"}
+
{"version":3,"file":"init.js","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/init.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {assert} from '../../../../../../shared/src/asserts.ts';\nimport * as v from '../../../../../../shared/src/valita.ts';\nimport {\n getVersionHistory,\n runSchemaMigrations,\n type IncrementalMigrationMap,\n type Migration,\n} from '../../../../db/migration.ts';\nimport type {PostgresDB} from '../../../../types/pg.ts';\nimport {upstreamSchema, type ShardConfig} from '../../../../types/shards.ts';\nimport {id} from '../../../../types/sql.ts';\nimport {AutoResetSignal} from '../../../change-streamer/schema/tables.ts';\nimport {decommissionShard} from '../decommission.ts';\nimport {publishedSchema} from './published.ts';\nimport {\n getMutationsTableDefinition,\n legacyReplicationSlot,\n metadataPublicationName,\n setupTablesAndReplication,\n setupTriggers,\n} from './shard.ts';\n\n/**\n * Ensures that a shard is set up for initial sync.\n */\nexport async function ensureShardSchema(\n lc: LogContext,\n db: PostgresDB,\n shard: ShardConfig,\n): Promise<void> {\n const initialSetup: Migration = {\n migrateSchema: (lc, tx) => setupTablesAndReplication(lc, tx, shard),\n minSafeVersion: 1,\n };\n await runSchemaMigrations(\n lc,\n `upstream-shard-${shard.appID}`,\n upstreamSchema(shard),\n db,\n initialSetup,\n // The incremental migration of any existing replicas will be replaced by\n // the incoming replica being synced, so the replicaVersion here is\n // unnecessary.\n getIncrementalMigrations(shard, 'obsolete'),\n );\n}\n\n/**\n * Updates the schema for an existing shard.\n */\nexport async function updateShardSchema(\n lc: LogContext,\n db: PostgresDB,\n shard: ShardConfig,\n replicaVersion: string,\n): Promise<void> {\n await runSchemaMigrations(\n lc,\n `upstream-shard-${shard.appID}`,\n upstreamSchema(shard),\n db,\n {\n // If the expected existing shard is absent, throw an\n // AutoResetSignal to backtrack and initial sync.\n migrateSchema: () => {\n throw new AutoResetSignal(\n `upstream shard ${upstreamSchema(shard)} is not initialized`,\n );\n },\n },\n getIncrementalMigrations(shard, replicaVersion),\n );\n\n // The decommission check is run in updateShardSchema so that it happens\n // after initial sync, and not when the shard schema is initially set up.\n await decommissionLegacyShard(lc, db, shard);\n}\n\nfunction getIncrementalMigrations(\n shard: ShardConfig,\n replicaVersion?: string,\n): IncrementalMigrationMap {\n const shardConfigTable = `${upstreamSchema(shard)}.shardConfig`;\n\n return {\n 4: {\n migrateSchema: () => {\n throw new AutoResetSignal('resetting to upgrade shard schema');\n },\n minSafeVersion: 3,\n },\n\n // v5 changes the upstream schema organization from \"zero_{SHARD_ID}\" to\n // the \"{APP_ID}_0\". An incremental migration indicates that the previous\n // SHARD_ID was \"0\" and the new APP_ID is \"zero\" (i.e. the default values\n // for those options). In this case, the upstream format is identical, and\n // no migration is necessary. 
However, the version is bumped to v5 to\n // indicate that it was created with the {APP_ID} configuration and should\n // not be decommissioned as a legacy shard.\n 5: {},\n\n 6: {\n migrateSchema: async (lc, sql) => {\n assert(\n replicaVersion,\n `replicaVersion is always passed for incremental migrations`,\n );\n await Promise.all([\n sql`\n ALTER TABLE ${sql(shardConfigTable)} ADD \"replicaVersion\" TEXT`,\n sql`\n UPDATE ${sql(shardConfigTable)} SET ${sql({replicaVersion})}`,\n ]);\n lc.info?.(\n `Recorded replicaVersion ${replicaVersion} in upstream shardConfig`,\n );\n },\n },\n\n // Updates the DDL event trigger protocol to v2, and adds support for\n // ALTER SCHEMA x RENAME TO y\n 7: {\n migrateSchema: async (lc, sql) => {\n const [{publications}] = await sql<{publications: string[]}[]>`\n SELECT publications FROM ${sql(shardConfigTable)}`;\n await setupTriggers(lc, sql, {...shard, publications});\n lc.info?.(`Upgraded to v2 event triggers`);\n },\n },\n\n // Adds support for non-disruptive resyncs, which tracks multiple\n // replicas with different slot names.\n 8: {\n migrateSchema: async (lc, sql) => {\n const legacyShardConfigSchema = v.object({\n replicaVersion: v.string().nullable(),\n initialSchema: publishedSchema.nullable(),\n });\n const result = await sql`\n SELECT \"replicaVersion\", \"initialSchema\" FROM ${sql(shardConfigTable)}`;\n assert(\n result.length === 1,\n () => `Expected exactly one shardConfig row, got ${result.length}`,\n );\n const {replicaVersion, initialSchema} = v.parse(\n result[0],\n legacyShardConfigSchema,\n 'passthrough',\n );\n\n await Promise.all([\n sql`\n CREATE TABLE ${sql(upstreamSchema(shard))}.replicas (\n \"slot\" TEXT PRIMARY KEY,\n \"version\" TEXT NOT NULL,\n \"initialSchema\" JSON NOT NULL\n );\n `,\n sql`\n INSERT INTO ${sql(upstreamSchema(shard))}.replicas ${sql({\n slot: legacyReplicationSlot(shard),\n version: replicaVersion,\n initialSchema,\n })}\n `,\n sql`\n ALTER TABLE ${sql(shardConfigTable)} DROP \"replicaVersion\", DROP \"initialSchema\"\n `,\n ]);\n lc.info?.(`Upgraded schema to support non-disruptive resyncs`);\n },\n },\n\n // Fixes field ordering of compound indexes. This incremental migration\n // only fixes indexes resulting from new schema changes. A full resync is\n // required to fix existing indexes.\n //\n // The migration has been subsumed by the identical logic for migrating\n // to v12 (i.e. a trigger upgrade).\n 9: {},\n\n // Adds the `mutations` table used to track mutation results.\n 10: {\n migrateSchema: async (lc, sql) => {\n await sql.unsafe(/*sql*/ `\n ${getMutationsTableDefinition(upstreamSchema(shard))}\n ALTER PUBLICATION ${id(metadataPublicationName(shard.appID, shard.shardNum))} ADD TABLE ${id(upstreamSchema(shard))}.\"mutations\";\n `);\n lc.info?.('Upgraded schema with new mutations table');\n },\n },\n\n // Formerly dropped the schemaVersions table, but restored in the v13\n // migration for rollback safety.\n 11: {},\n\n // Upgrade DDL trigger to query schemaOID, needed information for auto-backfill.\n // (subsumed by v14)\n 12: {},\n\n // Recreates the legacy schemaVersions table that was prematurely dropped\n // in the (former) v11 migration. 
It needs to remain present for at least one\n // release in order to be rollback safe.\n //\n // TODO: Drop the table once a release that no longer reads the table has\n // been rolled out.\n 13: {\n migrateSchema: async (_, sql) => {\n await sql`\n CREATE TABLE IF NOT EXISTS ${sql(upstreamSchema(shard))}.\"schemaVersions\" (\n \"minSupportedVersion\" INT4,\n \"maxSupportedVersion\" INT4,\n \"lock\" BOOL PRIMARY KEY DEFAULT true CHECK (lock)\n );`;\n await sql`\n INSERT INTO ${sql(upstreamSchema(shard))}.\"schemaVersions\" \n (\"lock\", \"minSupportedVersion\", \"maxSupportedVersion\")\n VALUES (true, 1, 1)\n ON CONFLICT DO NOTHING;\n `;\n },\n },\n\n // Upgrade DDL trigger to log more info to PG logs.\n // (subsumed by v16)\n 14: {},\n\n // Add initialSyncContext column to replicas table.\n 15: {\n migrateSchema: async (_, sql) => {\n await sql`\n ALTER TABLE ${sql(upstreamSchema(shard))}.replicas\n ADD COLUMN \"initialSyncContext\" JSON,\n ADD COLUMN \"subscriberContext\" JSON\n `;\n },\n },\n\n // Upgrade DDL trigger to fire on all ALTER TABLE statements\n // to catch the *removal* of a table from the published set.\n 16: {\n migrateSchema: async (lc, sql) => {\n const [{publications}] = await sql<{publications: string[]}[]>`\n SELECT publications FROM ${sql(shardConfigTable)}`;\n await setupTriggers(lc, sql, {...shard, publications});\n lc.info?.(`Upgraded DDL event triggers`);\n },\n },\n };\n}\n\nexport async function decommissionLegacyShard(\n lc: LogContext,\n db: PostgresDB,\n shard: ShardConfig,\n) {\n if (shard.appID !== 'zero') {\n // When migration from non-default shard ids, e.g. \"zero_prod\" => \"prod_0\",\n // clean up the old \"zero_prod\" shard if it is pre-v5. Note that the v5\n // check is important to guard against cleaning up a **new** \"zero_0\" app\n // that coexists with the current App (with app-id === \"0\").\n const versionHistory = await getVersionHistory(db, `zero_${shard.appID}`);\n if (versionHistory !== null && versionHistory.schemaVersion < 5) {\n await decommissionShard(lc, db, 'zero', shard.appID);\n }\n 
}\n}\n"],"names":["lc","v.object","v.string","replicaVersion","v.parse"],"mappings":";;;;;;;;;;AA0BA,eAAsB,kBACpB,IACA,IACA,OACe;AACf,QAAM,eAA0B;AAAA,IAC9B,eAAe,CAACA,KAAI,OAAO,0BAA0BA,KAAI,IAAI,KAAK;AAAA,IAClE,gBAAgB;AAAA,EAAA;AAElB,QAAM;AAAA,IACJ;AAAA,IACA,kBAAkB,MAAM,KAAK;AAAA,IAC7B,eAAe,KAAK;AAAA,IACpB;AAAA,IACA;AAAA;AAAA;AAAA;AAAA,IAIA,yBAAyB,OAAO,UAAU;AAAA,EAAA;AAE9C;AAKA,eAAsB,kBACpB,IACA,IACA,OACA,gBACe;AACf,QAAM;AAAA,IACJ;AAAA,IACA,kBAAkB,MAAM,KAAK;AAAA,IAC7B,eAAe,KAAK;AAAA,IACpB;AAAA,IACA;AAAA;AAAA;AAAA,MAGE,eAAe,MAAM;AACnB,cAAM,IAAI;AAAA,UACR,kBAAkB,eAAe,KAAK,CAAC;AAAA,QAAA;AAAA,MAE3C;AAAA,IAAA;AAAA,IAEF,yBAAyB,OAAO,cAAc;AAAA,EAAA;AAKhD,QAAM,wBAAwB,IAAI,IAAI,KAAK;AAC7C;AAEA,SAAS,yBACP,OACA,gBACyB;AACzB,QAAM,mBAAmB,GAAG,eAAe,KAAK,CAAC;AAEjD,SAAO;AAAA,IACL,GAAG;AAAA,MACD,eAAe,MAAM;AACnB,cAAM,IAAI,gBAAgB,mCAAmC;AAAA,MAC/D;AAAA,MACA,gBAAgB;AAAA,IAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAUlB,GAAG,CAAA;AAAA,IAEH,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC;AAAA,UACE;AAAA,UACA;AAAA,QAAA;AAEF,cAAM,QAAQ,IAAI;AAAA,UAChB;AAAA,wBACc,IAAI,gBAAgB,CAAC;AAAA,UACnC;AAAA,mBACS,IAAI,gBAAgB,CAAC,QAAQ,IAAI,EAAC,eAAA,CAAe,CAAC;AAAA,QAAA,CAC5D;AACD,WAAG;AAAA,UACD,2BAA2B,cAAc;AAAA,QAAA;AAAA,MAE7C;AAAA,IAAA;AAAA;AAAA;AAAA,IAKF,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,CAAC,EAAC,cAAa,IAAI,MAAM;AAAA,qCACF,IAAI,gBAAgB,CAAC;AAClD,cAAM,cAAc,IAAI,KAAK,EAAC,GAAG,OAAO,cAAa;AACrD,WAAG,OAAO,+BAA+B;AAAA,MAC3C;AAAA,IAAA;AAAA;AAAA;AAAA,IAKF,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,0BAA0BC,OAAS;AAAA,UACvC,gBAAgBC,OAAE,EAAS,SAAA;AAAA,UAC3B,eAAe,gBAAgB,SAAA;AAAA,QAAS,CACzC;AACD,cAAM,SAAS,MAAM;AAAA,0DAC6B,IAAI,gBAAgB,CAAC;AACvE;AAAA,UACE,OAAO,WAAW;AAAA,UAClB,MAAM,6CAA6C,OAAO,MAAM;AAAA,QAAA;AAElE,cAAM,EAAC,gBAAAC,iBAAgB,cAAA,IAAiBC;AAAAA,UACtC,OAAO,CAAC;AAAA,UACR;AAAA,UACA;AAAA,QAAA;AAGF,cAAM,QAAQ,IAAI;AAAA,UAChB;AAAA,yBACe,IAAI,eAAe,KAAK,CAAC,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,UAMzC;AAAA,wBACc,IAAI,eAAe,KAAK,CAAC,CAAC,aAAa,IAAI;AAAA,YACvD,MAAM,sBAAsB,KAAK;AAAA,YACjC,SAASD;AAAAA,YACT;AAAA,UAAA,CACD,CAAC;AAAA;AAAA,UAEF;AAAA,wBACc,IAAI,gBAAgB,CAAC;AAAA;AAAA,QAAA,CAEpC;AACD,WAAG,OAAO,mDAAmD;AAAA,MAC/D;AAAA,IAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IASF,GAAG,CAAA;AAAA;AAAA,IAGH,IAAI;AAAA,MACF,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,IAAI;AAAA;AAAA,UAAe;AAAA,YACrB,4BAA4B,eAAe,KAAK,CAAC,CAAC;AAAA,8BAChC,GAAG,wBAAwB,MAAM,OAAO,MAAM,QAAQ,CAAC,CAAC,cAAc,GAAG,eAAe,KAAK,CAAC,CAAC;AAAA;AAAA,QAAA;AAErH,WAAG,OAAO,0CAA0C;AAAA,MACtD;AAAA,IAAA;AAAA;AAAA;AAAA,IAKF,IAAI,CAAA;AAAA;AAAA;AAAA,IAIJ,IAAI,CAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAQJ,IAAI;AAAA,MACF,eAAe,OAAO,GAAG,QAAQ;AAC/B,cAAM;AAAA,uCACyB,IAAI,eAAe,KAAK,CAAC,CAAC;AAAA;AAAA;AAAA;AAAA;AAKzD,cAAM;AAAA,wBACU,IAAI,eAAe,KAAK,CAAC,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA,MAK5C;AAAA,IAAA;AAAA;AAAA;AAAA,IAKF,IAAI,CAAA;AAAA;AAAA,IAGJ,IAAI;AAAA,MACF,eAAe,OAAO,GAAG,QAAQ;AAC/B,cAAM;AAAA,wBACU,IAAI,eAAe,KAAK,CAAC,CAAC;AAAA;AAAA;AAAA;AAAA,MAI5C;AAAA,IAAA;AAAA;AAAA;AAAA,IAKF,IAAI;AAAA,MACF,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,CAAC,EAAC,cAAa,IAAI,MAAM;AAAA,qCACF,IAAI,gBAAgB,CAAC;AAClD,cAAM,cAAc,IAAI,KAAK,EAAC,GAAG,OAAO,cAAa;AACrD,WAAG,OAAO,6BAA6B;AAAA,MACzC;AAAA,IAAA;AAAA,EACF;AAEJ;AAEA,eAAsB,wBACpB,IACA,IACA,OACA;AACA,MAAI,MAAM,UAAU,QAAQ;AAK1B,UAAM,iBAAiB,MAAM,kBAAkB,IAAI,QAAQ,MAAM,KAAK,EAAE;AACxE,QAAI,mBAAmB,QAAQ,eAAe,gBAAgB,GAAG;AAC/D,YAAM,kBAAkB,IAAI,IAAI,QAAQ,MAAM,KAAK;AAAA,IACrD;AAAA,EACF;AACF;"}
@@ -238,6 +238,12 @@ export declare const dropIndexSchema: v.ObjectType<{
         name: v.Type<string>;
     }, undefined>;
 }, undefined>;
+export declare const downloadStatusSchema: v.ObjectType<{
+    rows: v.Type<number>;
+    totalRows: v.Type<number>;
+    totalBytes: v.Optional<number>;
+}, undefined>;
+export type DownloadStatus = v.Infer<typeof downloadStatusSchema>;
 export declare const backfillSchema: v.ObjectType<{
     tag: v.Type<"backfill">;
     relation: v.ObjectType<{
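The hunk above declares the new download-status shape. A plausible reconstruction of the source declaration behind these emitted types, using the valita-style builders the embedded sources use elsewhere (the builder calls are an assumption; only the names and field types come from the diff):

```ts
import * as v from '../../../../../../shared/src/valita.ts';

// Progress of an in-flight backfill download, per the .d.ts above.
export const downloadStatusSchema = v.object({
  rows: v.number(),                  // rows streamed so far
  totalRows: v.number(),             // expected total rows
  totalBytes: v.number().optional(), // expected total bytes, when known
});

export type DownloadStatus = v.Infer<typeof downloadStatusSchema>;
```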
@@ -251,6 +257,11 @@ export declare const backfillSchema: v.ObjectType<{
     columns: v.ArrayType<v.Type<string>>;
     watermark: v.Type<string>;
     rowValues: v.ArrayType<v.ArrayType<v.Type<import("../../../../../../shared/src/bigint-json.ts").JSONValue>>>;
+    status: v.Optional<{
+        totalBytes?: number | undefined;
+        rows: number;
+        totalRows: number;
+    }>;
 }, undefined>;
 export declare const backfillCompletedSchema: v.ObjectType<{
     tag: v.Type<"backfill-completed">;
@@ -264,6 +275,11 @@ export declare const backfillCompletedSchema: v.ObjectType<{
     }, undefined>;
     columns: v.ArrayType<v.Type<string>>;
     watermark: v.Type<string>;
+    status: v.Optional<{
+        totalBytes?: number | undefined;
+        rows: number;
+        totalRows: number;
+    }>;
 }, undefined>;
 export type MessageBegin = v.Infer<typeof beginSchema>;
 export type MessageCommit = v.Infer<typeof commitSchema>;
@@ -349,6 +365,11 @@ export declare const dataChangeSchema: v.UnionType<[v.ObjectType<{
     columns: v.ArrayType<v.Type<string>>;
     watermark: v.Type<string>;
     rowValues: v.ArrayType<v.ArrayType<v.Type<import("../../../../../../shared/src/bigint-json.ts").JSONValue>>>;
+    status: v.Optional<{
+        totalBytes?: number | undefined;
+        rows: number;
+        totalRows: number;
+    }>;
 }, undefined>]>;
 declare const dataChangeTagsSchema: v.Type<"insert" | "update" | "delete" | "truncate" | "backfill">;
 export type DataChange = Satisfies<JSONObject, // guarantees serialization over IPC or network
@@ -492,6 +513,11 @@ export declare const schemaChangeSchema: v.UnionType<[v.ObjectType<{
     }, undefined>;
     columns: v.ArrayType<v.Type<string>>;
     watermark: v.Type<string>;
+    status: v.Optional<{
+        totalBytes?: number | undefined;
+        rows: number;
+        totalRows: number;
+    }>;
 }, undefined>]>;
 declare const schemaChangeTagsSchema: v.Type<"create-table" | "rename-table" | "update-table-metadata" | "add-column" | "update-column" | "drop-column" | "drop-table" | "create-index" | "drop-index" | "backfill-completed">;
 export type SchemaChange = Satisfies<JSONObject, v.Infer<typeof schemaChangeSchema>>;
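Because the same optional status field is threaded through backfillSchema, backfillCompletedSchema, dataChangeSchema, and schemaChangeSchema, a change-source consumer can surface progress uniformly. A hypothetical consumer-side sketch (logBackfillProgress and the message shape are illustrative, not part of the package API):

```ts
// Illustrative only: report progress when a change message carries status.
type DownloadStatus = {rows: number; totalRows: number; totalBytes?: number};

function logBackfillProgress(msg: {tag: string; status?: DownloadStatus}): void {
  if (msg.status === undefined) return;
  const {rows, totalRows, totalBytes} = msg.status;
  const pct = totalRows > 0 ? ((100 * rows) / totalRows).toFixed(1) : '0.0';
  const bytes = totalBytes === undefined ? '' : ` of ~${totalBytes} bytes`;
  console.log(`[${msg.tag}] backfill: ${rows}/${totalRows} rows (${pct}%)${bytes}`);
}
```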
@@ -1 +1 @@
-
{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/protocol/current/data.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH,OAAO,EAEL,KAAK,UAAU,EAChB,MAAM,6CAA6C,CAAC;AAErD,OAAO,KAAK,CAAC,MAAM,wCAAwC,CAAC;AAE5D,OAAO,KAAK,EAAC,SAAS,EAAC,MAAM,gCAAgC,CAAC;AAG9D,eAAO,MAAM,WAAW;;;;aAgBtB,CAAC;AAEH,eAAO,MAAM,YAAY;;aAEvB,CAAC;AAEH,eAAO,MAAM,cAAc;;aAEzB,CAAC;AAcH,eAAO,MAAM,cAAc;;;;;;;;;EA2BvB,CAAC;AAGL,eAAO,MAAM,iBAAiB;;;;;;;aAK5B,CAAC;AAUH,eAAO,MAAM,mBAAmB;;2EAER,CAAC;AAEzB,MAAM,MAAM,aAAa,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,mBAAmB,CAAC,CAAC;AAEhE,eAAO,MAAM,SAAS,yFAA4B,CAAC;AAEnD,eAAO,MAAM,YAAY;;;;;;;;;;;;;aAIvB,CAAC;AAEH,eAAO,MAAM,YAAY;;;;;;;;;;;;;;aAUvB,CAAC;AAEH,eAAO,MAAM,YAAY;;;;;;;;;;;;;aAKvB,CAAC;AAEH,eAAO,MAAM,cAAc;;;;;;;;;;;;aAGzB,CAAC;AAEH,eAAO,MAAM,gBAAgB;;;aAG3B,CAAC;AAEH,MAAM,MAAM,UAAU,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,gBAAgB,CAAC,CAAC;AAU1D,eAAO,MAAM,gBAAgB,mEAAmB,CAAC;AAEjD,MAAM,MAAM,UAAU,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,gBAAgB,CAAC,CAAC;AAE1D,eAAO,MAAM,iBAAiB;;;;;;;;;;;;;;;;;;;;;;aA4B5B,CAAC;AAEH,eAAO,MAAM,iBAAiB;;;;;;;;;;aAI5B,CAAC;AAEH,eAAO,MAAM,yBAAyB;;;;;;;;;;;;aAKpC,CAAC;AAOH,eAAO,MAAM,eAAe;;;;;;;;;;;;;;;;;;;;;;;aAa1B,CAAC;AAEH,eAAO,MAAM,kBAAkB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;aAK7B,CAAC;AAEH,eAAO,MAAM,gBAAgB;;;;;;;aAI3B,CAAC;AAEH,eAAO,MAAM,eAAe;;;;;;aAG1B,CAAC;AAEH,eAAO,MAAM,iBAAiB;;;;;;;;;;aAG5B,CAAC;AAEH,eAAO,MAAM,eAAe;;;;;;aAG1B,CAAC;
+
{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/protocol/current/data.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH,OAAO,EAEL,KAAK,UAAU,EAChB,MAAM,6CAA6C,CAAC;AAErD,OAAO,KAAK,CAAC,MAAM,wCAAwC,CAAC;AAE5D,OAAO,KAAK,EAAC,SAAS,EAAC,MAAM,gCAAgC,CAAC;AAG9D,eAAO,MAAM,WAAW;;;;aAgBtB,CAAC;AAEH,eAAO,MAAM,YAAY;;aAEvB,CAAC;AAEH,eAAO,MAAM,cAAc;;aAEzB,CAAC;AAcH,eAAO,MAAM,cAAc;;;;;;;;;EA2BvB,CAAC;AAGL,eAAO,MAAM,iBAAiB;;;;;;;aAK5B,CAAC;AAUH,eAAO,MAAM,mBAAmB;;2EAER,CAAC;AAEzB,MAAM,MAAM,aAAa,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,mBAAmB,CAAC,CAAC;AAEhE,eAAO,MAAM,SAAS,yFAA4B,CAAC;AAEnD,eAAO,MAAM,YAAY;;;;;;;;;;;;;aAIvB,CAAC;AAEH,eAAO,MAAM,YAAY;;;;;;;;;;;;;;aAUvB,CAAC;AAEH,eAAO,MAAM,YAAY;;;;;;;;;;;;;aAKvB,CAAC;AAEH,eAAO,MAAM,cAAc;;;;;;;;;;;;aAGzB,CAAC;AAEH,eAAO,MAAM,gBAAgB;;;aAG3B,CAAC;AAEH,MAAM,MAAM,UAAU,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,gBAAgB,CAAC,CAAC;AAU1D,eAAO,MAAM,gBAAgB,mEAAmB,CAAC;AAEjD,MAAM,MAAM,UAAU,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,gBAAgB,CAAC,CAAC;AAE1D,eAAO,MAAM,iBAAiB;;;;;;;;;;;;;;;;;;;;;;aA4B5B,CAAC;AAEH,eAAO,MAAM,iBAAiB;;;;;;;;;;aAI5B,CAAC;AAEH,eAAO,MAAM,yBAAyB;;;;;;;;;;;;aAKpC,CAAC;AAOH,eAAO,MAAM,eAAe;;;;;;;;;;;;;;;;;;;;;;;aAa1B,CAAC;AAEH,eAAO,MAAM,kBAAkB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;aAK7B,CAAC;AAEH,eAAO,MAAM,gBAAgB;;;;;;;aAI3B,CAAC;AAEH,eAAO,MAAM,eAAe;;;;;;aAG1B,CAAC;AAEH,eAAO,MAAM,iBAAiB;;;;;;;;;;aAG5B,CAAC;AAEH,eAAO,MAAM,eAAe;;;;;;aAG1B,CAAC;AAEH,eAAO,MAAM,oBAAoB;;;;aAI/B,CAAC;AAEH,MAAM,MAAM,cAAc,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,oBAAoB,CAAC,CAAC;AAIlE,eAAO,MAAM,cAAc;;;;;;;;;;;;;;;;;;aA8BzB,CAAC;AAIH,eAAO,MAAM,uBAAuB;;;;;;;;;;;;;;;;;aAkBlC,CAAC;AAEH,MAAM,MAAM,YAAY,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,WAAW,CAAC,CAAC;AACvD,MAAM,MAAM,aAAa,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,YAAY,CAAC,CAAC;AACzD,MAAM,MAAM,eAAe,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,cAAc,CAAC,CAAC;AAE7D,MAAM,MAAM,eAAe,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,cAAc,CAAC,CAAC;AAC7D,MAAM,MAAM,aAAa,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,YAAY,CAAC,CAAC;AACzD,MAAM,MAAM,aAAa,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,YAAY,CAAC,CAAC;AACzD,MAAM,MAAM,aAAa,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,YAAY,CAAC,CAAC;AACzD,MAAM,MAAM,eAAe,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,cAAc,CAAC,CAAC;AAE7D,MAAM,MAAM,eAAe,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,cAAc,CAAC,CAAC;AAE7D,MAAM,MAAM,WAAW,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,iBAAiB,CAAC,CAAC;AAC5D,MAAM,MAAM,WAAW,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,iBAAiB,CAAC,CAAC;AAC5D,MAAM,MAAM,mBAAmB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,yBAAyB,CAAC,CAAC;AAC5E,MAAM,MAAM,SAAS,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,eAAe,CAAC,CAAC;AACxD,MAAM,MAAM,YAAY,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,kBAAkB,CAAC,CAAC;AAC9D,MAAM,MAAM,UAAU,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,gBAAgB,CAAC,CAAC;AAC1D,MAAM,MAAM,SAAS,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,eAAe,CAAC,CAAC;AACxD,MAAM,MAAM,WAAW,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,iBAAiB,CAAC,CAAC;AAC5D,MAAM,MAAM,SAAS,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,eAAe,CAAC,CAAC;AACxD,MAAM,MAAM,iBAAiB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,uBAAuB,CAAC,CAAC;AAExE,eAAO,MAAM,gBAAgB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;eAM5B,CAAC;AAWF,QAAA,MAAM,oBAAoB,kEAAoC,CAAC;AAE/D,MAAM,MAAM,UAAU,GAAG,SAAS,CAChC,UAAU,EAAE,+CAA+C;AAC3D,CAAC,CAAC,KAAK,CAAC,OAAO,gBAAgB,CAAC,CACjC,CAAC;AAEF,MAAM,MAAM,aAAa,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,oBAAoB,CAAC,CAAC;AA6BjE,eAAO,MAAM,kBAAkB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;eAA4B,CAAC;AAE5D,QAAA,MAAM,sBAAsB,0LAAsC,CAAC;AAEnE,MAAM,MAAM,YAAY,GAAG,SAAS,CAClC,UAAU,EACV,CAAC,CAAC,KAAK,CAAC,OAAO,kBAAkB,CAAC,CACnC,CAAC;AAEF,MAAM,MAAM,eAAe,GAAG,CAAC,CAAC,KAAK,C
AAC,OAAO,sBAAsB,CAAC,CAAC;AAErE,MAAM,MAAM,kBAAkB,GAAG,UAAU,GAAG,YAAY,CAAC;AAE3D,MAAM,MAAM,MAAM,GACd,YAAY,GACZ,kBAAkB,GAClB,aAAa,GACb,eAAe,CAAC;AAEpB,MAAM,MAAM,SAAS,GAAG,MAAM,CAAC,KAAK,CAAC,CAAC;AAItC,wBAAgB,cAAc,CAAC,MAAM,EAAE,MAAM,GAAG,MAAM,IAAI,YAAY,CAErE;AAID,wBAAgB,YAAY,CAAC,MAAM,EAAE,MAAM,GAAG,MAAM,IAAI,UAAU,CAEjE"}