@powersync/service-module-postgres 0.19.3 → 0.19.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72) hide show
  1. package/dist/api/PostgresRouteAPIAdapter.d.ts +1 -1
  2. package/dist/api/PostgresRouteAPIAdapter.js +63 -72
  3. package/dist/api/PostgresRouteAPIAdapter.js.map +1 -1
  4. package/dist/module/PostgresModule.js.map +1 -1
  5. package/dist/replication/MissingReplicationSlotError.d.ts +41 -0
  6. package/dist/replication/MissingReplicationSlotError.js +33 -0
  7. package/dist/replication/MissingReplicationSlotError.js.map +1 -0
  8. package/dist/replication/PostgresErrorRateLimiter.js +1 -1
  9. package/dist/replication/PostgresErrorRateLimiter.js.map +1 -1
  10. package/dist/replication/SnapshotQuery.js +2 -2
  11. package/dist/replication/SnapshotQuery.js.map +1 -1
  12. package/dist/replication/WalStream.d.ts +35 -3
  13. package/dist/replication/WalStream.js +135 -9
  14. package/dist/replication/WalStream.js.map +1 -1
  15. package/dist/replication/WalStreamReplicationJob.js +6 -3
  16. package/dist/replication/WalStreamReplicationJob.js.map +1 -1
  17. package/dist/replication/replication-index.d.ts +3 -1
  18. package/dist/replication/replication-index.js +3 -1
  19. package/dist/replication/replication-index.js.map +1 -1
  20. package/dist/replication/replication-utils.d.ts +3 -11
  21. package/dist/replication/replication-utils.js +101 -164
  22. package/dist/replication/replication-utils.js.map +1 -1
  23. package/dist/replication/wal-budget-utils.d.ts +23 -0
  24. package/dist/replication/wal-budget-utils.js +57 -0
  25. package/dist/replication/wal-budget-utils.js.map +1 -0
  26. package/dist/types/registry.js +1 -1
  27. package/dist/types/registry.js.map +1 -1
  28. package/package.json +15 -11
  29. package/sql/check-source-configuration.plpgsql +13 -0
  30. package/sql/debug-tables-info-batched.plpgsql +230 -0
  31. package/CHANGELOG.md +0 -858
  32. package/src/api/PostgresRouteAPIAdapter.ts +0 -356
  33. package/src/index.ts +0 -1
  34. package/src/module/PostgresModule.ts +0 -122
  35. package/src/replication/ConnectionManagerFactory.ts +0 -33
  36. package/src/replication/PgManager.ts +0 -122
  37. package/src/replication/PgRelation.ts +0 -41
  38. package/src/replication/PostgresErrorRateLimiter.ts +0 -48
  39. package/src/replication/SnapshotQuery.ts +0 -213
  40. package/src/replication/WalStream.ts +0 -1137
  41. package/src/replication/WalStreamReplicationJob.ts +0 -138
  42. package/src/replication/WalStreamReplicator.ts +0 -53
  43. package/src/replication/replication-index.ts +0 -5
  44. package/src/replication/replication-utils.ts +0 -398
  45. package/src/types/registry.ts +0 -275
  46. package/src/types/resolver.ts +0 -227
  47. package/src/types/types.ts +0 -44
  48. package/src/utils/application-name.ts +0 -8
  49. package/src/utils/migration_lib.ts +0 -80
  50. package/src/utils/populate_test_data.ts +0 -37
  51. package/src/utils/populate_test_data_worker.ts +0 -53
  52. package/src/utils/postgres_version.ts +0 -8
  53. package/test/src/checkpoints.test.ts +0 -86
  54. package/test/src/chunked_snapshots.test.ts +0 -161
  55. package/test/src/env.ts +0 -11
  56. package/test/src/large_batch.test.ts +0 -241
  57. package/test/src/pg_test.test.ts +0 -729
  58. package/test/src/resuming_snapshots.test.ts +0 -160
  59. package/test/src/route_api_adapter.test.ts +0 -62
  60. package/test/src/schema_changes.test.ts +0 -655
  61. package/test/src/setup.ts +0 -12
  62. package/test/src/slow_tests.test.ts +0 -519
  63. package/test/src/storage_combination.test.ts +0 -35
  64. package/test/src/types/registry.test.ts +0 -149
  65. package/test/src/util.ts +0 -151
  66. package/test/src/validation.test.ts +0 -63
  67. package/test/src/wal_stream.test.ts +0 -607
  68. package/test/src/wal_stream_utils.ts +0 -284
  69. package/test/tsconfig.json +0 -27
  70. package/tsconfig.json +0 -34
  71. package/tsconfig.tsbuildinfo +0 -1
  72. package/vitest.config.ts +0 -3
@@ -1,1137 +0,0 @@
1
- import * as lib_postgres from '@powersync/lib-service-postgres';
2
- import {
3
- container,
4
- DatabaseConnectionError,
5
- logger as defaultLogger,
6
- ErrorCode,
7
- Logger,
8
- ReplicationAbortedError,
9
- ReplicationAssertionError
10
- } from '@powersync/lib-services-framework';
11
- import {
12
- BucketStorageBatch,
13
- getUuidReplicaIdentityBson,
14
- MetricsEngine,
15
- RelationCache,
16
- ReplicationLagTracker,
17
- SaveUpdate,
18
- SourceEntityDescriptor,
19
- SourceTable,
20
- storage
21
- } from '@powersync/service-core';
22
- import * as pgwire from '@powersync/service-jpgwire';
23
- import {
24
- applyValueContext,
25
- CompatibilityContext,
26
- HydratedSyncRules,
27
- SqliteInputRow,
28
- SqliteInputValue,
29
- SqliteRow,
30
- TablePattern,
31
- ToastableSqliteRow,
32
- toSyncRulesValue
33
- } from '@powersync/service-sync-rules';
34
-
35
- import { ReplicationMetric } from '@powersync/service-types';
36
- import { PostgresTypeResolver } from '../types/resolver.js';
37
- import { PgManager } from './PgManager.js';
38
- import { getPgOutputRelation, getRelId, referencedColumnTypeIds } from './PgRelation.js';
39
- import { checkSourceConfiguration, checkTableRls, getReplicationIdentityColumns } from './replication-utils.js';
40
- import {
41
- ChunkedSnapshotQuery,
42
- IdSnapshotQuery,
43
- MissingRow,
44
- PrimaryKeyValue,
45
- SimpleSnapshotQuery,
46
- SnapshotQuery
47
- } from './SnapshotQuery.js';
48
-
49
- export interface WalStreamOptions {
50
- logger?: Logger;
51
- connections: PgManager;
52
- storage: storage.SyncRulesBucketStorage;
53
- metrics: MetricsEngine;
54
- abort_signal: AbortSignal;
55
-
56
- /**
57
- * Override snapshot chunk length (number of rows), for testing.
58
- *
59
- * Defaults to 10_000.
60
- *
61
- * Note that queries are streamed, so we don't actually keep that much data in memory.
62
- */
63
- snapshotChunkLength?: number;
64
- }
65
-
66
- interface InitResult {
67
- /** True if initial snapshot is not yet done. */
68
- needsInitialSync: boolean;
69
- /** True if snapshot must be started from scratch with a new slot. */
70
- needsNewSlot: boolean;
71
- }
72
-
73
- export const ZERO_LSN = '00000000/00000000';
74
- export const PUBLICATION_NAME = 'powersync';
75
- export const POSTGRES_DEFAULT_SCHEMA = 'public';
76
-
77
- export const KEEPALIVE_CONTENT = 'ping';
78
- export const KEEPALIVE_BUFFER = Buffer.from(KEEPALIVE_CONTENT);
79
- export const KEEPALIVE_STATEMENT: pgwire.Statement = {
80
- statement: /* sql */ `
81
- SELECT
82
- *
83
- FROM
84
- pg_logical_emit_message(FALSE, 'powersync', $1)
85
- `,
86
- params: [{ type: 'varchar', value: KEEPALIVE_CONTENT }]
87
- } as const;
88
-
89
- export const isKeepAliveMessage = (msg: pgwire.PgoutputMessage) => {
90
- return (
91
- msg.tag == 'message' &&
92
- msg.prefix == 'powersync' &&
93
- msg.content &&
94
- Buffer.from(msg.content).equals(KEEPALIVE_BUFFER)
95
- );
96
- };
97
-
98
- export const sendKeepAlive = async (db: pgwire.PgClient) => {
99
- await lib_postgres.retriedQuery(db, KEEPALIVE_STATEMENT);
100
- };
101
-
102
- export class MissingReplicationSlotError extends Error {
103
- constructor(message: string, cause?: any) {
104
- super(message);
105
-
106
- this.cause = cause;
107
- }
108
- }
109
-
110
- export class WalStream {
111
- sync_rules: HydratedSyncRules;
112
- group_id: number;
113
-
114
- connection_id = 1;
115
-
116
- private logger: Logger;
117
-
118
- private readonly storage: storage.SyncRulesBucketStorage;
119
- private readonly metrics: MetricsEngine;
120
- private readonly slot_name: string;
121
-
122
- private connections: PgManager;
123
-
124
- private abort_signal: AbortSignal;
125
-
126
- private relationCache = new RelationCache((relation: number | SourceTable) => {
127
- if (typeof relation == 'number') {
128
- return relation;
129
- }
130
- return relation.objectId!;
131
- });
132
-
133
- private startedStreaming = false;
134
-
135
- private snapshotChunkLength: number;
136
-
137
- private replicationLag = new ReplicationLagTracker();
138
-
139
- private initialSnapshotPromise: Promise<void> | null = null;
140
-
141
- constructor(options: WalStreamOptions) {
142
- this.logger = options.logger ?? defaultLogger;
143
- this.storage = options.storage;
144
- this.metrics = options.metrics;
145
- this.sync_rules = options.storage.getParsedSyncRules({ defaultSchema: POSTGRES_DEFAULT_SCHEMA });
146
- this.group_id = options.storage.group_id;
147
- this.slot_name = options.storage.slot_name;
148
- this.connections = options.connections;
149
- this.snapshotChunkLength = options.snapshotChunkLength ?? 10_000;
150
-
151
- this.abort_signal = options.abort_signal;
152
- this.abort_signal.addEventListener(
153
- 'abort',
154
- () => {
155
- if (this.startedStreaming) {
156
- // Ping to speed up cancellation of streaming replication
157
- // We're not using pg_snapshot here, since it could be in the middle of
158
- // an initial replication transaction.
159
- const promise = sendKeepAlive(this.connections.pool);
160
- promise.catch((e) => {
161
- // Failures here are okay - this only speeds up stopping the process.
162
- this.logger.warn('Failed to ping connection', e);
163
- });
164
- } else {
165
- // If we haven't started streaming yet, it could be due to something like
166
- // and invalid password. In that case, don't attempt to ping.
167
- }
168
- },
169
- { once: true }
170
- );
171
- }
172
-
173
- get stopped() {
174
- return this.abort_signal.aborted;
175
- }
176
-
177
- async getQualifiedTableNames(
178
- batch: storage.BucketStorageBatch,
179
- db: pgwire.PgConnection,
180
- tablePattern: TablePattern
181
- ): Promise<storage.SourceTable[]> {
182
- const schema = tablePattern.schema;
183
- if (tablePattern.connectionTag != this.connections.connectionTag) {
184
- return [];
185
- }
186
-
187
- let tableRows: any[];
188
- const prefix = tablePattern.isWildcard ? tablePattern.tablePrefix : undefined;
189
-
190
- {
191
- let query = `
192
- SELECT
193
- c.oid AS relid,
194
- c.relname AS table_name,
195
- (SELECT
196
- json_agg(DISTINCT a.atttypid)
197
- FROM pg_attribute a
198
- WHERE a.attnum > 0 AND NOT a.attisdropped AND a.attrelid = c.oid)
199
- AS column_types
200
- FROM pg_class c
201
- JOIN pg_namespace n ON n.oid = c.relnamespace
202
- WHERE n.nspname = $1
203
- AND c.relkind = 'r'`;
204
-
205
- if (tablePattern.isWildcard) {
206
- query += ' AND c.relname LIKE $2';
207
- } else {
208
- query += ' AND c.relname = $2';
209
- }
210
-
211
- const result = await db.query({
212
- statement: query,
213
- params: [
214
- { type: 'varchar', value: schema },
215
- { type: 'varchar', value: tablePattern.tablePattern }
216
- ]
217
- });
218
-
219
- tableRows = pgwire.pgwireRows(result);
220
- }
221
-
222
- let result: storage.SourceTable[] = [];
223
-
224
- for (let row of tableRows) {
225
- const name = row.table_name as string;
226
- if (typeof row.relid != 'bigint') {
227
- throw new ReplicationAssertionError(`Missing relid for ${name}`);
228
- }
229
- const relid = Number(row.relid as bigint);
230
-
231
- if (prefix && !name.startsWith(prefix)) {
232
- continue;
233
- }
234
-
235
- const rs = await db.query({
236
- statement: `SELECT 1 FROM pg_publication_tables WHERE pubname = $1 AND schemaname = $2 AND tablename = $3`,
237
- params: [
238
- { type: 'varchar', value: PUBLICATION_NAME },
239
- { type: 'varchar', value: tablePattern.schema },
240
- { type: 'varchar', value: name }
241
- ]
242
- });
243
- if (rs.rows.length == 0) {
244
- this.logger.info(`Skipping ${tablePattern.schema}.${name} - not part of ${PUBLICATION_NAME} publication`);
245
- continue;
246
- }
247
-
248
- try {
249
- const result = await checkTableRls(db, relid);
250
- if (!result.canRead) {
251
- // We log the message, then continue anyway, since the check does not cover all cases.
252
- this.logger.warn(result.message!);
253
- }
254
- } catch (e) {
255
- // It's possible that we just don't have permission to access pg_roles - log the error and continue.
256
- this.logger.warn(`Could not check RLS access for ${tablePattern.schema}.${name}`, e);
257
- }
258
-
259
- const cresult = await getReplicationIdentityColumns(db, relid);
260
-
261
- const columnTypes = (JSON.parse(row.column_types) as string[]).map((e) => Number(e));
262
- const table = await this.handleRelation({
263
- batch,
264
- descriptor: {
265
- name,
266
- schema,
267
- objectId: relid,
268
- replicaIdColumns: cresult.replicationColumns
269
- } as SourceEntityDescriptor,
270
- snapshot: false,
271
- referencedTypeIds: columnTypes
272
- });
273
-
274
- result.push(table);
275
- }
276
- return result;
277
- }
278
-
279
- async initSlot(): Promise<InitResult> {
280
- await checkSourceConfiguration(this.connections.pool, PUBLICATION_NAME);
281
- await this.ensureStorageCompatibility();
282
-
283
- const slotName = this.slot_name;
284
-
285
- const status = await this.storage.getStatus();
286
- const snapshotDone = status.snapshot_done && status.checkpoint_lsn != null;
287
- if (snapshotDone) {
288
- // Snapshot is done, but we still need to check the replication slot status
289
- this.logger.info(`Initial replication already done`);
290
- }
291
-
292
- // Check if replication slot exists
293
- const slot = pgwire.pgwireRows(
294
- await this.connections.pool.query({
295
- // We specifically want wal_status and invalidation_reason, but it's not available on older versions,
296
- // so we just query *.
297
- statement: 'SELECT * FROM pg_replication_slots WHERE slot_name = $1',
298
- params: [{ type: 'varchar', value: slotName }]
299
- })
300
- )[0];
301
-
302
- // Previously we also used pg_catalog.pg_logical_slot_peek_binary_changes to confirm that we can query the slot.
303
- // However, there were some edge cases where the query times out, repeating the query, ultimately
304
- // causing high load on the source database and never recovering automatically.
305
- // We now instead jump straight to replication if the wal_status is not "lost", rather detecting those
306
- // errors during streaming replication, which is a little more robust.
307
-
308
- // We can have:
309
- // 1. needsInitialSync: true, lost slot -> MissingReplicationSlotError (starts new sync rules version).
310
- // Theoretically we could handle this the same as (2).
311
- // 2. needsInitialSync: true, no slot -> create new slot
312
- // 3. needsInitialSync: true, valid slot -> resume initial sync
313
- // 4. needsInitialSync: false, lost slot -> MissingReplicationSlotError (starts new sync rules version)
314
- // 5. needsInitialSync: false, no slot -> MissingReplicationSlotError (starts new sync rules version)
315
- // 6. needsInitialSync: false, valid slot -> resume streaming replication
316
- // The main advantages of MissingReplicationSlotError are:
317
- // 1. If there was a complete snapshot already (cases 4/5), users can still sync from that snapshot while
318
- // we do the reprocessing under a new slot name.
319
- // 2. If there was a partial snapshot (case 1), we can start with the new slot faster by not waiting for
320
- // the partial data to be cleared.
321
- if (slot != null) {
322
- // This checks that the slot is still valid
323
-
324
- // wal_status is present in postgres 13+
325
- // invalidation_reason is present in postgres 17+
326
- const lost = slot.wal_status == 'lost';
327
- if (lost) {
328
- // Case 1 / 4
329
- throw new MissingReplicationSlotError(
330
- `Replication slot ${slotName} is not valid anymore. invalidation_reason: ${slot.invalidation_reason ?? 'unknown'}`
331
- );
332
- }
333
- // Case 3 / 6
334
- return {
335
- needsInitialSync: !snapshotDone,
336
- needsNewSlot: false
337
- };
338
- } else {
339
- if (snapshotDone) {
340
- // Case 5
341
- // This will create a new slot, while keeping the current sync rules active
342
- throw new MissingReplicationSlotError(`Replication slot ${slotName} is missing`);
343
- }
344
- // Case 2
345
- // This will clear data (if any) and re-create the same slot
346
- return { needsInitialSync: true, needsNewSlot: true };
347
- }
348
- }
349
-
350
- async estimatedCountNumber(db: pgwire.PgConnection, table: storage.SourceTable): Promise<number> {
351
- const results = await db.query({
352
- statement: `SELECT reltuples::bigint AS estimate
353
- FROM pg_class
354
- WHERE oid = $1::regclass`,
355
- params: [{ value: table.qualifiedName, type: 'varchar' }]
356
- });
357
- const row = results.rows[0];
358
- return Number(row?.decodeWithoutCustomTypes(0) ?? -1n);
359
- }
360
-
361
- /**
362
- * Start initial replication.
363
- *
364
- * If (partial) replication was done before on this slot, this clears the state
365
- * and starts again from scratch.
366
- */
367
- async startInitialReplication(replicationConnection: pgwire.PgConnection, status: InitResult) {
368
- // If anything here errors, the entire replication process is aborted,
369
- // and all connections are closed, including this one.
370
- const db = await this.connections.snapshotConnection();
371
-
372
- const slotName = this.slot_name;
373
-
374
- if (status.needsNewSlot) {
375
- // This happens when there is no existing replication slot, or if the
376
- // existing one is unhealthy.
377
- // In those cases, we have to start replication from scratch.
378
- // If there is an existing healthy slot, we can skip this and continue
379
- // initial replication where we left off.
380
- await this.storage.clear({ signal: this.abort_signal });
381
-
382
- await db.query({
383
- statement: 'SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots WHERE slot_name = $1',
384
- params: [{ type: 'varchar', value: slotName }]
385
- });
386
-
387
- // We use the replication connection here, not a pool.
388
- // The replication slot must be created before we start snapshotting tables.
389
- await replicationConnection.query(`CREATE_REPLICATION_SLOT ${slotName} LOGICAL pgoutput`);
390
-
391
- this.logger.info(`Created replication slot ${slotName}`);
392
- }
393
-
394
- await this.initialReplication(db);
395
- }
396
-
397
- async initialReplication(db: pgwire.PgConnection) {
398
- const sourceTables = this.sync_rules.getSourceTables();
399
- const flushResults = await this.storage.startBatch(
400
- {
401
- logger: this.logger,
402
- zeroLSN: ZERO_LSN,
403
- defaultSchema: POSTGRES_DEFAULT_SCHEMA,
404
- storeCurrentData: true,
405
- skipExistingRows: true
406
- },
407
- async (batch) => {
408
- let tablesWithStatus: SourceTable[] = [];
409
- for (let tablePattern of sourceTables) {
410
- const tables = await this.getQualifiedTableNames(batch, db, tablePattern);
411
- // Pre-get counts
412
- for (let table of tables) {
413
- if (table.snapshotComplete) {
414
- this.logger.info(`Skipping ${table.qualifiedName} - snapshot already done`);
415
- continue;
416
- }
417
- const count = await this.estimatedCountNumber(db, table);
418
- table = await batch.updateTableProgress(table, { totalEstimatedCount: count });
419
- this.relationCache.update(table);
420
- tablesWithStatus.push(table);
421
-
422
- this.logger.info(`To replicate: ${table.qualifiedName} ${table.formatSnapshotProgress()}`);
423
- }
424
- }
425
-
426
- for (let table of tablesWithStatus) {
427
- await this.snapshotTableInTx(batch, db, table);
428
- this.touch();
429
- }
430
-
431
- // Always commit the initial snapshot at zero.
432
- // This makes sure we don't skip any changes applied before starting this snapshot,
433
- // in the case of snapshot retries.
434
- // We could alternatively commit at the replication slot LSN.
435
-
436
- // Get the current LSN for the snapshot.
437
- // We could also use the LSN from the last table snapshot.
438
- const rs = await db.query(`select pg_current_wal_lsn() as lsn`);
439
- const noCommitBefore = rs.rows[0].decodeWithoutCustomTypes(0);
440
-
441
- await batch.markAllSnapshotDone(noCommitBefore);
442
- await batch.commit(ZERO_LSN);
443
- }
444
- );
445
- /**
446
- * Send a keepalive message after initial replication.
447
- * In some edge cases we wait for a keepalive after the initial snapshot.
448
- * If we don't explicitly check the contents of keepalive messages then a keepalive is detected
449
- * rather quickly after initial replication - perhaps due to other WAL events.
450
- * If we do explicitly check the contents of messages, we need an actual keepalive payload in order
451
- * to advance the active sync rules LSN.
452
- */
453
- await sendKeepAlive(db);
454
-
455
- const lastOp = flushResults?.flushed_op;
456
- if (lastOp != null) {
457
- // Populate the cache _after_ initial replication, but _before_ we switch to this sync rules.
458
- await this.storage.populatePersistentChecksumCache({
459
- // No checkpoint yet, but we do have the opId.
460
- maxOpId: lastOp,
461
- signal: this.abort_signal
462
- });
463
- }
464
- }
465
-
466
- static decodeRow(row: pgwire.PgRow, types: PostgresTypeResolver): SqliteInputRow {
467
- let result: SqliteInputRow = {};
468
-
469
- row.raw.forEach((rawValue, i) => {
470
- const column = row.columns[i];
471
- let mappedValue: SqliteInputValue;
472
-
473
- if (typeof rawValue == 'string') {
474
- mappedValue = toSyncRulesValue(types.registry.decodeDatabaseValue(rawValue, column.typeOid), false, true);
475
- } else {
476
- // Binary format, expose as-is.
477
- mappedValue = rawValue;
478
- }
479
-
480
- result[column.name] = mappedValue;
481
- });
482
- return result;
483
- }
484
-
485
- private async snapshotTableInTx(
486
- batch: storage.BucketStorageBatch,
487
- db: pgwire.PgConnection,
488
- table: storage.SourceTable,
489
- limited?: PrimaryKeyValue[]
490
- ): Promise<storage.SourceTable> {
491
- // Note: We use the default "Read Committed" isolation level here, not snapshot isolation.
492
- // The data may change during the transaction, but that is compensated for in the streaming
493
- // replication afterwards.
494
- await db.query('BEGIN');
495
- try {
496
- await this.snapshotTable(batch, db, table, limited);
497
-
498
- // Get the current LSN.
499
- // The data will only be consistent once incremental replication has passed that point.
500
- // We have to get this LSN _after_ we have finished the table snapshot.
501
- //
502
- // There are basically two relevant LSNs here:
503
- // A: The LSN before the snapshot starts. We don't explicitly record this on the PowerSync side,
504
- // but it is implicitly recorded in the replication slot.
505
- // B: The LSN after the table snapshot is complete, which is what we get here.
506
- // When we do the snapshot queries, the data that we get back for each chunk could match the state
507
- // anywhere between A and B. To actually have a consistent state on our side, we need to:
508
- // 1. Complete the snapshot.
509
- // 2. Wait until logical replication has caught up with all the change between A and B.
510
- // Calling `markSnapshotDone(LSN B)` covers that.
511
- const rs = await db.query(`select pg_current_wal_lsn() as lsn`);
512
- const tableLsnNotBefore = rs.rows[0].decodeWithoutCustomTypes(0);
513
- // Side note: A ROLLBACK would probably also be fine here, since we only read in this transaction.
514
- await db.query('COMMIT');
515
- const [resultTable] = await batch.markTableSnapshotDone([table], tableLsnNotBefore);
516
- this.relationCache.update(resultTable);
517
- return resultTable;
518
- } catch (e) {
519
- await db.query('ROLLBACK');
520
- throw e;
521
- }
522
- }
523
-
524
- private async snapshotTable(
525
- batch: storage.BucketStorageBatch,
526
- db: pgwire.PgConnection,
527
- table: storage.SourceTable,
528
- limited?: PrimaryKeyValue[]
529
- ) {
530
- let totalEstimatedCount = table.snapshotStatus?.totalEstimatedCount;
531
- let at = table.snapshotStatus?.replicatedCount ?? 0;
532
- let lastCountTime = 0;
533
- let q: SnapshotQuery;
534
- // We do streaming on two levels:
535
- // 1. Coarse level: DECLARE CURSOR, FETCH 10000 at a time.
536
- // 2. Fine level: Stream chunks from each fetch call.
537
- if (limited) {
538
- q = new IdSnapshotQuery(db, table, limited);
539
- } else if (ChunkedSnapshotQuery.supports(table)) {
540
- // Single primary key - we can use the primary key for chunking
541
- const orderByKey = table.replicaIdColumns[0];
542
- q = new ChunkedSnapshotQuery(db, table, this.snapshotChunkLength, table.snapshotStatus?.lastKey ?? null);
543
- if (table.snapshotStatus?.lastKey != null) {
544
- this.logger.info(
545
- `Replicating ${table.qualifiedName} ${table.formatSnapshotProgress()} - resuming from ${orderByKey.name} > ${(q as ChunkedSnapshotQuery).lastKey}`
546
- );
547
- } else {
548
- this.logger.info(`Replicating ${table.qualifiedName} ${table.formatSnapshotProgress()} - resumable`);
549
- }
550
- } else {
551
- // Fallback case - query the entire table
552
- this.logger.info(`Replicating ${table.qualifiedName} ${table.formatSnapshotProgress()} - not resumable`);
553
- q = new SimpleSnapshotQuery(db, table, this.snapshotChunkLength);
554
- at = 0;
555
- }
556
- await q.initialize();
557
-
558
- let hasRemainingData = true;
559
- while (hasRemainingData) {
560
- // Fetch 10k at a time.
561
- // The balance here is between latency overhead per FETCH call,
562
- // and not spending too much time on each FETCH call.
563
- // We aim for a couple of seconds on each FETCH call.
564
- const cursor = q.nextChunk();
565
- hasRemainingData = false;
566
- // pgwire streams rows in chunks.
567
- // These chunks can be quite small (as little as 16KB), so we don't flush chunks automatically.
568
- // There are typically 100-200 rows per chunk.
569
- for await (let chunk of cursor) {
570
- if (chunk.tag == 'RowDescription') {
571
- continue;
572
- }
573
-
574
- if (chunk.rows.length > 0) {
575
- hasRemainingData = true;
576
- }
577
-
578
- for (const rawRow of chunk.rows) {
579
- const record = this.sync_rules.applyRowContext<never>(WalStream.decodeRow(rawRow, this.connections.types));
580
-
581
- // This auto-flushes when the batch reaches its size limit
582
- await batch.save({
583
- tag: storage.SaveOperationTag.INSERT,
584
- sourceTable: table,
585
- before: undefined,
586
- beforeReplicaId: undefined,
587
- after: record,
588
- afterReplicaId: getUuidReplicaIdentityBson(record, table.replicaIdColumns)
589
- });
590
- }
591
-
592
- at += chunk.rows.length;
593
- this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(chunk.rows.length);
594
-
595
- this.touch();
596
- }
597
-
598
- // Important: flush before marking progress
599
- await batch.flush();
600
- if (limited == null) {
601
- let lastKey: Uint8Array | undefined;
602
- if (q instanceof ChunkedSnapshotQuery) {
603
- lastKey = q.getLastKeySerialized();
604
- }
605
- if (lastCountTime < performance.now() - 10 * 60 * 1000) {
606
- // Even though we're doing the snapshot inside a transaction, the transaction uses
607
- // the default "Read Committed" isolation level. This means we can get new data
608
- // within the transaction, so we re-estimate the count every 10 minutes when replicating
609
- // large tables.
610
- totalEstimatedCount = await this.estimatedCountNumber(db, table);
611
- lastCountTime = performance.now();
612
- }
613
- table = await batch.updateTableProgress(table, {
614
- lastKey: lastKey,
615
- replicatedCount: at,
616
- totalEstimatedCount: totalEstimatedCount
617
- });
618
- this.relationCache.update(table);
619
-
620
- this.logger.info(`Replicating ${table.qualifiedName} ${table.formatSnapshotProgress()}`);
621
- } else {
622
- this.logger.info(`Replicating ${table.qualifiedName} ${at}/${limited.length} for resnapshot`);
623
- }
624
-
625
- if (this.abort_signal.aborted) {
626
- // We only abort after flushing
627
- throw new ReplicationAbortedError(`Initial replication interrupted`);
628
- }
629
- }
630
- }
631
-
632
- async handleRelation(options: {
633
- batch: storage.BucketStorageBatch;
634
- descriptor: SourceEntityDescriptor;
635
- snapshot: boolean;
636
- referencedTypeIds: number[];
637
- }) {
638
- const { batch, descriptor, snapshot, referencedTypeIds } = options;
639
-
640
- if (!descriptor.objectId && typeof descriptor.objectId != 'number') {
641
- throw new ReplicationAssertionError(`objectId expected, got ${typeof descriptor.objectId}`);
642
- }
643
- const result = await this.storage.resolveTable({
644
- group_id: this.group_id,
645
- connection_id: this.connection_id,
646
- connection_tag: this.connections.connectionTag,
647
- entity_descriptor: descriptor,
648
- sync_rules: this.sync_rules
649
- });
650
- this.relationCache.update(result.table);
651
-
652
- // Drop conflicting tables. This includes for example renamed tables.
653
- await batch.drop(result.dropTables);
654
-
655
- // Ensure we have a description for custom types referenced in the table.
656
- await this.connections.types.fetchTypes(referencedTypeIds);
657
-
658
- // Snapshot if:
659
- // 1. Snapshot is requested (false for initial snapshot, since that process handles it elsewhere)
660
- // 2. Snapshot is not already done, AND:
661
- // 3. The table is used in sync rules.
662
- const shouldSnapshot = snapshot && !result.table.snapshotComplete && result.table.syncAny;
663
-
664
- if (shouldSnapshot) {
665
- // Truncate this table, in case a previous snapshot was interrupted.
666
- await batch.truncate([result.table]);
667
-
668
- // Start the snapshot inside a transaction.
669
- // We use a dedicated connection for this.
670
- const db = await this.connections.snapshotConnection();
671
- try {
672
- const table = await this.snapshotTableInTx(batch, db, result.table);
673
- // After the table snapshot, we wait for replication to catch up.
674
- // To make sure there is actually something to replicate, we send a keepalive
675
- // message.
676
- await sendKeepAlive(db);
677
- return table;
678
- } finally {
679
- await db.end();
680
- }
681
- }
682
-
683
- return result.table;
684
- }
685
-
686
- /**
687
- * Process rows that have missing TOAST values.
688
- *
689
- * This can happen during edge cases in the chunked initial snapshot process.
690
- *
691
- * We handle this similar to an inline table snapshot, but limited to the specific
692
- * set of rows.
693
- */
694
- private async resnapshot(batch: BucketStorageBatch, rows: MissingRow[]) {
695
- const byTable = new Map<number, MissingRow[]>();
696
- for (let row of rows) {
697
- const relId = row.table.objectId as number; // always a number for postgres
698
- if (!byTable.has(relId)) {
699
- byTable.set(relId, []);
700
- }
701
- byTable.get(relId)!.push(row);
702
- }
703
- const db = await this.connections.snapshotConnection();
704
- try {
705
- for (let rows of byTable.values()) {
706
- const table = rows[0].table;
707
- await this.snapshotTableInTx(
708
- batch,
709
- db,
710
- table,
711
- rows.map((r) => r.key)
712
- );
713
- }
714
- // Even with resnapshot, we need to wait until we get a new consistent checkpoint
715
- // after the snapshot, so we need to send a keepalive message.
716
- await sendKeepAlive(db);
717
- } finally {
718
- await db.end();
719
- }
720
- }
721
-
722
- private getTable(relationId: number): storage.SourceTable {
723
- const table = this.relationCache.get(relationId);
724
- if (table == null) {
725
- // We should always receive a replication message before the relation is used.
726
- // If we can't find it, it's a bug.
727
- throw new ReplicationAssertionError(`Missing relation cache for ${relationId}`);
728
- }
729
- return table;
730
- }
731
-
732
- private syncRulesRecord(row: SqliteInputRow): SqliteRow;
733
- private syncRulesRecord(row: SqliteInputRow | undefined): SqliteRow | undefined;
734
-
735
- private syncRulesRecord(row: SqliteInputRow | undefined): SqliteRow | undefined {
736
- if (row == null) {
737
- return undefined;
738
- }
739
- return this.sync_rules.applyRowContext<never>(row);
740
- }
741
-
742
- private toastableSyncRulesRecord(row: ToastableSqliteRow<SqliteInputValue>): ToastableSqliteRow {
743
- return this.sync_rules.applyRowContext(row);
744
- }
745
-
746
- async writeChange(
747
- batch: storage.BucketStorageBatch,
748
- msg: pgwire.PgoutputMessage
749
- ): Promise<storage.FlushedResult | null> {
750
- if (msg.lsn == null) {
751
- return null;
752
- }
753
- if (msg.tag == 'insert' || msg.tag == 'update' || msg.tag == 'delete') {
754
- const table = this.getTable(getRelId(msg.relation));
755
- if (!table.syncAny) {
756
- this.logger.debug(`Table ${table.qualifiedName} not used in sync rules - skipping`);
757
- return null;
758
- }
759
-
760
- if (msg.tag == 'insert') {
761
- this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(1);
762
- const baseRecord = this.syncRulesRecord(this.connections.types.constructAfterRecord(msg));
763
- return await batch.save({
764
- tag: storage.SaveOperationTag.INSERT,
765
- sourceTable: table,
766
- before: undefined,
767
- beforeReplicaId: undefined,
768
- after: baseRecord,
769
- afterReplicaId: getUuidReplicaIdentityBson(baseRecord, table.replicaIdColumns)
770
- });
771
- } else if (msg.tag == 'update') {
772
- this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(1);
773
- // "before" may be null if the replica id columns are unchanged
774
- // It's fine to treat that the same as an insert.
775
- const before = this.syncRulesRecord(this.connections.types.constructBeforeRecord(msg));
776
- const after = this.toastableSyncRulesRecord(this.connections.types.constructAfterRecord(msg));
777
- return await batch.save({
778
- tag: storage.SaveOperationTag.UPDATE,
779
- sourceTable: table,
780
- before: before,
781
- beforeReplicaId: before ? getUuidReplicaIdentityBson(before, table.replicaIdColumns) : undefined,
782
- after: after,
783
- afterReplicaId: getUuidReplicaIdentityBson(after, table.replicaIdColumns)
784
- });
785
- } else if (msg.tag == 'delete') {
786
- this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(1);
787
- const before = this.syncRulesRecord(this.connections.types.constructBeforeRecord(msg)!);
788
-
789
- return await batch.save({
790
- tag: storage.SaveOperationTag.DELETE,
791
- sourceTable: table,
792
- before: before,
793
- beforeReplicaId: getUuidReplicaIdentityBson(before, table.replicaIdColumns),
794
- after: undefined,
795
- afterReplicaId: undefined
796
- });
797
- }
798
- } else if (msg.tag == 'truncate') {
799
- let tables: storage.SourceTable[] = [];
800
- for (let relation of msg.relations) {
801
- const table = this.getTable(getRelId(relation));
802
- tables.push(table);
803
- }
804
- return await batch.truncate(tables);
805
- }
806
- return null;
807
- }
808
-
809
- async replicate() {
810
- try {
811
- // If anything errors here, the entire replication process is halted, and
812
- // all connections automatically closed, including this one.
813
- this.initialSnapshotPromise = (async () => {
814
- const initReplicationConnection = await this.connections.replicationConnection();
815
- await this.initReplication(initReplicationConnection);
816
- await initReplicationConnection.end();
817
- })();
818
-
819
- await this.initialSnapshotPromise;
820
-
821
- // At this point, the above connection has often timed out, so we start a new one
822
- const streamReplicationConnection = await this.connections.replicationConnection();
823
- await this.streamChanges(streamReplicationConnection);
824
- await streamReplicationConnection.end();
825
- } catch (e) {
826
- await this.storage.reportError(e);
827
- throw e;
828
- }
829
- }
830
-
831
- /**
832
- * After calling replicate(), call this to wait for the initial snapshot to complete.
833
- *
834
- * For tests only.
835
- */
836
- async waitForInitialSnapshot() {
837
- if (this.initialSnapshotPromise == null) {
838
- throw new ReplicationAssertionError(`Initial snapshot not started yet`);
839
- }
840
- return this.initialSnapshotPromise;
841
- }
842
-
843
- async initReplication(replicationConnection: pgwire.PgConnection) {
844
- const result = await this.initSlot();
845
- if (result.needsInitialSync) {
846
- await this.startInitialReplication(replicationConnection, result);
847
- }
848
- }
849
-
850
- async streamChanges(replicationConnection: pgwire.PgConnection) {
851
- try {
852
- await this.streamChangesInternal(replicationConnection);
853
- } catch (e) {
854
- if (isReplicationSlotInvalidError(e)) {
855
- throw new MissingReplicationSlotError(e.message, e);
856
- }
857
- throw e;
858
- }
859
- }
860
-
861
- private async streamChangesInternal(replicationConnection: pgwire.PgConnection) {
862
- // When changing any logic here, check /docs/wal-lsns.md.
863
- const { createEmptyCheckpoints } = await this.ensureStorageCompatibility();
864
-
865
- const replicationOptions: Record<string, string> = {
866
- proto_version: '1',
867
- publication_names: PUBLICATION_NAME
868
- };
869
-
870
- /**
871
- * Viewing the contents of logical messages emitted with `pg_logical_emit_message`
872
- * is only supported on Postgres >= 14.0.
873
- * https://www.postgresql.org/docs/14/protocol-logical-replication.html
874
- */
875
- const exposesLogicalMessages = await this.checkLogicalMessageSupport();
876
- if (exposesLogicalMessages) {
877
- /**
878
- * Only add this option if the Postgres server supports it.
879
- * Adding the option to a server that doesn't support it will throw an exception when starting logical replication.
880
- * Error: `unrecognized pgoutput option: messages`
881
- */
882
- replicationOptions['messages'] = 'true';
883
- }
884
-
885
- const replicationStream = replicationConnection.logicalReplication({
886
- slot: this.slot_name,
887
- options: replicationOptions
888
- });
889
-
890
- this.startedStreaming = true;
891
-
892
- let resnapshot: { table: storage.SourceTable; key: PrimaryKeyValue }[] = [];
893
-
894
- const markRecordUnavailable = (record: SaveUpdate) => {
895
- if (!IdSnapshotQuery.supports(record.sourceTable)) {
896
- // If it's not supported, it's also safe to ignore
897
- return;
898
- }
899
- let key: PrimaryKeyValue = {};
900
- for (let column of record.sourceTable.replicaIdColumns) {
901
- const name = column.name;
902
- const value = record.after[name];
903
- if (value == null) {
904
- // We don't expect this to actually happen.
905
- // The key should always be present in the "after" record.
906
- return;
907
- }
908
- // We just need a consistent representation of the primary key, and don't care about fixed quirks.
909
- key[name] = applyValueContext(value, CompatibilityContext.FULL_BACKWARDS_COMPATIBILITY);
910
- }
911
- resnapshot.push({
912
- table: record.sourceTable,
913
- key: key
914
- });
915
- };
916
-
917
- await this.storage.startBatch(
918
- {
919
- logger: this.logger,
920
- zeroLSN: ZERO_LSN,
921
- defaultSchema: POSTGRES_DEFAULT_SCHEMA,
922
- storeCurrentData: true,
923
- skipExistingRows: false,
924
- markRecordUnavailable
925
- },
926
- async (batch) => {
927
- // We don't handle any plain keepalive messages while we have transactions.
928
- // While we have transactions, we use that to advance the position.
929
- // Replication never starts in the middle of a transaction, so this starts as false.
930
- let skipKeepalive = false;
931
- let count = 0;
932
-
933
- for await (const chunk of replicationStream.pgoutputDecode()) {
934
- this.touch();
935
-
936
- if (this.abort_signal.aborted) {
937
- break;
938
- }
939
-
940
- // chunkLastLsn may come from normal messages in the chunk,
941
- // or from a PrimaryKeepalive message.
942
- const { messages, lastLsn: chunkLastLsn } = chunk;
943
-
944
- /**
945
- * We can check if an explicit keepalive was sent if `exposesLogicalMessages == true`.
946
- * If we can't check the logical messages, we should assume a keepalive if we
947
- * receive an empty array of messages in a replication event.
948
- */
949
- const assumeKeepAlive = !exposesLogicalMessages;
950
- let keepAliveDetected = false;
951
- const lastCommit = messages.findLast((msg) => msg.tag == 'commit');
952
-
953
- for (const msg of messages) {
954
- if (msg.tag == 'relation') {
955
- await this.handleRelation({
956
- batch,
957
- descriptor: getPgOutputRelation(msg),
958
- snapshot: true,
959
- referencedTypeIds: referencedColumnTypeIds(msg)
960
- });
961
- } else if (msg.tag == 'begin') {
962
- // This may span multiple transactions in the same chunk, or even across chunks.
963
- skipKeepalive = true;
964
- this.replicationLag.trackUncommittedChange(new Date(Number(msg.commitTime / 1000n)));
965
- } else if (msg.tag == 'commit') {
966
- this.metrics.getCounter(ReplicationMetric.TRANSACTIONS_REPLICATED).add(1);
967
- if (msg == lastCommit) {
968
- // Only commit if this is the last commit in the chunk.
969
- // This effectively lets us batch multiple transactions within the same chunk
970
- // into a single flush, increasing throughput for many small transactions.
971
- skipKeepalive = false;
972
- // flush() must be before the resnapshot check - that is
973
- // typically what reports the resnapshot records.
974
- await batch.flush({ oldestUncommittedChange: this.replicationLag.oldestUncommittedChange });
975
- // This _must_ be checked after the flush(), and before
976
- // commit() or ack(). We never persist the resnapshot list,
977
- // so we have to process it before marking our progress.
978
- if (resnapshot.length > 0) {
979
- await this.resnapshot(batch, resnapshot);
980
- resnapshot = [];
981
- }
982
- const { checkpointBlocked } = await batch.commit(msg.lsn!, {
983
- createEmptyCheckpoints,
984
- oldestUncommittedChange: this.replicationLag.oldestUncommittedChange
985
- });
986
- await this.ack(msg.lsn!, replicationStream);
987
- if (!checkpointBlocked) {
988
- this.replicationLag.markCommitted();
989
- }
990
- }
991
- } else {
992
- if (count % 100 == 0) {
993
- this.logger.info(`Replicating op ${count} ${msg.lsn}`);
994
- }
995
-
996
- /**
997
- * If we can see the contents of logical messages, then we can check if a keepalive
998
- * message is present. We only perform a keepalive (below) if we explicitly detect a keepalive message.
999
- * If we can't see the contents of logical messages, then we should assume a keepalive is required
1000
- * due to the default value of `assumeKeepalive`.
1001
- */
1002
- if (exposesLogicalMessages && isKeepAliveMessage(msg)) {
1003
- keepAliveDetected = true;
1004
- }
1005
-
1006
- count += 1;
1007
- const flushResult = await this.writeChange(batch, msg);
1008
- if (flushResult != null && resnapshot.length > 0) {
1009
- // If we have large transactions, we also need to flush the resnapshot list
1010
- // periodically.
1011
- // TODO: make sure this bit is actually triggered
1012
- await this.resnapshot(batch, resnapshot);
1013
- resnapshot = [];
1014
- }
1015
- }
1016
- }
1017
-
1018
- if (!skipKeepalive) {
1019
- if (assumeKeepAlive || keepAliveDetected) {
1020
- // Reset the detection flag.
1021
- keepAliveDetected = false;
1022
-
1023
- // In a transaction, we ack and commit according to the transaction progress.
1024
- // Outside transactions, we use the PrimaryKeepalive messages to advance progress.
1025
- // Big caveat: This _must not_ be used to skip individual messages, since this LSN
1026
- // may be in the middle of the next transaction.
1027
- // It must only be used to associate checkpoints with LSNs.
1028
- const { checkpointBlocked } = await batch.keepalive(chunkLastLsn);
1029
- if (!checkpointBlocked) {
1030
- this.replicationLag.clearUncommittedChange();
1031
- }
1032
-
1033
- this.replicationLag.markStarted();
1034
- }
1035
-
1036
- // We receive chunks with empty messages often (about each second).
1037
- // Acknowledging here progresses the slot past these and frees up resources.
1038
- await this.ack(chunkLastLsn, replicationStream);
1039
- }
1040
-
1041
- this.metrics.getCounter(ReplicationMetric.CHUNKS_REPLICATED).add(1);
1042
- }
1043
- }
1044
- );
1045
- }
1046
-
1047
- async ack(lsn: string, replicationStream: pgwire.ReplicationStream) {
1048
- if (lsn == ZERO_LSN) {
1049
- return;
1050
- }
1051
-
1052
- replicationStream.ack(lsn);
1053
- }
1054
-
1055
- /**
1056
- * Ensures that the storage is compatible with the replication connection.
1057
- * @throws {DatabaseConnectionError} If the storage is not compatible with the replication connection.
1058
- */
1059
- protected async ensureStorageCompatibility(): Promise<storage.ResolvedBucketBatchCommitOptions> {
1060
- const supportsLogicalMessages = await this.checkLogicalMessageSupport();
1061
-
1062
- const storageIdentifier = await this.storage.factory.getSystemIdentifier();
1063
- if (storageIdentifier.type != lib_postgres.POSTGRES_CONNECTION_TYPE) {
1064
- return {
1065
- // Keep the same behaviour as before allowing Postgres storage.
1066
- createEmptyCheckpoints: true,
1067
- oldestUncommittedChange: null
1068
- };
1069
- }
1070
-
1071
- const parsedStorageIdentifier = lib_postgres.utils.decodePostgresSystemIdentifier(storageIdentifier.id);
1072
- /**
1073
- * Check if the same server is being used for both the sync bucket storage and the logical replication.
1074
- */
1075
- const replicationIdentifier = await lib_postgres.utils.queryPostgresSystemIdentifier(this.connections.pool);
1076
-
1077
- if (!supportsLogicalMessages && replicationIdentifier.server_id == parsedStorageIdentifier.server_id) {
1078
- throw new DatabaseConnectionError(
1079
- ErrorCode.PSYNC_S1144,
1080
- `Separate Postgres servers are required for the replication source and sync bucket storage when using Postgres versions below 14.0.`,
1081
- new Error('Postgres version is below 14')
1082
- );
1083
- }
1084
-
1085
- return {
1086
- /**
1087
- * Don't create empty checkpoints if the same Postgres database is used for the data source
1088
- * and sync bucket storage. Creating empty checkpoints will cause WAL feedback loops.
1089
- */
1090
- createEmptyCheckpoints: replicationIdentifier.database_name != parsedStorageIdentifier.database_name,
1091
- oldestUncommittedChange: null
1092
- };
1093
- }
1094
-
1095
- /**
1096
- * Check if the replication connection Postgres server supports
1097
- * viewing the contents of logical replication messages.
1098
- */
1099
- protected async checkLogicalMessageSupport() {
1100
- const version = await this.connections.getServerVersion();
1101
- return version ? version.compareMain('14.0.0') >= 0 : false;
1102
- }
1103
-
1104
- getReplicationLagMillis(): number | undefined {
1105
- return this.replicationLag.getLagMillis();
1106
- }
1107
-
1108
- private touch() {
1109
- container.probes.touch().catch((e) => {
1110
- this.logger.error(`Error touching probe`, e);
1111
- });
1112
- }
1113
- }
1114
-
1115
- function isReplicationSlotInvalidError(e: any) {
1116
- // We could access the error code from pgwire using this:
1117
- // e[Symbol.for('pg.ErrorCode')]
1118
- // However, we typically get a generic code such as 42704 (undefined_object), which does not
1119
- // help much. So we check the actual error message.
1120
- const message = e.message ?? '';
1121
-
1122
- // Sample: record with incorrect prev-link 10000/10000 at 0/18AB778
1123
- // Seen during development. Some internal error, fixed by re-creating slot.
1124
- //
1125
- // Sample: publication "powersync" does not exist
1126
- // Happens when publication deleted or never created.
1127
- // Slot must be re-created in this case.
1128
- return (
1129
- /incorrect prev-link/.test(message) ||
1130
- /replication slot.*does not exist/.test(message) ||
1131
- /publication.*does not exist/.test(message) ||
1132
- // Postgres 18 - exceeded max_slot_wal_keep_size
1133
- /can no longer access replication slot/.test(message) ||
1134
- // Postgres 17 - exceeded max_slot_wal_keep_size
1135
- /can no longer get changes from replication slot/.test(message)
1136
- );
1137
- }