@powersync/service-module-postgres 0.19.2 → 0.19.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76) hide show
  1. package/dist/api/PostgresRouteAPIAdapter.d.ts +1 -1
  2. package/dist/api/PostgresRouteAPIAdapter.js +63 -72
  3. package/dist/api/PostgresRouteAPIAdapter.js.map +1 -1
  4. package/dist/module/PostgresModule.js.map +1 -1
  5. package/dist/replication/MissingReplicationSlotError.d.ts +41 -0
  6. package/dist/replication/MissingReplicationSlotError.js +33 -0
  7. package/dist/replication/MissingReplicationSlotError.js.map +1 -0
  8. package/dist/replication/PostgresErrorRateLimiter.js +1 -1
  9. package/dist/replication/PostgresErrorRateLimiter.js.map +1 -1
  10. package/dist/replication/SnapshotQuery.js +2 -2
  11. package/dist/replication/SnapshotQuery.js.map +1 -1
  12. package/dist/replication/WalStream.d.ts +37 -14
  13. package/dist/replication/WalStream.js +145 -41
  14. package/dist/replication/WalStream.js.map +1 -1
  15. package/dist/replication/WalStreamReplicationJob.d.ts +1 -1
  16. package/dist/replication/WalStreamReplicationJob.js +7 -4
  17. package/dist/replication/WalStreamReplicationJob.js.map +1 -1
  18. package/dist/replication/WalStreamReplicator.d.ts +0 -1
  19. package/dist/replication/WalStreamReplicator.js +0 -22
  20. package/dist/replication/WalStreamReplicator.js.map +1 -1
  21. package/dist/replication/replication-index.d.ts +3 -1
  22. package/dist/replication/replication-index.js +3 -1
  23. package/dist/replication/replication-index.js.map +1 -1
  24. package/dist/replication/replication-utils.d.ts +3 -11
  25. package/dist/replication/replication-utils.js +101 -164
  26. package/dist/replication/replication-utils.js.map +1 -1
  27. package/dist/replication/wal-budget-utils.d.ts +23 -0
  28. package/dist/replication/wal-budget-utils.js +57 -0
  29. package/dist/replication/wal-budget-utils.js.map +1 -0
  30. package/dist/types/registry.js +1 -1
  31. package/dist/types/registry.js.map +1 -1
  32. package/package.json +15 -11
  33. package/sql/check-source-configuration.plpgsql +13 -0
  34. package/sql/debug-tables-info-batched.plpgsql +230 -0
  35. package/CHANGELOG.md +0 -843
  36. package/src/api/PostgresRouteAPIAdapter.ts +0 -356
  37. package/src/index.ts +0 -1
  38. package/src/module/PostgresModule.ts +0 -122
  39. package/src/replication/ConnectionManagerFactory.ts +0 -33
  40. package/src/replication/PgManager.ts +0 -122
  41. package/src/replication/PgRelation.ts +0 -41
  42. package/src/replication/PostgresErrorRateLimiter.ts +0 -48
  43. package/src/replication/SnapshotQuery.ts +0 -213
  44. package/src/replication/WalStream.ts +0 -1157
  45. package/src/replication/WalStreamReplicationJob.ts +0 -138
  46. package/src/replication/WalStreamReplicator.ts +0 -79
  47. package/src/replication/replication-index.ts +0 -5
  48. package/src/replication/replication-utils.ts +0 -398
  49. package/src/types/registry.ts +0 -275
  50. package/src/types/resolver.ts +0 -227
  51. package/src/types/types.ts +0 -44
  52. package/src/utils/application-name.ts +0 -8
  53. package/src/utils/migration_lib.ts +0 -80
  54. package/src/utils/populate_test_data.ts +0 -37
  55. package/src/utils/populate_test_data_worker.ts +0 -53
  56. package/src/utils/postgres_version.ts +0 -8
  57. package/test/src/checkpoints.test.ts +0 -86
  58. package/test/src/chunked_snapshots.test.ts +0 -161
  59. package/test/src/env.ts +0 -11
  60. package/test/src/large_batch.test.ts +0 -241
  61. package/test/src/pg_test.test.ts +0 -729
  62. package/test/src/resuming_snapshots.test.ts +0 -160
  63. package/test/src/route_api_adapter.test.ts +0 -62
  64. package/test/src/schema_changes.test.ts +0 -655
  65. package/test/src/setup.ts +0 -12
  66. package/test/src/slow_tests.test.ts +0 -519
  67. package/test/src/storage_combination.test.ts +0 -35
  68. package/test/src/types/registry.test.ts +0 -149
  69. package/test/src/util.ts +0 -151
  70. package/test/src/validation.test.ts +0 -63
  71. package/test/src/wal_stream.test.ts +0 -607
  72. package/test/src/wal_stream_utils.ts +0 -284
  73. package/test/tsconfig.json +0 -27
  74. package/tsconfig.json +0 -34
  75. package/tsconfig.tsbuildinfo +0 -1
  76. package/vitest.config.ts +0 -3
@@ -1,607 +0,0 @@
1
- import { MissingReplicationSlotError } from '@module/replication/WalStream.js';
2
- import { METRICS_HELPER, putOp, removeOp } from '@powersync/service-core-tests';
3
- import { pgwireRows } from '@powersync/service-jpgwire';
4
- import { JSONBig } from '@powersync/service-jsonbig';
5
- import { ReplicationMetric } from '@powersync/service-types';
6
- import * as crypto from 'crypto';
7
- import { describe, expect, test } from 'vitest';
8
- import { describeWithStorage, StorageVersionTestContext } from './util.js';
9
- import { WalStreamTestContext, withMaxWalSize } from './wal_stream_utils.js';
10
-
11
- const BASIC_SYNC_RULES = `
12
- bucket_definitions:
13
- global:
14
- data:
15
- - SELECT id, description FROM "test_data"
16
- `;
17
-
18
- describe('wal stream', () => {
19
- describeWithStorage({ timeout: 20_000 }, defineWalStreamTests);
20
- });
21
-
22
- function defineWalStreamTests({ factory, storageVersion }: StorageVersionTestContext) {
23
- const openContext = (options?: Parameters<typeof WalStreamTestContext.open>[1]) => {
24
- return WalStreamTestContext.open(factory, { ...options, storageVersion });
25
- };
26
- test('replicating basic values', async () => {
27
- await using context = await openContext();
28
- const { pool } = context;
29
- await context.updateSyncRules(`
30
- bucket_definitions:
31
- global:
32
- data:
33
- - SELECT id, description, num FROM "test_data"`);
34
-
35
- await pool.query(`DROP TABLE IF EXISTS test_data`);
36
- await pool.query(
37
- `CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text, num int8)`
38
- );
39
-
40
- await context.initializeReplication();
41
-
42
- const startRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0;
43
- const startTxCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.TRANSACTIONS_REPLICATED)) ?? 0;
44
-
45
- const [{ test_id }] = pgwireRows(
46
- await pool.query(
47
- `INSERT INTO test_data(description, num) VALUES('test1', 1152921504606846976) returning id as test_id`
48
- )
49
- );
50
-
51
- const data = await context.getBucketData('global[]');
52
-
53
- expect(data).toMatchObject([putOp('test_data', { id: test_id, description: 'test1', num: 1152921504606846976n })]);
54
- const endRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0;
55
- const endTxCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.TRANSACTIONS_REPLICATED)) ?? 0;
56
- expect(endRowCount - startRowCount).toEqual(1);
57
- // In some rare cases there may be additional empty transactions, so we allow for that.
58
- expect(endTxCount - startTxCount).toBeGreaterThanOrEqual(1);
59
- });
60
-
61
- test('replicating case sensitive table', async () => {
62
- await using context = await openContext();
63
- const { pool } = context;
64
- await context.updateSyncRules(`
65
- bucket_definitions:
66
- global:
67
- data:
68
- - SELECT id, description FROM "test_DATA"
69
- `);
70
-
71
- await pool.query(`DROP TABLE IF EXISTS "test_DATA"`);
72
- await pool.query(`CREATE TABLE "test_DATA"(id uuid primary key default uuid_generate_v4(), description text)`);
73
-
74
- await context.initializeReplication();
75
-
76
- const startRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0;
77
- const startTxCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.TRANSACTIONS_REPLICATED)) ?? 0;
78
-
79
- const [{ test_id }] = pgwireRows(
80
- await pool.query(`INSERT INTO "test_DATA"(description) VALUES('test1') returning id as test_id`)
81
- );
82
-
83
- const data = await context.getBucketData('global[]');
84
-
85
- expect(data).toMatchObject([putOp('test_DATA', { id: test_id, description: 'test1' })]);
86
- const endRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0;
87
- const endTxCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.TRANSACTIONS_REPLICATED)) ?? 0;
88
- expect(endRowCount - startRowCount).toEqual(1);
89
- expect(endTxCount - startTxCount).toBeGreaterThanOrEqual(1);
90
- });
91
-
92
- test('replicating TOAST values', async () => {
93
- await using context = await openContext();
94
- const { pool } = context;
95
- await context.updateSyncRules(`
96
- bucket_definitions:
97
- global:
98
- data:
99
- - SELECT id, name, description FROM "test_data"
100
- `);
101
-
102
- await pool.query(`DROP TABLE IF EXISTS test_data`);
103
- await pool.query(
104
- `CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), name text, description text)`
105
- );
106
-
107
- await context.replicateSnapshot();
108
-
109
- // Must be > 8kb after compression
110
- const largeDescription = crypto.randomBytes(20_000).toString('hex');
111
- const [{ test_id }] = pgwireRows(
112
- await pool.query({
113
- statement: `INSERT INTO test_data(name, description) VALUES('test1', $1) returning id as test_id`,
114
- params: [{ type: 'varchar', value: largeDescription }]
115
- })
116
- );
117
-
118
- await pool.query(`UPDATE test_data SET name = 'test2' WHERE id = '${test_id}'`);
119
-
120
- const data = await context.getBucketData('global[]');
121
- expect(data.slice(0, 1)).toMatchObject([
122
- putOp('test_data', { id: test_id, name: 'test1', description: largeDescription })
123
- ]);
124
- expect(data.slice(1)).toMatchObject([
125
- putOp('test_data', { id: test_id, name: 'test2', description: largeDescription })
126
- ]);
127
- });
128
-
129
- test('replicating TRUNCATE', async () => {
130
- await using context = await openContext();
131
- const { pool } = context;
132
- const syncRuleContent = `
133
- bucket_definitions:
134
- global:
135
- data:
136
- - SELECT id, description FROM "test_data"
137
- by_test_data:
138
- parameters: SELECT id FROM test_data WHERE id = token_parameters.user_id
139
- data: []
140
- `;
141
- await context.updateSyncRules(syncRuleContent);
142
- await pool.query(`DROP TABLE IF EXISTS test_data`);
143
- await pool.query(`CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`);
144
-
145
- await context.initializeReplication();
146
-
147
- const [{ test_id }] = pgwireRows(
148
- await pool.query(`INSERT INTO test_data(description) VALUES('test1') returning id as test_id`)
149
- );
150
- await pool.query(`TRUNCATE test_data`);
151
-
152
- const data = await context.getBucketData('global[]');
153
-
154
- expect(data).toMatchObject([
155
- putOp('test_data', { id: test_id, description: 'test1' }),
156
- removeOp('test_data', test_id)
157
- ]);
158
- });
159
-
160
- test('replicating changing primary key', async () => {
161
- await using context = await openContext();
162
- const { pool } = context;
163
- await context.updateSyncRules(BASIC_SYNC_RULES);
164
- await pool.query(`DROP TABLE IF EXISTS test_data`);
165
- await pool.query(`CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`);
166
-
167
- await context.initializeReplication();
168
-
169
- const [{ test_id }] = pgwireRows(
170
- await pool.query(`INSERT INTO test_data(description) VALUES('test1') returning id as test_id`)
171
- );
172
-
173
- const [{ test_id: test_id2 }] = pgwireRows(
174
- await pool.query(
175
- `UPDATE test_data SET id = uuid_generate_v4(), description = 'test2a' WHERE id = '${test_id}' returning id as test_id`
176
- )
177
- );
178
-
179
- // Since we don't have an old copy of the record with the new primary key, this
180
- // may trigger a "resnapshot".
181
- await pool.query(`UPDATE test_data SET description = 'test2b' WHERE id = '${test_id2}'`);
182
-
183
- // Re-use old id again
184
- await pool.query(`INSERT INTO test_data(id, description) VALUES('${test_id}', 'test1b')`);
185
- await pool.query(`UPDATE test_data SET description = 'test1c' WHERE id = '${test_id}'`);
186
-
187
- const data = await context.getBucketData('global[]');
188
- expect(data).toMatchObject([
189
- // Initial insert
190
- putOp('test_data', { id: test_id, description: 'test1' }),
191
- // Update id, then description
192
- removeOp('test_data', test_id),
193
- putOp('test_data', { id: test_id2, description: 'test2a' }),
194
- putOp('test_data', { id: test_id2, description: 'test2b' }),
195
- // Re-use old id
196
- putOp('test_data', { id: test_id, description: 'test1b' }),
197
- putOp('test_data', { id: test_id, description: 'test1c' })
198
- ]);
199
- });
200
-
201
- test('initial sync', async () => {
202
- await using context = await openContext();
203
- const { pool } = context;
204
- await context.updateSyncRules(BASIC_SYNC_RULES);
205
-
206
- await pool.query(`DROP TABLE IF EXISTS test_data`);
207
- await pool.query(`CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`);
208
-
209
- const [{ test_id }] = pgwireRows(
210
- await pool.query(`INSERT INTO test_data(description) VALUES('test1') returning id as test_id`)
211
- );
212
-
213
- await context.replicateSnapshot();
214
-
215
- const data = await context.getBucketData('global[]');
216
- expect(data).toMatchObject([putOp('test_data', { id: test_id, description: 'test1' })]);
217
- });
218
-
219
- test('record too large', async () => {
220
- await using context = await openContext();
221
- await context.updateSyncRules(`bucket_definitions:
222
- global:
223
- data:
224
- - SELECT id, description, other FROM "test_data"`);
225
- const { pool } = context;
226
-
227
- await pool.query(`CREATE TABLE test_data(id text primary key, description text, other text)`);
228
-
229
- await context.replicateSnapshot();
230
-
231
- // 4MB
232
- const largeDescription = crypto.randomBytes(2_000_000).toString('hex');
233
- // 18MB
234
- const tooLargeDescription = crypto.randomBytes(9_000_000).toString('hex');
235
-
236
- await pool.query({
237
- statement: `INSERT INTO test_data(id, description, other) VALUES('t1', $1, 'foo')`,
238
- params: [{ type: 'varchar', value: tooLargeDescription }]
239
- });
240
- await pool.query({
241
- statement: `UPDATE test_data SET description = $1 WHERE id = 't1'`,
242
- params: [{ type: 'varchar', value: largeDescription }]
243
- });
244
-
245
- const data = await context.getBucketData('global[]');
246
- expect(data.length).toEqual(1);
247
- const row = JSON.parse(data[0].data as string);
248
- delete row.description;
249
- expect(row).toEqual({ id: 't1', other: 'foo' });
250
- delete data[0].data;
251
- expect(data[0]).toMatchObject({ object_id: 't1', object_type: 'test_data', op: 'PUT', op_id: '1' });
252
- });
253
-
254
- test('table not in sync rules', async () => {
255
- await using context = await openContext();
256
- const { pool } = context;
257
- await context.updateSyncRules(BASIC_SYNC_RULES);
258
-
259
- await pool.query(`CREATE TABLE test_donotsync(id uuid primary key default uuid_generate_v4(), description text)`);
260
-
261
- await context.initializeReplication();
262
-
263
- const startRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0;
264
- const startTxCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.TRANSACTIONS_REPLICATED)) ?? 0;
265
-
266
- await pool.query(`INSERT INTO test_donotsync(description) VALUES('test1') returning id as test_id`);
267
-
268
- const data = await context.getBucketData('global[]');
269
-
270
- expect(data).toMatchObject([]);
271
- const endRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0;
272
- const endTxCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.TRANSACTIONS_REPLICATED)) ?? 0;
273
-
274
- // There was a transaction, but we should not replicate any actual data
275
- expect(endRowCount - startRowCount).toEqual(0);
276
- expect(endTxCount - startTxCount).toBeGreaterThanOrEqual(1);
277
- });
278
-
279
- test('reporting slot issues', async () => {
280
- {
281
- await using context = await openContext();
282
- const { pool } = context;
283
- await context.updateSyncRules(`
284
- bucket_definitions:
285
- global:
286
- data:
287
- - SELECT id, description FROM "test_data"`);
288
-
289
- await pool.query(
290
- `CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text, num int8)`
291
- );
292
- await pool.query(
293
- `INSERT INTO test_data(id, description) VALUES('8133cd37-903b-4937-a022-7c8294015a3a', 'test1') returning id as test_id`
294
- );
295
- await context.replicateSnapshot();
296
-
297
- const data = await context.getBucketData('global[]');
298
-
299
- expect(data).toMatchObject([
300
- putOp('test_data', {
301
- id: '8133cd37-903b-4937-a022-7c8294015a3a',
302
- description: 'test1'
303
- })
304
- ]);
305
-
306
- expect(await context.storage!.getStatus()).toMatchObject({ active: true, snapshot_done: true });
307
- }
308
-
309
- {
310
- await using context = await openContext({ doNotClear: true });
311
- const { pool } = context;
312
- await pool.query('DROP PUBLICATION powersync');
313
- await pool.query(`UPDATE test_data SET description = 'updated'`);
314
- await pool.query('CREATE PUBLICATION powersync FOR ALL TABLES');
315
-
316
- const serverVersion = await context.connectionManager.getServerVersion();
317
-
318
- await context.loadActiveSyncRules();
319
-
320
- // Note: The actual error may be thrown either in replicateSnapshot(), or in getCheckpoint().
321
-
322
- if (serverVersion!.compareMain('18.0.0') >= 0) {
323
- // No error expected in Postgres 18. Replication keeps on working despite the
324
- // publication being re-created.
325
- await context.replicateSnapshot();
326
- await context.getCheckpoint();
327
- } else {
328
- // await context.getCheckpoint();
329
- // Postgres < 18 invalidates the replication slot when the publication is re-created.
330
- // In the service, this error is handled in WalStreamReplicationJob,
331
- // creating a new replication slot.
332
- await expect(async () => {
333
- await context.replicateSnapshot();
334
- await context.getCheckpoint();
335
- }).rejects.toThrowError(MissingReplicationSlotError);
336
- }
337
- }
338
- });
339
-
340
- test('dropped replication slot', async () => {
341
- {
342
- await using context = await openContext();
343
- const { pool } = context;
344
- await context.updateSyncRules(`
345
- bucket_definitions:
346
- global:
347
- data:
348
- - SELECT id, description FROM "test_data"`);
349
-
350
- await pool.query(
351
- `CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text, num int8)`
352
- );
353
- await pool.query(
354
- `INSERT INTO test_data(id, description) VALUES('8133cd37-903b-4937-a022-7c8294015a3a', 'test1') returning id as test_id`
355
- );
356
- await context.replicateSnapshot();
357
-
358
- const data = await context.getBucketData('global[]');
359
-
360
- expect(data).toMatchObject([
361
- putOp('test_data', {
362
- id: '8133cd37-903b-4937-a022-7c8294015a3a',
363
- description: 'test1'
364
- })
365
- ]);
366
-
367
- expect(await context.storage!.getStatus()).toMatchObject({ active: true, snapshot_done: true });
368
- }
369
-
370
- {
371
- await using context = await openContext({ doNotClear: true });
372
- const { pool } = context;
373
- const storage = await context.factory.getActiveStorage();
374
-
375
- // Here we explicitly drop the replication slot, which should always be handled.
376
- await pool.query({
377
- statement: `SELECT pg_drop_replication_slot($1)`,
378
- params: [{ type: 'varchar', value: storage?.slot_name! }]
379
- });
380
-
381
- await context.loadActiveSyncRules();
382
-
383
- // The error is handled on a higher level, which triggers
384
- // creating a new replication slot.
385
- await expect(async () => {
386
- await context.replicateSnapshot();
387
- }).rejects.toThrowError(MissingReplicationSlotError);
388
- }
389
- });
390
-
391
- test('replication slot lost', async () => {
392
- await using baseContext = await openContext({ doNotClear: true });
393
-
394
- const serverVersion = await baseContext.connectionManager.getServerVersion();
395
- if (serverVersion!.compareMain('13.0.0') < 0) {
396
- console.warn(`max_slot_wal_keep_size not supported on postgres ${serverVersion} - skipping test.`);
397
- return;
398
- }
399
-
400
- // Configure max_slot_wal_keep_size for the test, reverting afterwards.
401
- await using s = await withMaxWalSize(baseContext.pool, '100MB');
402
-
403
- {
404
- await using context = await openContext();
405
- const { pool } = context;
406
- await context.updateSyncRules(`
407
- bucket_definitions:
408
- global:
409
- data:
410
- - SELECT id, description FROM "test_data"`);
411
-
412
- await pool.query(
413
- `CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text, num int8)`
414
- );
415
- await pool.query(
416
- `INSERT INTO test_data(id, description) VALUES('8133cd37-903b-4937-a022-7c8294015a3a', 'test1') returning id as test_id`
417
- );
418
- await context.replicateSnapshot();
419
-
420
- const data = await context.getBucketData('global[]');
421
-
422
- expect(data).toMatchObject([
423
- putOp('test_data', {
424
- id: '8133cd37-903b-4937-a022-7c8294015a3a',
425
- description: 'test1'
426
- })
427
- ]);
428
-
429
- expect(await context.storage!.getStatus()).toMatchObject({ active: true, snapshot_done: true });
430
- }
431
-
432
- {
433
- await using context = await openContext({ doNotClear: true });
434
- const { pool } = context;
435
- const storage = await context.factory.getActiveStorage();
436
- const slotName = storage?.slot_name!;
437
-
438
- // Here, we write data to the WAL until the replication slot is lost.
439
- const TRIES = 100;
440
- for (let i = 0; i < TRIES; i++) {
441
- // Write something to the WAL.
442
- await pool.query(`select pg_logical_emit_message(true, 'test', 'x')`);
443
- // Switch WAL file. With default settings, each WAL file is around 16MB.
444
- await pool.query(`select pg_switch_wal()`);
445
- // Checkpoint command forces the old WAL files to be archived/removed.
446
- await pool.query(`checkpoint`);
447
- // Now check if the slot is still active.
448
- const slot = pgwireRows(
449
- await context.pool.query({
450
- statement: `select slot_name, wal_status, safe_wal_size, pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn) as lag from pg_replication_slots where slot_name = $1`,
451
- params: [{ type: 'varchar', value: slotName }]
452
- })
453
- )[0];
454
- if (slot.wal_status == 'lost') {
455
- break;
456
- } else if (i == TRIES - 1) {
457
- throw new Error(
458
- `Could not generate test conditions to expire replication slot. Current status: ${JSONBig.stringify(slot)}`
459
- );
460
- }
461
- }
462
-
463
- await context.loadActiveSyncRules();
464
-
465
- // The error is handled on a higher level, which triggers
466
- // creating a new replication slot.
467
- await expect(async () => {
468
- await context.replicateSnapshot();
469
- }).rejects.toThrowError(MissingReplicationSlotError);
470
- }
471
- });
472
-
473
- test('old date format', async () => {
474
- await using context = await openContext();
475
- await context.updateSyncRules(BASIC_SYNC_RULES);
476
-
477
- const { pool } = context;
478
- await pool.query(`DROP TABLE IF EXISTS test_data`);
479
- await pool.query(`CREATE TABLE test_data(id text primary key, description timestamptz);`);
480
-
481
- await context.initializeReplication();
482
- await pool.query(`INSERT INTO test_data(id, description) VALUES ('t1', '2025-09-10 15:17:14+02')`);
483
-
484
- let data = await context.getBucketData('global[]');
485
- expect(data).toMatchObject([putOp('test_data', { id: 't1', description: '2025-09-10 13:17:14Z' })]);
486
- });
487
-
488
- test('new date format', async () => {
489
- await using context = await openContext();
490
- await context.updateSyncRules(`
491
- streams:
492
- stream:
493
- query: SELECT id, * FROM "test_data"
494
-
495
- config:
496
- edition: 2
497
- `);
498
- const { pool } = context;
499
- await pool.query(`DROP TABLE IF EXISTS test_data`);
500
- await pool.query(`CREATE TABLE test_data(id text primary key, description timestamptz);`);
501
-
502
- await context.initializeReplication();
503
- await pool.query(`INSERT INTO test_data(id, description) VALUES ('t1', '2025-09-10 15:17:14+02')`);
504
-
505
- const data = await context.getBucketData('stream|0[]');
506
- expect(data).toMatchObject([putOp('test_data', { id: 't1', description: '2025-09-10T13:17:14.000000Z' })]);
507
- });
508
-
509
- test('custom types', async () => {
510
- await using context = await openContext();
511
-
512
- await context.updateSyncRules(`
513
- streams:
514
- stream:
515
- query: SELECT id, * FROM "test_data"
516
-
517
- config:
518
- edition: 2
519
- `);
520
-
521
- const { pool } = context;
522
- await pool.query(`DROP TABLE IF EXISTS test_data`);
523
- await pool.query(`CREATE TYPE composite AS (foo bool, bar int4);`);
524
- await pool.query(`CREATE TABLE test_data(id text primary key, description composite, ts timestamptz);`);
525
-
526
- // Covered by initial replication
527
- await pool.query(
528
- `INSERT INTO test_data(id, description, ts) VALUES ('t1', ROW(TRUE, 1)::composite, '2025-11-17T09:11:00Z')`
529
- );
530
-
531
- await context.initializeReplication();
532
- // Covered by streaming replication
533
- await pool.query(
534
- `INSERT INTO test_data(id, description, ts) VALUES ('t2', ROW(TRUE, 2)::composite, '2025-11-17T09:12:00Z')`
535
- );
536
-
537
- const data = await context.getBucketData('stream|0[]');
538
- expect(data).toMatchObject([
539
- putOp('test_data', { id: 't1', description: '{"foo":1,"bar":1}', ts: '2025-11-17T09:11:00.000000Z' }),
540
- putOp('test_data', { id: 't2', description: '{"foo":1,"bar":2}', ts: '2025-11-17T09:12:00.000000Z' })
541
- ]);
542
- });
543
-
544
- test('custom types in primary key', async () => {
545
- await using context = await openContext();
546
-
547
- await context.updateSyncRules(`
548
- streams:
549
- stream:
550
- query: SELECT id, * FROM "test_data"
551
-
552
- config:
553
- edition: 2
554
- `);
555
-
556
- const { pool } = context;
557
- await pool.query(`DROP TABLE IF EXISTS test_data`);
558
- await pool.query(`CREATE DOMAIN test_id AS TEXT;`);
559
- await pool.query(`CREATE TABLE test_data(id test_id primary key);`);
560
-
561
- await context.initializeReplication();
562
- await pool.query(`INSERT INTO test_data(id) VALUES ('t1')`);
563
-
564
- const data = await context.getBucketData('stream|0[]');
565
- expect(data).toMatchObject([putOp('test_data', { id: 't1' })]);
566
- });
567
-
568
- test('replica identity handling', async () => {
569
- // This specifically tests a case of timestamps being used as part of the replica identity.
570
- // There was a regression in versions 1.15.0-1.15.5, which this tests for.
571
- await using context = await openContext();
572
- const { pool } = context;
573
- await context.updateSyncRules(BASIC_SYNC_RULES);
574
-
575
- await pool.query(`DROP TABLE IF EXISTS test_data`);
576
- await pool.query(`CREATE TABLE test_data(id uuid primary key, description text, ts timestamptz)`);
577
- await pool.query(`ALTER TABLE test_data REPLICA IDENTITY FULL`);
578
-
579
- const test_id = `a9798b07-84de-4297-9a8e-aafb4dd0282f`;
580
-
581
- await pool.query(
582
- `INSERT INTO test_data(id, description, ts) VALUES('${test_id}', 'test1', '2025-01-01T00:00:00Z') returning id as test_id`
583
- );
584
-
585
- await context.replicateSnapshot();
586
-
587
- await pool.query(`UPDATE test_data SET description = 'test2' WHERE id = '${test_id}'`);
588
-
589
- const data = await context.getBucketData('global[]');
590
- // For replica identity full, each change changes the id, making it a REMOVE+PUT
591
- expect(data).toMatchObject([
592
- // Initial insert
593
- putOp('test_data', { id: test_id, description: 'test1' }),
594
- // Update
595
- removeOp('test_data', test_id),
596
- putOp('test_data', { id: test_id, description: 'test2' })
597
- ]);
598
-
599
- // subkey contains `${table id}/${replica identity}`.
600
- // table id changes from run to run, but replica identity should always stay constant.
601
- // This should not change if we make changes to the implementation
602
- // (unless specifically opting in to new behavior)
603
- expect(data[0].subkey).toContain('/c7b3f1a3-ec4d-5d44-b295-c7f2a32bb056');
604
- expect(data[1].subkey).toContain('/c7b3f1a3-ec4d-5d44-b295-c7f2a32bb056');
605
- expect(data[2].subkey).toContain('/984d457a-69f0-559a-a2f9-a511c28b968d');
606
- });
607
- }