@powersync/service-module-postgres 0.16.7 → 0.16.9

This diff shows the contents of publicly available package versions as published to the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
@@ -4,9 +4,10 @@ import { METRICS_HELPER, putOp, removeOp } from '@powersync/service-core-tests';
  import { pgwireRows } from '@powersync/service-jpgwire';
  import { ReplicationMetric } from '@powersync/service-types';
  import * as crypto from 'crypto';
- import { describe, expect, test } from 'vitest';
+ import { afterAll, beforeAll, describe, expect, test } from 'vitest';
  import { describeWithStorage } from './util.js';
- import { WalStreamTestContext } from './wal_stream_utils.js';
+ import { WalStreamTestContext, withMaxWalSize } from './wal_stream_utils.js';
+ import { JSONBig } from '@powersync/service-jsonbig';

  const BASIC_SYNC_RULES = `
  bucket_definitions:
@@ -315,13 +316,157 @@ bucket_definitions:
        await pool.query(`UPDATE test_data SET description = 'updated'`);
        await pool.query('CREATE PUBLICATION powersync FOR ALL TABLES');

+       const serverVersion = await context.connectionManager.getServerVersion();
+
+       await context.loadActiveSyncRules();
+
+       if (serverVersion!.compareMain('18.0.0') >= 0) {
+         await context.replicateSnapshot();
+         // No error expected in Postgres 18. Replication keeps on working despite the
+         // publication being re-created.
+       } else {
+         // Postgres < 18 invalidates the replication slot when the publication is re-created.
+         // The error is handled on a higher level, which triggers
+         // creating a new replication slot.
+         await expect(async () => {
+           await context.replicateSnapshot();
+         }).rejects.toThrowError(MissingReplicationSlotError);
+       }
+     }
+   });
+
+   test('dropped replication slot', async () => {
+     {
+       await using context = await WalStreamTestContext.open(factory);
+       const { pool } = context;
+       await context.updateSyncRules(`
+       bucket_definitions:
+         global:
+           data:
+             - SELECT id, description FROM "test_data"`);
+
+       await pool.query(
+         `CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text, num int8)`
+       );
+       await pool.query(
+         `INSERT INTO test_data(id, description) VALUES('8133cd37-903b-4937-a022-7c8294015a3a', 'test1') returning id as test_id`
+       );
+       await context.replicateSnapshot();
+       await context.startStreaming();
+
+       const data = await context.getBucketData('global[]');
+
+       expect(data).toMatchObject([
+         putOp('test_data', {
+           id: '8133cd37-903b-4937-a022-7c8294015a3a',
+           description: 'test1'
+         })
+       ]);
+
+       expect(await context.storage!.getStatus()).toMatchObject({ active: true, snapshot_done: true });
+     }
+
+     {
+       await using context = await WalStreamTestContext.open(factory, { doNotClear: true });
+       const { pool } = context;
+       const storage = await context.factory.getActiveStorage();
+
+       // Here we explicitly drop the replication slot, which should always be handled.
+       await pool.query({
+         statement: `SELECT pg_drop_replication_slot($1)`,
+         params: [{ type: 'varchar', value: storage?.slot_name! }]
+       });
+
        await context.loadActiveSyncRules();
+
+       // The error is handled on a higher level, which triggers
+       // creating a new replication slot.
        await expect(async () => {
          await context.replicateSnapshot();
        }).rejects.toThrowError(MissingReplicationSlotError);
+     }
+   });
+
+   test('replication slot lost', async () => {
+     await using baseContext = await WalStreamTestContext.open(factory, { doNotClear: true });
+
+     const serverVersion = await baseContext.connectionManager.getServerVersion();
+     if (serverVersion!.compareMain('13.0.0') < 0) {
+       console.warn(`max_slot_wal_keep_size not supported on postgres ${serverVersion} - skipping test.`);
+       return;
+     }
+
+     // Configure max_slot_wal_keep_size for the test, reverting afterwards.
+     await using s = await withMaxWalSize(baseContext.pool, '100MB');
+
+     {
+       await using context = await WalStreamTestContext.open(factory);
+       const { pool } = context;
+       await context.updateSyncRules(`
+       bucket_definitions:
+         global:
+           data:
+             - SELECT id, description FROM "test_data"`);
+
+       await pool.query(
+         `CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text, num int8)`
+       );
+       await pool.query(
+         `INSERT INTO test_data(id, description) VALUES('8133cd37-903b-4937-a022-7c8294015a3a', 'test1') returning id as test_id`
+       );
+       await context.replicateSnapshot();
+       await context.startStreaming();
+
+       const data = await context.getBucketData('global[]');
+
+       expect(data).toMatchObject([
+         putOp('test_data', {
+           id: '8133cd37-903b-4937-a022-7c8294015a3a',
+           description: 'test1'
+         })
+       ]);
+
+       expect(await context.storage!.getStatus()).toMatchObject({ active: true, snapshot_done: true });
+     }
+
+     {
+       await using context = await WalStreamTestContext.open(factory, { doNotClear: true });
+       const { pool } = context;
+       const storage = await context.factory.getActiveStorage();
+       const slotName = storage?.slot_name!;
+
+       // Here, we write data to the WAL until the replication slot is lost.
+       const TRIES = 100;
+       for (let i = 0; i < TRIES; i++) {
+         // Write something to the WAL.
+         await pool.query(`select pg_logical_emit_message(true, 'test', 'x')`);
+         // Switch WAL file. With default settings, each WAL file is around 16MB.
+         await pool.query(`select pg_switch_wal()`);
+         // Checkpoint command forces the old WAL files to be archived/removed.
+         await pool.query(`checkpoint`);
+         // Now check if the slot is still active.
+         const slot = pgwireRows(
+           await context.pool.query({
+             statement: `select slot_name, wal_status, safe_wal_size, pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn) as lag from pg_replication_slots where slot_name = $1`,
+             params: [{ type: 'varchar', value: slotName }]
+           })
+         )[0];
+         if (slot.wal_status == 'lost') {
+           break;
+         } else if (i == TRIES - 1) {
+           throw new Error(
+             `Could not generate test conditions to expire replication slot. Current status: ${JSONBig.stringify(slot)}`
+           );
+         }
+       }
+
+       await context.loadActiveSyncRules();

        // The error is handled on a higher level, which triggers
        // creating a new replication slot.
+       await expect(async () => {
+         await context.replicateSnapshot();
+       }).rejects.toThrowError(MissingReplicationSlotError);
      }
    });

@@ -203,3 +203,25 @@ export class WalStreamTestContext implements AsyncDisposable {
      return batches[0]?.chunkData.data ?? [];
    }
  }
+
+ export async function withMaxWalSize(db: pgwire.PgClient, size: string) {
+   try {
+     const r1 = await db.query(`SHOW max_slot_wal_keep_size`);
+
+     await db.query(`ALTER SYSTEM SET max_slot_wal_keep_size = '${size}'`);
+     await db.query(`SELECT pg_reload_conf()`);
+
+     const oldSize = r1.results[0].rows[0][0];
+
+     return {
+       [Symbol.asyncDispose]: async () => {
+         await db.query(`ALTER SYSTEM SET max_slot_wal_keep_size = '${oldSize}'`);
+         await db.query(`SELECT pg_reload_conf()`);
+       }
+     };
+   } catch (e) {
+     const err = new Error(`Failed to configure max_slot_wal_keep_size for test`);
+     err.cause = e;
+     throw err;
+   }
+ }