@powersync/service-module-postgres 0.0.0-dev-20250122110924 → 0.0.0-dev-20250227082606

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/CHANGELOG.md +102 -11
  2. package/dist/api/PostgresRouteAPIAdapter.d.ts +2 -1
  3. package/dist/api/PostgresRouteAPIAdapter.js +16 -9
  4. package/dist/api/PostgresRouteAPIAdapter.js.map +1 -1
  5. package/dist/auth/SupabaseKeyCollector.js +6 -5
  6. package/dist/auth/SupabaseKeyCollector.js.map +1 -1
  7. package/dist/module/PostgresModule.js +2 -2
  8. package/dist/module/PostgresModule.js.map +1 -1
  9. package/dist/replication/ConnectionManagerFactory.js +2 -0
  10. package/dist/replication/ConnectionManagerFactory.js.map +1 -1
  11. package/dist/replication/PgManager.d.ts +5 -0
  12. package/dist/replication/PgManager.js +17 -2
  13. package/dist/replication/PgManager.js.map +1 -1
  14. package/dist/replication/PostgresErrorRateLimiter.js +5 -7
  15. package/dist/replication/PostgresErrorRateLimiter.js.map +1 -1
  16. package/dist/replication/WalStream.d.ts +18 -3
  17. package/dist/replication/WalStream.js +132 -17
  18. package/dist/replication/WalStream.js.map +1 -1
  19. package/dist/replication/WalStreamReplicationJob.js +9 -7
  20. package/dist/replication/WalStreamReplicationJob.js.map +1 -1
  21. package/dist/replication/WalStreamReplicator.js +2 -1
  22. package/dist/replication/WalStreamReplicator.js.map +1 -1
  23. package/dist/types/types.d.ts +3 -0
  24. package/dist/utils/migration_lib.js +1 -3
  25. package/dist/utils/migration_lib.js.map +1 -1
  26. package/dist/utils/populate_test_data.js +1 -1
  27. package/dist/utils/populate_test_data.js.map +1 -1
  28. package/package.json +14 -12
  29. package/src/api/PostgresRouteAPIAdapter.ts +13 -8
  30. package/src/replication/PgManager.ts +10 -0
  31. package/src/replication/WalStream.ts +152 -20
  32. package/src/replication/WalStreamReplicationJob.ts +5 -5
  33. package/test/src/checkpoints.test.ts +74 -0
  34. package/test/src/slow_tests.test.ts +102 -114
  35. package/test/src/storage_combination.test.ts +35 -0
  36. package/test/src/util.ts +5 -4
  37. package/test/src/wal_stream_utils.ts +1 -2
  38. package/tsconfig.tsbuildinfo +1 -1
package/src/replication/WalStream.ts

@@ -1,6 +1,8 @@
 import * as lib_postgres from '@powersync/lib-service-postgres';
 import {
   container,
+  DatabaseConnectionError,
+  ErrorCode,
   errors,
   logger,
   ReplicationAbortedError,
@@ -15,10 +17,6 @@ import { PgManager } from './PgManager.js';
 import { getPgOutputRelation, getRelId } from './PgRelation.js';
 import { checkSourceConfiguration, getReplicationIdentityColumns } from './replication-utils.js';
 
-export const ZERO_LSN = '00000000/00000000';
-export const PUBLICATION_NAME = 'powersync';
-export const POSTGRES_DEFAULT_SCHEMA = 'public';
-
 export interface WalStreamOptions {
   connections: PgManager;
   storage: storage.SyncRulesBucketStorage;
@@ -32,6 +30,35 @@ interface InitResult {
   needsNewSlot: boolean;
 }
 
+export const ZERO_LSN = '00000000/00000000';
+export const PUBLICATION_NAME = 'powersync';
+export const POSTGRES_DEFAULT_SCHEMA = 'public';
+
+export const KEEPALIVE_CONTENT = 'ping';
+export const KEEPALIVE_BUFFER = Buffer.from(KEEPALIVE_CONTENT);
+export const KEEPALIVE_STATEMENT: pgwire.Statement = {
+  statement: /* sql */ `
+    SELECT
+      *
+    FROM
+      pg_logical_emit_message(FALSE, 'powersync', $1)
+  `,
+  params: [{ type: 'varchar', value: KEEPALIVE_CONTENT }]
+} as const;
+
+export const isKeepAliveMessage = (msg: pgwire.PgoutputMessage) => {
+  return (
+    msg.tag == 'message' &&
+    msg.prefix == 'powersync' &&
+    msg.content &&
+    Buffer.from(msg.content).equals(KEEPALIVE_BUFFER)
+  );
+};
+
+export const sendKeepAlive = async (db: pgwire.PgClient) => {
+  await lib_postgres.retriedQuery(db, KEEPALIVE_STATEMENT);
+};
+
 export class MissingReplicationSlotError extends Error {
   constructor(message: string) {
     super(message);
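These keepalive helpers are exported so that other call sites (the ping in WalStream and keepAlive in WalStreamReplicationJob further down) reuse the same payload. A minimal usage sketch, not part of the published diff; the pgwire import path is an assumption, and the helpers are used exactly as exported above:

import * as pgwire from '@powersync/service-jpgwire'; // import path assumed
import { isKeepAliveMessage, sendKeepAlive } from './WalStream.js';

// Emit the 'ping' logical message on the source database. When the server exposes
// message contents (Postgres >= 14 with the 'messages' pgoutput option), the same
// payload arrives back on the replication stream as a pgoutput 'message' event.
async function countKeepAlives(pool: pgwire.PgClient, chunkMessages: pgwire.PgoutputMessage[]) {
  await sendKeepAlive(pool);
  // Keepalive messages carry no table data, so consumers can detect and skip them.
  return chunkMessages.filter((msg) => isKeepAliveMessage(msg)).length;
}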
@@ -71,10 +98,7 @@ export class WalStream {
     // Ping to speed up cancellation of streaming replication
     // We're not using pg_snapshot here, since it could be in the middle of
     // an initial replication transaction.
-    const promise = lib_postgres.retriedQuery(
-      this.connections.pool,
-      `SELECT * FROM pg_logical_emit_message(false, 'powersync', 'ping')`
-    );
+    const promise = sendKeepAlive(this.connections.pool);
     promise.catch((e) => {
       // Failures here are okay - this only speeds up stopping the process.
       logger.warn('Failed to ping connection', e);
@@ -180,6 +204,7 @@ export class WalStream {
 
   async initSlot(): Promise<InitResult> {
     await checkSourceConfiguration(this.connections.pool, PUBLICATION_NAME);
+    await this.ensureStorageCompatibility();
 
     const slotName = this.slot_name;
 
@@ -214,6 +239,11 @@
         needsNewSlot: r.needsNewSlot
       };
     } else {
+      if (snapshotDone) {
+        // This will create a new slot, while keeping the current sync rules active
+        throw new MissingReplicationSlotError(`Replication slot ${slotName} is missing`);
+      }
+      // This will clear data and re-create the same slot
       return { needsInitialSync: true, needsNewSlot: true };
     }
   }
@@ -382,6 +412,15 @@ WHERE oid = $1::regclass`,
         await batch.commit(ZERO_LSN);
       }
     );
+    /**
+     * Send a keepalive message after initial replication.
+     * In some edge cases we wait for a keepalive after the initial snapshot.
+     * If we don't explicitly check the contents of keepalive messages then a keepalive is detected
+     * rather quickly after initial replication - perhaps due to other WAL events.
+     * If we do explicitly check the contents of messages, we need an actual keepalive payload in order
+     * to advance the active sync rules LSN.
+     */
+    await sendKeepAlive(db);
   }
 
   static *getQueryData(results: Iterable<DatabaseInputRow>): Generator<SqliteRow> {
@@ -599,13 +638,33 @@ WHERE oid = $1::regclass`,
   async streamChanges(replicationConnection: pgwire.PgConnection) {
     // When changing any logic here, check /docs/wal-lsns.md.
 
+    const { createEmptyCheckpoints } = await this.ensureStorageCompatibility();
+
+    const replicationOptions: Record<string, string> = {
+      proto_version: '1',
+      publication_names: PUBLICATION_NAME
+    };
+
+    /**
+     * Viewing the contents of logical messages emitted with `pg_logical_emit_message`
+     * is only supported on Postgres >= 14.0.
+     * https://www.postgresql.org/docs/14/protocol-logical-replication.html
+     */
+    const exposesLogicalMessages = await this.checkLogicalMessageSupport();
+    if (exposesLogicalMessages) {
+      /**
+       * Only add this option if the Postgres server supports it.
+       * Adding the option to a server that doesn't support it will throw an exception when starting logical replication.
+       * Error: `unrecognized pgoutput option: messages`
+       */
+      replicationOptions['messages'] = 'true';
+    }
+
     const replicationStream = replicationConnection.logicalReplication({
       slot: this.slot_name,
-      options: {
-        proto_version: '1',
-        publication_names: PUBLICATION_NAME
-      }
+      options: replicationOptions
     });
+
     this.startedStreaming = true;
 
     // Auto-activate as soon as initial replication is done
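The version gate above can also be reproduced directly against the source database. A small sketch, not part of the published diff; the pool variable and the shape of the pgwire query result (positional rows, as used in the tests later in this diff) are assumptions:

// Equivalent check expressed as a query: server_version_num is 140000 or higher on Postgres 14+.
const result = await pool.query(`SELECT current_setting('server_version_num')::int4 AS version_num`);
const exposesLogicalMessages = Number(result.rows[0][0]) >= 140000;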
@@ -628,6 +687,15 @@ WHERE oid = $1::regclass`,
       // chunkLastLsn may come from normal messages in the chunk,
       // or from a PrimaryKeepalive message.
       const { messages, lastLsn: chunkLastLsn } = chunk;
+
+      /**
+       * We can check if an explicit keepalive was sent if `exposesLogicalMessages == true`.
+       * If we can't check the logical messages, we should assume a keepalive if we
+       * receive an empty array of messages in a replication event.
+       */
+      const assumeKeepAlive = !exposesLogicalMessages;
+      let keepAliveDetected = false;
+
       for (const msg of messages) {
         if (msg.tag == 'relation') {
           await this.handleRelation(batch, getPgOutputRelation(msg), true);
@@ -636,27 +704,44 @@
         } else if (msg.tag == 'commit') {
           Metrics.getInstance().transactions_replicated_total.add(1);
           inTx = false;
-          await batch.commit(msg.lsn!);
+          await batch.commit(msg.lsn!, { createEmptyCheckpoints });
           await this.ack(msg.lsn!, replicationStream);
         } else {
           if (count % 100 == 0) {
             logger.info(`${this.slot_name} replicating op ${count} ${msg.lsn}`);
           }
 
+          /**
+           * If we can see the contents of logical messages, then we can check if a keepalive
+           * message is present. We only perform a keepalive (below) if we explicitly detect a keepalive message.
+           * If we can't see the contents of logical messages, then we should assume a keepalive is required
+           * due to the default value of `assumeKeepalive`.
+           */
+          if (exposesLogicalMessages && isKeepAliveMessage(msg)) {
+            keepAliveDetected = true;
+          }
+
           count += 1;
           await this.writeChange(batch, msg);
         }
       }
 
       if (!inTx) {
-        // In a transaction, we ack and commit according to the transaction progress.
-        // Outside transactions, we use the PrimaryKeepalive messages to advance progress.
-        // Big caveat: This _must not_ be used to skip individual messages, since this LSN
-        // may be in the middle of the next transaction.
-        // It must only be used to associate checkpoints with LSNs.
-        if (await batch.keepalive(chunkLastLsn)) {
-          await this.ack(chunkLastLsn, replicationStream);
+        if (assumeKeepAlive || keepAliveDetected) {
+          // Reset the detection flag.
+          keepAliveDetected = false;
+
+          // In a transaction, we ack and commit according to the transaction progress.
+          // Outside transactions, we use the PrimaryKeepalive messages to advance progress.
+          // Big caveat: This _must not_ be used to skip individual messages, since this LSN
+          // may be in the middle of the next transaction.
+          // It must only be used to associate checkpoints with LSNs.
+          await batch.keepalive(chunkLastLsn);
         }
+
+        // We receive chunks with empty messages often (about each second).
+        // Acknowledging here progresses the slot past these and frees up resources.
+        await this.ack(chunkLastLsn, replicationStream);
       }
 
       Metrics.getInstance().chunks_replicated_total.add(1);
@@ -672,6 +757,53 @@
 
     replicationStream.ack(lsn);
   }
+
+  /**
+   * Ensures that the storage is compatible with the replication connection.
+   * @throws {DatabaseConnectionError} If the storage is not compatible with the replication connection.
+   */
+  protected async ensureStorageCompatibility(): Promise<storage.ResolvedBucketBatchCommitOptions> {
+    const supportsLogicalMessages = await this.checkLogicalMessageSupport();
+
+    const storageIdentifier = await this.storage.factory.getSystemIdentifier();
+    if (storageIdentifier.type != lib_postgres.POSTGRES_CONNECTION_TYPE) {
+      return {
+        // Keep the same behaviour as before allowing Postgres storage.
+        createEmptyCheckpoints: true
+      };
+    }
+
+    const parsedStorageIdentifier = lib_postgres.utils.decodePostgresSystemIdentifier(storageIdentifier.id);
+    /**
+     * Check if the same server is being used for both the sync bucket storage and the logical replication.
+     */
+    const replicationIdentifier = await lib_postgres.utils.queryPostgresSystemIdentifier(this.connections.pool);
+
+    if (!supportsLogicalMessages && replicationIdentifier.server_id == parsedStorageIdentifier.server_id) {
+      throw new DatabaseConnectionError(
+        ErrorCode.PSYNC_S1144,
+        `Separate Postgres servers are required for the replication source and sync bucket storage when using Postgres versions below 14.0.`,
+        new Error('Postgres version is below 14')
+      );
+    }
+
+    return {
+      /**
+       * Don't create empty checkpoints if the same Postgres database is used for the data source
+       * and sync bucket storage. Creating empty checkpoints will cause WAL feedback loops.
+       */
+      createEmptyCheckpoints: replicationIdentifier.database_name != parsedStorageIdentifier.database_name
+    };
+  }
+
+  /**
+   * Check if the replication connection Postgres server supports
+   * viewing the contents of logical replication messages.
+   */
+  protected async checkLogicalMessageSupport() {
+    const version = await this.connections.getServerVersion();
+    return version ? version.compareMain('14.0.0') >= 0 : false;
+  }
 }
 
 async function touch() {
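For reference, the decision ensureStorageCompatibility() encodes can be summarised as a pure function over the two system identifiers. A simplified sketch, not part of the package, with types reduced to plain strings:

// Mirrors the compatibility rules introduced above (identifier types simplified).
interface SystemIdentifier {
  server_id: string;
  database_name: string;
}

function commitOptionsFor(source: SystemIdentifier, bucketStorage: SystemIdentifier, postgres14OrLater: boolean) {
  if (!postgres14OrLater && source.server_id == bucketStorage.server_id) {
    // Corresponds to the PSYNC_S1144 DatabaseConnectionError thrown above.
    throw new Error('Separate Postgres servers are required below Postgres 14.0');
  }
  // Empty checkpoints are only skipped when source and storage share a database,
  // since writing them into the same database would feed back into the replicated WAL.
  return { createEmptyCheckpoints: source.database_name != bucketStorage.database_name };
}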
package/src/replication/WalStreamReplicationJob.ts

@@ -1,6 +1,6 @@
 import { container } from '@powersync/lib-services-framework';
 import { PgManager } from './PgManager.js';
-import { MissingReplicationSlotError, WalStream } from './WalStream.js';
+import { MissingReplicationSlotError, sendKeepAlive, WalStream } from './WalStream.js';
 
 import { replication } from '@powersync/service-core';
 import { ConnectionManagerFactory } from './ConnectionManagerFactory.js';
@@ -37,7 +37,7 @@ export class WalStreamReplicationJob extends replication.AbstractReplicationJob
   */
  async keepAlive() {
    try {
-      await this.connectionManager.pool.query(`SELECT * FROM pg_logical_emit_message(false, 'powersync', 'ping')`);
+      await sendKeepAlive(this.connectionManager.pool);
    } catch (e) {
      this.logger.warn(`KeepAlive failed, unable to post to WAL`, e);
    }
@@ -60,8 +60,8 @@ export class WalStreamReplicationJob extends replication.AbstractReplicationJob
       this.logger.error(`Replication failed on ${this.slotName}`, e);
 
       if (e instanceof MissingReplicationSlotError) {
-        // This stops replication on this slot, and creates a new slot
-        await this.options.storage.factory.slotRemoved(this.slotName);
+        // This stops replication on this slot and restarts with a new slot
+        await this.options.storage.factory.restartReplication(this.storage.group_id);
       }
     } finally {
       this.abortController.abort();
@@ -99,7 +99,7 @@ export class WalStreamReplicationJob extends replication.AbstractReplicationJob
       });
       await stream.replicate();
     } catch (e) {
-      this.logger.error(`Replication error`, e);
+      this.logger.error(`${this.slotName} Replication error`, e);
       if (e.cause != null) {
         // Example:
         // PgError.conn_ended: Unable to do postgres query on ended connection
package/test/src/checkpoints.test.ts

@@ -0,0 +1,74 @@
+import { PostgresRouteAPIAdapter } from '@module/api/PostgresRouteAPIAdapter.js';
+import { checkpointUserId, createWriteCheckpoint } from '@powersync/service-core';
+import { describe, test } from 'vitest';
+import { INITIALIZED_MONGO_STORAGE_FACTORY } from './util.js';
+import { WalStreamTestContext } from './wal_stream_utils.js';
+import { env } from './env.js';
+
+import timers from 'node:timers/promises';
+
+const BASIC_SYNC_RULES = `bucket_definitions:
+  global:
+    data:
+      - SELECT id, description, other FROM "test_data"`;
+
+describe.skipIf(!(env.CI || env.SLOW_TESTS))('checkpoint tests', () => {
+  test('write checkpoints', { timeout: 50_000 }, async () => {
+    const factory = INITIALIZED_MONGO_STORAGE_FACTORY;
+    await using context = await WalStreamTestContext.open(factory);
+
+    await context.updateSyncRules(BASIC_SYNC_RULES);
+    const { pool } = context;
+    const api = new PostgresRouteAPIAdapter(pool);
+
+    await pool.query(`CREATE TABLE test_data(id text primary key, description text, other text)`);
+
+    await context.replicateSnapshot();
+
+    context.startStreaming();
+    const storage = context.storage!;
+
+    const controller = new AbortController();
+    try {
+      const stream = storage.watchWriteCheckpoint({
+        user_id: checkpointUserId('test_user', 'test_client'),
+        signal: controller.signal
+      });
+
+      let lastWriteCheckpoint: bigint | null = null;
+
+      (async () => {
+        try {
+          for await (const cp of stream) {
+            lastWriteCheckpoint = cp.writeCheckpoint;
+          }
+        } catch (e) {
+          if (e.name != 'AbortError') {
+            throw e;
+          }
+        }
+      })();
+
+      for (let i = 0; i < 10; i++) {
+        const cp = await createWriteCheckpoint({
+          userId: 'test_user',
+          clientId: 'test_client',
+          api,
+          storage: context.factory
+        });
+
+        const start = Date.now();
+        while (lastWriteCheckpoint == null || lastWriteCheckpoint < BigInt(cp.writeCheckpoint)) {
+          if (Date.now() - start > 5_000) {
+            throw new Error(
+              `Timeout while waiting for checkpoint. last: ${lastWriteCheckpoint}, waiting for: ${cp.writeCheckpoint}`
+            );
+          }
+          await timers.setTimeout(5, undefined, { signal: controller.signal });
+        }
+      }
+    } finally {
+      controller.abort();
+    }
+  });
+});
package/test/src/slow_tests.test.ts

@@ -71,21 +71,13 @@ function defineSlowTests(factory: storage.TestStorageFactory) {
   // Past issues that this could reproduce intermittently:
   // * Skipping LSNs after a keepalive message
   // * Skipping LSNs when source transactions overlap
-  test(
-    'repeated replication - basic',
-    async () => {
-      await testRepeatedReplication({ compact: false, maxBatchSize: 50, numBatches: 5 });
-    },
-    { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }
-  );
-
-  test(
-    'repeated replication - compacted',
-    async () => {
-      await testRepeatedReplication({ compact: true, maxBatchSize: 100, numBatches: 2 });
-    },
-    { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }
-  );
+  test('repeated replication - basic', { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }, async () => {
+    await testRepeatedReplication({ compact: false, maxBatchSize: 50, numBatches: 5 });
+  });
+
+  test('repeated replication - compacted', { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }, async () => {
+    await testRepeatedReplication({ compact: true, maxBatchSize: 100, numBatches: 2 });
+  });
 
   async function testRepeatedReplication(testOptions: { compact: boolean; maxBatchSize: number; numBatches: number }) {
     const connections = new PgManager(TEST_CONNECTION_OPTIONS, {});
@@ -101,7 +93,7 @@ bucket_definitions:
       - SELECT * FROM "test_data"
 `;
     const syncRules = await f.updateSyncRules({ content: syncRuleContent });
-    using storage = f.getInstance(syncRules);
+    const storage = f.getInstance(syncRules);
     abortController = new AbortController();
     const options: WalStreamOptions = {
       abort_signal: abortController.signal,
@@ -314,116 +306,112 @@
   //
   // If the first LSN does not correctly match with the first replication transaction,
   // we may miss some updates.
-  test(
-    'repeated initial replication',
-    async () => {
-      const pool = await connectPgPool();
-      await clearTestDb(pool);
-      await using f = await factory();
-
-      const syncRuleContent = `
+  test('repeated initial replication', { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }, async () => {
+    const pool = await connectPgPool();
+    await clearTestDb(pool);
+    await using f = await factory();
+
+    const syncRuleContent = `
 bucket_definitions:
   global:
     data:
       - SELECT id, description FROM "test_data"
 `;
-      const syncRules = await f.updateSyncRules({ content: syncRuleContent });
-      using storage = f.getInstance(syncRules);
+    const syncRules = await f.updateSyncRules({ content: syncRuleContent });
+    const storage = f.getInstance(syncRules);
 
-      // 1. Setup some base data that will be replicated in initial replication
-      await pool.query(`CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`);
+    // 1. Setup some base data that will be replicated in initial replication
+    await pool.query(`CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`);
 
-      let statements: pgwire.Statement[] = [];
+    let statements: pgwire.Statement[] = [];
 
-      const n = Math.floor(Math.random() * 200);
-      for (let i = 0; i < n; i++) {
-        statements.push({
-          statement: `INSERT INTO test_data(description) VALUES('test_init')`
-        });
-      }
-      await pool.query(...statements);
-
-      const start = Date.now();
-      let i = 0;
-
-      while (Date.now() - start < TEST_DURATION_MS) {
-        // 2. Each iteration starts with a clean slate
-        await pool.query(`SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots WHERE active = FALSE`);
-        i += 1;
-
-        const connections = new PgManager(TEST_CONNECTION_OPTIONS, {});
-        const replicationConnection = await connections.replicationConnection();
-
-        abortController = new AbortController();
-        const options: WalStreamOptions = {
-          abort_signal: abortController.signal,
-          connections,
-          storage: storage
-        };
-        walStream = new WalStream(options);
-
-        await storage.clear();
-
-        // 3. Start initial replication, then streaming, but don't wait for any of this
-        let initialReplicationDone = false;
-        streamPromise = (async () => {
-          await walStream.initReplication(replicationConnection);
-          await storage.autoActivate();
+    const n = Math.floor(Math.random() * 200);
+    for (let i = 0; i < n; i++) {
+      statements.push({
+        statement: `INSERT INTO test_data(description) VALUES('test_init')`
+      });
+    }
+    await pool.query(...statements);
+
+    const start = Date.now();
+    let i = 0;
+
+    while (Date.now() - start < TEST_DURATION_MS) {
+      // 2. Each iteration starts with a clean slate
+      await pool.query(`SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots WHERE active = FALSE`);
+      i += 1;
+
+      const connections = new PgManager(TEST_CONNECTION_OPTIONS, {});
+      const replicationConnection = await connections.replicationConnection();
+
+      abortController = new AbortController();
+      const options: WalStreamOptions = {
+        abort_signal: abortController.signal,
+        connections,
+        storage: storage
+      };
+      walStream = new WalStream(options);
+
+      await storage.clear();
+
+      // 3. Start initial replication, then streaming, but don't wait for any of this
+      let initialReplicationDone = false;
+      streamPromise = (async () => {
+        await walStream.initReplication(replicationConnection);
+        await storage.autoActivate();
+        initialReplicationDone = true;
+        await walStream.streamChanges(replicationConnection);
+      })()
+        .catch((e) => {
          initialReplicationDone = true;
-          await walStream.streamChanges(replicationConnection);
-        })()
-          .catch((e) => {
-            initialReplicationDone = true;
-            throw e;
-          })
-          .then((v) => {
-            return v;
-          });
+          throw e;
+        })
+        .then((v) => {
+          return v;
+        });
 
-        // 4. While initial replication is still running, write more changes
-        while (!initialReplicationDone) {
-          let statements: pgwire.Statement[] = [];
-          const n = Math.floor(Math.random() * 10) + 1;
-          for (let i = 0; i < n; i++) {
-            const description = `test${i}`;
-            statements.push({
-              statement: `INSERT INTO test_data(description) VALUES('test1') returning id as test_id`,
-              params: [{ type: 'varchar', value: description }]
-            });
-          }
-          const results = await pool.query(...statements);
-          const ids = results.results.map((sub) => {
-            return sub.rows[0][0] as string;
-          });
-          await new Promise((resolve) => setTimeout(resolve, Math.random() * 30));
-          const deleteStatements: pgwire.Statement[] = ids.map((id) => {
-            return {
-              statement: `DELETE FROM test_data WHERE id = $1`,
-              params: [{ type: 'uuid', value: id }]
-            };
+      // 4. While initial replication is still running, write more changes
+      while (!initialReplicationDone) {
+        let statements: pgwire.Statement[] = [];
+        const n = Math.floor(Math.random() * 10) + 1;
+        for (let i = 0; i < n; i++) {
+          const description = `test${i}`;
+          statements.push({
+            statement: `INSERT INTO test_data(description) VALUES('test1') returning id as test_id`,
+            params: [{ type: 'varchar', value: description }]
          });
-          await pool.query(...deleteStatements);
-          await new Promise((resolve) => setTimeout(resolve, Math.random() * 10));
-        }
-
-        // 5. Once initial replication is done, wait for the streaming changes to complete syncing.
-        // getClientCheckpoint() effectively waits for the above replication to complete
-        // Race with streamingPromise to catch replication errors here.
-        let checkpoint = await Promise.race([
-          getClientCheckpoint(pool, storage.factory, { timeout: TIMEOUT_MARGIN_MS }),
-          streamPromise
-        ]);
-        if (typeof checkpoint == undefined) {
-          // This indicates an issue with the test setup - streamingPromise completed instead
-          // of getClientCheckpoint()
-          throw new Error('Test failure - streamingPromise completed');
        }
+        const results = await pool.query(...statements);
+        const ids = results.results.map((sub) => {
+          return sub.rows[0][0] as string;
+        });
+        await new Promise((resolve) => setTimeout(resolve, Math.random() * 30));
+        const deleteStatements: pgwire.Statement[] = ids.map((id) => {
+          return {
+            statement: `DELETE FROM test_data WHERE id = $1`,
+            params: [{ type: 'uuid', value: id }]
+          };
+        });
+        await pool.query(...deleteStatements);
+        await new Promise((resolve) => setTimeout(resolve, Math.random() * 10));
+      }
 
-        abortController.abort();
-        await streamPromise;
-        await connections.end();
+      // 5. Once initial replication is done, wait for the streaming changes to complete syncing.
+      // getClientCheckpoint() effectively waits for the above replication to complete
+      // Race with streamingPromise to catch replication errors here.
+      let checkpoint = await Promise.race([
+        getClientCheckpoint(pool, storage.factory, { timeout: TIMEOUT_MARGIN_MS }),
+        streamPromise
+      ]);
+      if (typeof checkpoint == undefined) {
+        // This indicates an issue with the test setup - streamingPromise completed instead
+        // of getClientCheckpoint()
+        throw new Error('Test failure - streamingPromise completed');
      }
-    },
-    { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }
-  );
+
+      abortController.abort();
+      await streamPromise;
+      await connections.end();
+    }
+  });
 }
package/test/src/storage_combination.test.ts

@@ -0,0 +1,35 @@
+import * as postgres_storage from '@powersync/service-module-postgres-storage';
+import { describe, expect, test } from 'vitest';
+import { env } from './env.js';
+import { WalStreamTestContext } from './wal_stream_utils.js';
+
+describe.skipIf(!env.TEST_POSTGRES_STORAGE)('replication storage combination - postgres', function () {
+  test('should allow the same Postgres cluster to be used for data and storage', async () => {
+    // Use the same cluster for the storage as the data source
+    await using context = await WalStreamTestContext.open(
+      postgres_storage.PostgresTestStorageFactoryGenerator({
+        url: env.PG_TEST_URL
+      }),
+      { doNotClear: false }
+    );
+
+    await context.updateSyncRules(/* yaml */
+      ` bucket_definitions:
+          global:
+            data:
+              - SELECT * FROM "test_data" `);
+
+    const { pool, connectionManager } = context;
+
+    const sourceVersion = await connectionManager.getServerVersion();
+
+    await pool.query(`CREATE TABLE test_data(id text primary key, description text, other text)`);
+
+    if (sourceVersion!.compareMain('14.0.0') < 0) {
+      await expect(context.replicateSnapshot()).rejects.toThrow();
+    } else {
+      // Should resolve
+      await context.replicateSnapshot();
+    }
+  });
+});