@powersync/service-module-postgres 0.18.0 → 0.19.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -15,21 +15,31 @@ import * as pgwire from '@powersync/service-jpgwire';
15
15
  import { SqliteRow } from '@powersync/service-sync-rules';
16
16
 
17
17
  import { PgManager } from '@module/replication/PgManager.js';
18
+ import { ReplicationAbortedError } from '@powersync/lib-services-framework';
18
19
  import {
19
20
  createCoreReplicationMetrics,
21
+ CURRENT_STORAGE_VERSION,
20
22
  initializeCoreReplicationMetrics,
23
+ reduceBucket,
21
24
  updateSyncRulesFromYaml
22
25
  } from '@powersync/service-core';
23
26
  import { METRICS_HELPER, test_utils } from '@powersync/service-core-tests';
24
27
  import * as mongo_storage from '@powersync/service-module-mongodb-storage';
25
28
  import * as postgres_storage from '@powersync/service-module-postgres-storage';
26
29
  import * as timers from 'node:timers/promises';
27
- import { CustomTypeRegistry } from '@module/types/registry.js';
30
+ import { WalStreamTestContext } from './wal_stream_utils.js';
28
31
 
29
32
  describe.skipIf(!(env.CI || env.SLOW_TESTS))('slow tests', function () {
30
- describeWithStorage({ timeout: 120_000 }, function ({ factory, storageVersion }) {
31
- defineSlowTests({ factory, storageVersion });
32
- });
33
+ describeWithStorage(
34
+ {
35
+ timeout: 120_000,
36
+ // These tests are slow, so only test the current storage version
37
+ storageVersions: [CURRENT_STORAGE_VERSION]
38
+ },
39
+ function ({ factory, storageVersion }) {
40
+ defineSlowTests({ factory, storageVersion });
41
+ }
42
+ );
33
43
  });
34
44
 
35
45
  function defineSlowTests({ factory, storageVersion }: StorageVersionTestContext) {
@@ -47,7 +57,7 @@ function defineSlowTests({ factory, storageVersion }: StorageVersionTestContext)
47
57
  // This cleans up, similar to WalStreamTestContext.dispose().
48
58
  // These tests are a little more complex than what is supported by WalStreamTestContext.
49
59
  abortController?.abort();
50
- await streamPromise;
60
+ await streamPromise?.catch((_) => {});
51
61
  streamPromise = undefined;
52
62
  connections?.destroy();
53
63
 
@@ -75,7 +85,6 @@ function defineSlowTests({ factory, storageVersion }: StorageVersionTestContext)
75
85
 
76
86
  async function testRepeatedReplication(testOptions: { compact: boolean; maxBatchSize: number; numBatches: number }) {
77
87
  const connections = new PgManager(TEST_CONNECTION_OPTIONS, {});
78
- const replicationConnection = await connections.replicationConnection();
79
88
  const pool = connections.pool;
80
89
  await clearTestDb(pool);
81
90
  await using f = await factory();
@@ -102,11 +111,11 @@ bucket_definitions:
102
111
  );
103
112
  await pool.query(`ALTER TABLE test_data REPLICA IDENTITY FULL`);
104
113
 
105
- await walStream.initReplication(replicationConnection);
106
114
  let abort = false;
107
- streamPromise = walStream.streamChanges(replicationConnection).finally(() => {
115
+ streamPromise = walStream.replicate().finally(() => {
108
116
  abort = true;
109
117
  });
118
+ await walStream.waitForInitialSnapshot();
110
119
  const start = Date.now();
111
120
 
112
121
  while (!abort && Date.now() - start < TEST_DURATION_MS) {
@@ -228,11 +237,12 @@ bucket_definitions:
228
237
  await compactPromise;
229
238
 
230
239
  // Wait for replication to finish
231
- let checkpoint = await getClientCheckpoint(pool, storage.factory, { timeout: TIMEOUT_MARGIN_MS });
240
+ await getClientCheckpoint(pool, storage.factory, { timeout: TIMEOUT_MARGIN_MS });
232
241
 
233
242
  if (f instanceof mongo_storage.storage.MongoBucketStorage) {
234
243
  // Check that all inserts have been deleted again
235
- const docs = await f.db.current_data.find().toArray();
244
+ // Note: at this point, the pending_delete cleanup may not have run yet.
245
+ const docs = await f.db.current_data.find({ pending_delete: { $exists: false } }).toArray();
236
246
  const transformed = docs.map((doc) => {
237
247
  return bson.deserialize(doc.data.buffer) as SqliteRow;
238
248
  });
@@ -254,13 +264,14 @@ bucket_definitions:
254
264
  } else if (f instanceof postgres_storage.storage.PostgresBucketStorageFactory) {
255
265
  const { db } = f;
256
266
  // Check that all inserts have been deleted again
267
+ // FIXME: handle different storage versions
257
268
  const docs = await db.sql`
258
269
  SELECT
259
270
  *
260
271
  FROM
261
272
  current_data
262
273
  `
263
- .decoded(postgres_storage.models.CurrentData)
274
+ .decoded(postgres_storage.models.V1CurrentData)
264
275
  .rows();
265
276
  const transformed = docs.map((doc) => {
266
277
  return bson.deserialize(doc.data) as SqliteRow;
@@ -293,14 +304,20 @@ bucket_definitions:
293
304
  }
294
305
 
295
306
  abortController.abort();
296
- await streamPromise;
307
+ await streamPromise.catch((e) => {
308
+ if (e instanceof ReplicationAbortedError) {
309
+ // Ignore
310
+ } else {
311
+ throw e;
312
+ }
313
+ });
297
314
  }
298
315
 
299
316
  // Test repeatedly performing initial replication.
300
317
  //
301
318
  // If the first LSN does not correctly match with the first replication transaction,
302
319
  // we may miss some updates.
303
- test('repeated initial replication', { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }, async () => {
320
+ test('repeated initial replication (1)', { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }, async () => {
304
321
  const pool = await connectPgPool();
305
322
  await clearTestDb(pool);
306
323
  await using f = await factory();
@@ -337,7 +354,6 @@ bucket_definitions:
337
354
  i += 1;
338
355
 
339
356
  const connections = new PgManager(TEST_CONNECTION_OPTIONS, {});
340
- const replicationConnection = await connections.replicationConnection();
341
357
 
342
358
  abortController = new AbortController();
343
359
  const options: WalStreamOptions = {
@@ -350,19 +366,14 @@ bucket_definitions:
350
366
 
351
367
  await storage.clear();
352
368
 
353
- // 3. Start initial replication, then streaming, but don't wait for any of this
369
+ // 3. Start replication, but don't wait for it
354
370
  let initialReplicationDone = false;
355
- streamPromise = (async () => {
356
- await walStream.initReplication(replicationConnection);
357
- initialReplicationDone = true;
358
- await walStream.streamChanges(replicationConnection);
359
- })()
360
- .catch((e) => {
371
+ streamPromise = walStream.replicate();
372
+ walStream
373
+ .waitForInitialSnapshot()
374
+ .catch((_) => {})
375
+ .finally(() => {
361
376
  initialReplicationDone = true;
362
- throw e;
363
- })
364
- .then((v) => {
365
- return v;
366
377
  });
367
378
 
368
379
  // 4. While initial replication is still running, write more changes
@@ -405,8 +416,104 @@ bucket_definitions:
405
416
  }
406
417
 
407
418
  abortController.abort();
408
- await streamPromise;
419
+ await streamPromise.catch((e) => {
420
+ if (e instanceof ReplicationAbortedError) {
421
+ // Ignore
422
+ } else {
423
+ throw e;
424
+ }
425
+ });
409
426
  await connections.end();
410
427
  }
411
428
  });
429
+
430
+ // Test repeatedly performing initial replication while deleting data.
431
+ //
432
+ // This specifically checks for data in the initial snapshot being deleted while snapshotting.
433
+ test('repeated initial replication with deletes', { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }, async () => {
434
+ const syncRuleContent = `
435
+ bucket_definitions:
436
+ global:
437
+ data:
438
+ - SELECT id, description FROM "test_data"
439
+ `;
440
+
441
+ const start = Date.now();
442
+ let i = 0;
443
+
444
+ while (Date.now() - start < TEST_DURATION_MS) {
445
+ i += 1;
446
+
447
+ // 1. Each iteration starts with a clean slate
448
+ await using context = await WalStreamTestContext.open(factory, {
449
+ walStreamOptions: { snapshotChunkLength: 100 }
450
+ });
451
+ const pool = context.pool;
452
+
453
+ // Introduce an artificial delay in snapshot queries, to make it more likely to reproduce an
454
+ // issue.
455
+ const originalSnapshotConnectionFn = context.connectionManager.snapshotConnection;
456
+ context.connectionManager.snapshotConnection = async () => {
457
+ const conn = await originalSnapshotConnectionFn.call(context.connectionManager);
458
+ // Wrap streaming query to add delays to snapshots
459
+ const originalStream = conn.stream;
460
+ conn.stream = async function* (...args: any[]) {
461
+ const delay = Math.random() * 20;
462
+ yield* originalStream.call(this, ...args);
463
+ await new Promise((resolve) => setTimeout(resolve, delay));
464
+ };
465
+ return conn;
466
+ };
467
+
468
+ await pool.query(`CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`);
469
+ await context.updateSyncRules(syncRuleContent);
470
+
471
+ let statements: pgwire.Statement[] = [];
472
+
473
+ const n = Math.floor(Math.random() * 200);
474
+ for (let i = 0; i < n; i++) {
475
+ statements.push({
476
+ statement: `INSERT INTO test_data(description) VALUES('test_init') RETURNING id`
477
+ });
478
+ }
479
+ const results = await pool.query(...statements);
480
+ const ids = new Set(
481
+ results.results.map((sub) => {
482
+ return sub.rows[0].decodeWithoutCustomTypes(0) as string;
483
+ })
484
+ );
485
+
486
+ // 3. Start replication, but don't wait for it
487
+ let initialReplicationDone = false;
488
+
489
+ streamPromise = context.replicateSnapshot().finally(() => {
490
+ initialReplicationDone = true;
491
+ });
492
+
493
+ // 4. While initial replication is still running, delete random rows
494
+ while (!initialReplicationDone && ids.size > 0) {
495
+ let statements: pgwire.Statement[] = [];
496
+
497
+ const m = Math.floor(Math.random() * 10) + 1;
498
+ const idArray = Array.from(ids);
499
+ for (let i = 0; i < m; i++) {
500
+ const id = idArray[Math.floor(Math.random() * idArray.length)];
501
+ statements.push({
502
+ statement: `DELETE FROM test_data WHERE id = $1`,
503
+ params: [{ type: 'uuid', value: id }]
504
+ });
505
+ ids.delete(id);
506
+ }
507
+ await pool.query(...statements);
508
+ await new Promise((resolve) => setTimeout(resolve, Math.random() * 10));
509
+ }
510
+
511
+ await streamPromise;
512
+
513
+ // 5. Once initial replication is done, wait for the streaming changes to complete syncing.
514
+ const data = await context.getBucketData('global[]', 0n);
515
+ const normalized = reduceBucket(data).filter((op) => op.op !== 'CLEAR');
516
+ expect(normalized.length).toEqual(ids.size);
517
+ }
518
+ });
412
519
  }
@@ -7,9 +7,9 @@ describe.skipIf(!env.TEST_POSTGRES_STORAGE)('replication storage combination - p
7
7
  test('should allow the same Postgres cluster to be used for data and storage', async () => {
8
8
  // Use the same cluster for the storage as the data source
9
9
  await using context = await WalStreamTestContext.open(
10
- postgres_storage.test_utils.postgresTestStorageFactoryGenerator({
10
+ postgres_storage.test_utils.postgresTestSetup({
11
11
  url: env.PG_TEST_URL
12
- }),
12
+ }).factory,
13
13
  { doNotClear: false }
14
14
  );
15
15
 
package/test/src/util.ts CHANGED
@@ -7,6 +7,8 @@ import {
7
7
  CURRENT_STORAGE_VERSION,
8
8
  InternalOpId,
9
9
  LEGACY_STORAGE_VERSION,
10
+ SUPPORTED_STORAGE_VERSIONS,
11
+ TestStorageConfig,
10
12
  TestStorageFactory
11
13
  } from '@powersync/service-core';
12
14
  import * as pgwire from '@powersync/service-jpgwire';
@@ -22,24 +24,28 @@ export const INITIALIZED_MONGO_STORAGE_FACTORY = mongo_storage.test_utils.mongoT
22
24
  isCI: env.CI
23
25
  });
24
26
 
25
- export const INITIALIZED_POSTGRES_STORAGE_FACTORY = postgres_storage.test_utils.postgresTestStorageFactoryGenerator({
27
+ export const INITIALIZED_POSTGRES_STORAGE_FACTORY = postgres_storage.test_utils.postgresTestSetup({
26
28
  url: env.PG_STORAGE_TEST_URL
27
29
  });
28
30
 
29
- const TEST_STORAGE_VERSIONS = [LEGACY_STORAGE_VERSION, CURRENT_STORAGE_VERSION];
31
+ const TEST_STORAGE_VERSIONS = SUPPORTED_STORAGE_VERSIONS;
30
32
 
31
33
  export interface StorageVersionTestContext {
32
34
  factory: TestStorageFactory;
33
35
  storageVersion: number;
34
36
  }
35
37
 
36
- export function describeWithStorage(options: TestOptions, fn: (context: StorageVersionTestContext) => void) {
37
- const describeFactory = (storageName: string, factory: TestStorageFactory) => {
38
+ export function describeWithStorage(
39
+ options: TestOptions & { storageVersions?: number[] },
40
+ fn: (context: StorageVersionTestContext) => void
41
+ ) {
42
+ const storageVersions = options.storageVersions ?? TEST_STORAGE_VERSIONS;
43
+ const describeFactory = (storageName: string, config: TestStorageConfig) => {
38
44
  describe(`${storageName} storage`, options, function () {
39
- for (const storageVersion of TEST_STORAGE_VERSIONS) {
45
+ for (const storageVersion of storageVersions) {
40
46
  describe(`storage v${storageVersion}`, function () {
41
47
  fn({
42
- factory,
48
+ factory: config.factory,
43
49
  storageVersion
44
50
  });
45
51
  });
@@ -138,7 +144,7 @@ export async function getClientCheckpoint(
138
144
  return cp.checkpoint;
139
145
  }
140
146
 
141
- await new Promise((resolve) => setTimeout(resolve, 30));
147
+ await new Promise((resolve) => setTimeout(resolve, 5));
142
148
  }
143
149
 
144
150
  throw new Error('Timeout while waiting for checkpoint');
@@ -6,7 +6,7 @@ import { WalStreamTestContext } from './wal_stream_utils.js';
6
6
  import { updateSyncRulesFromYaml } from '@powersync/service-core';
7
7
 
8
8
  test('validate tables', async () => {
9
- await using context = await WalStreamTestContext.open(INITIALIZED_MONGO_STORAGE_FACTORY);
9
+ await using context = await WalStreamTestContext.open(INITIALIZED_MONGO_STORAGE_FACTORY.factory);
10
10
  const { pool } = context;
11
11
 
12
12
  await pool.query(`CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`);
@@ -1,12 +1,12 @@
1
1
  import { MissingReplicationSlotError } from '@module/replication/WalStream.js';
2
2
  import { METRICS_HELPER, putOp, removeOp } from '@powersync/service-core-tests';
3
3
  import { pgwireRows } from '@powersync/service-jpgwire';
4
+ import { JSONBig } from '@powersync/service-jsonbig';
4
5
  import { ReplicationMetric } from '@powersync/service-types';
5
6
  import * as crypto from 'crypto';
6
7
  import { describe, expect, test } from 'vitest';
7
8
  import { describeWithStorage, StorageVersionTestContext } from './util.js';
8
9
  import { WalStreamTestContext, withMaxWalSize } from './wal_stream_utils.js';
9
- import { JSONBig } from '@powersync/service-jsonbig';
10
10
 
11
11
  const BASIC_SYNC_RULES = `
12
12
  bucket_definitions:
@@ -105,7 +105,6 @@ bucket_definitions:
105
105
  );
106
106
 
107
107
  await context.replicateSnapshot();
108
- context.startStreaming();
109
108
 
110
109
  // Must be > 8kb after compression
111
110
  const largeDescription = crypto.randomBytes(20_000).toString('hex');
@@ -212,7 +211,6 @@ bucket_definitions:
212
211
  );
213
212
 
214
213
  await context.replicateSnapshot();
215
- context.startStreaming();
216
214
 
217
215
  const data = await context.getBucketData('global[]');
218
216
  expect(data).toMatchObject([putOp('test_data', { id: test_id, description: 'test1' })]);
@@ -244,8 +242,6 @@ bucket_definitions:
244
242
  params: [{ type: 'varchar', value: largeDescription }]
245
243
  });
246
244
 
247
- context.startStreaming();
248
-
249
245
  const data = await context.getBucketData('global[]');
250
246
  expect(data.length).toEqual(1);
251
247
  const row = JSON.parse(data[0].data as string);
@@ -297,7 +293,6 @@ bucket_definitions:
297
293
  `INSERT INTO test_data(id, description) VALUES('8133cd37-903b-4937-a022-7c8294015a3a', 'test1') returning id as test_id`
298
294
  );
299
295
  await context.replicateSnapshot();
300
- context.startStreaming();
301
296
 
302
297
  const data = await context.getBucketData('global[]');
303
298
 
@@ -322,15 +317,12 @@ bucket_definitions:
322
317
 
323
318
  await context.loadActiveSyncRules();
324
319
 
325
- // Previously, the `replicateSnapshot` call picked up on this error.
326
- // Now, we have removed that check, this only comes up when we start actually streaming.
327
- // We don't get the streaming response directly here, but getCheckpoint() checks for that.
328
- await context.replicateSnapshot();
329
- context.startStreaming();
320
+ // Note: The actual error may be thrown either in replicateSnapshot(), or in getCheckpoint().
330
321
 
331
322
  if (serverVersion!.compareMain('18.0.0') >= 0) {
332
323
  // No error expected in Postgres 18. Replication keeps on working despite the
333
324
  // publication being re-created.
325
+ await context.replicateSnapshot();
334
326
  await context.getCheckpoint();
335
327
  } else {
336
328
  // await context.getCheckpoint();
@@ -338,9 +330,9 @@ bucket_definitions:
338
330
  // In the service, this error is handled in WalStreamReplicationJob,
339
331
  // creating a new replication slot.
340
332
  await expect(async () => {
333
+ await context.replicateSnapshot();
341
334
  await context.getCheckpoint();
342
335
  }).rejects.toThrowError(MissingReplicationSlotError);
343
- context.clearStreamError();
344
336
  }
345
337
  }
346
338
  });
@@ -362,7 +354,6 @@ bucket_definitions:
362
354
  `INSERT INTO test_data(id, description) VALUES('8133cd37-903b-4937-a022-7c8294015a3a', 'test1') returning id as test_id`
363
355
  );
364
356
  await context.replicateSnapshot();
365
- context.startStreaming();
366
357
 
367
358
  const data = await context.getBucketData('global[]');
368
359
 
@@ -425,7 +416,6 @@ bucket_definitions:
425
416
  `INSERT INTO test_data(id, description) VALUES('8133cd37-903b-4937-a022-7c8294015a3a', 'test1') returning id as test_id`
426
417
  );
427
418
  await context.replicateSnapshot();
428
- context.startStreaming();
429
419
 
430
420
  const data = await context.getBucketData('global[]');
431
421
 
@@ -512,7 +502,7 @@ config:
512
502
  await context.initializeReplication();
513
503
  await pool.query(`INSERT INTO test_data(id, description) VALUES ('t1', '2025-09-10 15:17:14+02')`);
514
504
 
515
- const data = await context.getBucketData('1#stream|0[]');
505
+ const data = await context.getBucketData('stream|0[]');
516
506
  expect(data).toMatchObject([putOp('test_data', { id: 't1', description: '2025-09-10T13:17:14.000000Z' })]);
517
507
  });
518
508
 
@@ -544,7 +534,7 @@ config:
544
534
  `INSERT INTO test_data(id, description, ts) VALUES ('t2', ROW(TRUE, 2)::composite, '2025-11-17T09:12:00Z')`
545
535
  );
546
536
 
547
- const data = await context.getBucketData('1#stream|0[]');
537
+ const data = await context.getBucketData('stream|0[]');
548
538
  expect(data).toMatchObject([
549
539
  putOp('test_data', { id: 't1', description: '{"foo":1,"bar":1}', ts: '2025-11-17T09:11:00.000000Z' }),
550
540
  putOp('test_data', { id: 't2', description: '{"foo":1,"bar":2}', ts: '2025-11-17T09:12:00.000000Z' })
@@ -571,7 +561,7 @@ config:
571
561
  await context.initializeReplication();
572
562
  await pool.query(`INSERT INTO test_data(id) VALUES ('t1')`);
573
563
 
574
- const data = await context.getBucketData('1#stream|0[]');
564
+ const data = await context.getBucketData('stream|0[]');
575
565
  expect(data).toMatchObject([putOp('test_data', { id: 't1' })]);
576
566
  });
577
567
 
@@ -593,7 +583,6 @@ config:
593
583
  );
594
584
 
595
585
  await context.replicateSnapshot();
596
- context.startStreaming();
597
586
 
598
587
  await pool.query(`UPDATE test_data SET description = 'test2' WHERE id = '${test_id}'`);
599
588
 
@@ -1,6 +1,6 @@
1
1
  import { PgManager } from '@module/replication/PgManager.js';
2
2
  import { PUBLICATION_NAME, WalStream, WalStreamOptions } from '@module/replication/WalStream.js';
3
- import { CustomTypeRegistry } from '@module/types/registry.js';
3
+ import { ReplicationAbortedError } from '@powersync/lib-services-framework';
4
4
  import {
5
5
  BucketStorageFactory,
6
6
  createCoreReplicationMetrics,
@@ -8,23 +8,24 @@ import {
8
8
  InternalOpId,
9
9
  LEGACY_STORAGE_VERSION,
10
10
  OplogEntry,
11
- STORAGE_VERSION_CONFIG,
11
+ settledPromise,
12
12
  storage,
13
+ STORAGE_VERSION_CONFIG,
13
14
  SyncRulesBucketStorage,
15
+ unsettledPromise,
14
16
  updateSyncRulesFromYaml
15
17
  } from '@powersync/service-core';
16
- import { METRICS_HELPER, test_utils } from '@powersync/service-core-tests';
18
+ import { bucketRequest, METRICS_HELPER, test_utils } from '@powersync/service-core-tests';
17
19
  import * as pgwire from '@powersync/service-jpgwire';
18
20
  import { clearTestDb, getClientCheckpoint, TEST_CONNECTION_OPTIONS } from './util.js';
19
21
 
20
22
  export class WalStreamTestContext implements AsyncDisposable {
21
23
  private _walStream?: WalStream;
22
24
  private abortController = new AbortController();
23
- private streamPromise?: Promise<void>;
24
25
  private syncRulesId?: number;
26
+ private syncRulesContent?: storage.PersistedSyncRulesContent;
25
27
  public storage?: SyncRulesBucketStorage;
26
- private replicationConnection?: pgwire.PgConnection;
27
- private snapshotPromise?: Promise<void>;
28
+ private settledReplicationPromise?: Promise<PromiseSettledResult<void>>;
28
29
 
29
30
  /**
30
31
  * Tests operating on the wal stream need to configure the stream and manage asynchronous
@@ -64,21 +65,10 @@ export class WalStreamTestContext implements AsyncDisposable {
64
65
  await this.dispose();
65
66
  }
66
67
 
67
- /**
68
- * Clear any errors from startStream, to allow for a graceful dispose when streaming errors
69
- * were expected.
70
- */
71
- async clearStreamError() {
72
- if (this.streamPromise != null) {
73
- this.streamPromise = this.streamPromise.catch((e) => {});
74
- }
75
- }
76
-
77
68
  async dispose() {
78
69
  this.abortController.abort();
79
70
  try {
80
- await this.snapshotPromise;
81
- await this.streamPromise;
71
+ await this.settledReplicationPromise;
82
72
  await this.connectionManager.destroy();
83
73
  await this.factory?.[Symbol.asyncDispose]();
84
74
  } catch (e) {
@@ -108,6 +98,7 @@ export class WalStreamTestContext implements AsyncDisposable {
108
98
  updateSyncRulesFromYaml(content, { validate: true, storageVersion: this.storageVersion })
109
99
  );
110
100
  this.syncRulesId = syncRules.id;
101
+ this.syncRulesContent = syncRules;
111
102
  this.storage = this.factory.getInstance(syncRules);
112
103
  return this.storage!;
113
104
  }
@@ -119,6 +110,7 @@ export class WalStreamTestContext implements AsyncDisposable {
119
110
  }
120
111
 
121
112
  this.syncRulesId = syncRules.id;
113
+ this.syncRulesContent = syncRules;
122
114
  this.storage = this.factory.getInstance(syncRules);
123
115
  return this.storage!;
124
116
  }
@@ -130,10 +122,18 @@ export class WalStreamTestContext implements AsyncDisposable {
130
122
  }
131
123
 
132
124
  this.syncRulesId = syncRules.id;
125
+ this.syncRulesContent = syncRules;
133
126
  this.storage = this.factory.getInstance(syncRules);
134
127
  return this.storage!;
135
128
  }
136
129
 
130
+ private getSyncRulesContent(): storage.PersistedSyncRulesContent {
131
+ if (this.syncRulesContent == null) {
132
+ throw new Error('Sync rules not configured - call updateSyncRules() first');
133
+ }
134
+ return this.syncRulesContent;
135
+ }
136
+
137
137
  get walStream() {
138
138
  if (this.storage == null) {
139
139
  throw new Error('updateSyncRules() first');
@@ -157,55 +157,46 @@ export class WalStreamTestContext implements AsyncDisposable {
157
157
  */
158
158
  async initializeReplication() {
159
159
  await this.replicateSnapshot();
160
- this.startStreaming();
161
160
  // Make sure we're up to date
162
161
  await this.getCheckpoint();
163
162
  }
164
163
 
164
+ /**
165
+ * Replicate the initial snapshot, and start streaming.
166
+ */
165
167
  async replicateSnapshot() {
166
- const promise = (async () => {
167
- this.replicationConnection = await this.connectionManager.replicationConnection();
168
- await this.walStream.initReplication(this.replicationConnection);
169
- })();
170
- this.snapshotPromise = promise.catch((e) => e);
171
- await promise;
172
- }
173
-
174
- startStreaming() {
175
- if (this.replicationConnection == null) {
176
- throw new Error('Call replicateSnapshot() before startStreaming()');
168
+ // Use a settledPromise to avoid unhandled rejections
169
+ this.settledReplicationPromise = settledPromise(this.walStream.replicate());
170
+ try {
171
+ await Promise.race([unsettledPromise(this.settledReplicationPromise), this.walStream.waitForInitialSnapshot()]);
172
+ } catch (e) {
173
+ if (e instanceof ReplicationAbortedError && e.cause != null) {
174
+ // Edge case for tests: replicate() can throw an error, but we'd receive the ReplicationAbortedError from
175
+ // waitForInitialSnapshot() first. In that case, prioritize the cause, e.g. MissingReplicationSlotError.
176
+ // This is not a concern for production use, since we only use waitForInitialSnapshot() in tests.
177
+ throw e.cause;
178
+ }
179
+ throw e;
177
180
  }
178
- this.streamPromise = this.walStream.streamChanges(this.replicationConnection!);
179
181
  }
180
182
 
181
183
  async getCheckpoint(options?: { timeout?: number }) {
182
184
  let checkpoint = await Promise.race([
183
185
  getClientCheckpoint(this.pool, this.factory, { timeout: options?.timeout ?? 15_000 }),
184
- this.streamPromise
186
+ unsettledPromise(this.settledReplicationPromise!)
185
187
  ]);
186
188
  if (checkpoint == null) {
187
- // This indicates an issue with the test setup - streamingPromise completed instead
189
+ // This indicates an issue with the test setup - replicationPromise completed instead
188
190
  // of getClientCheckpoint()
189
- throw new Error('Test failure - streamingPromise completed');
191
+ throw new Error('Test failure - replicationPromise completed');
190
192
  }
191
193
  return checkpoint;
192
194
  }
193
195
 
194
- private resolveBucketName(bucket: string) {
195
- if (!this.versionedBuckets || /^\d+#/.test(bucket)) {
196
- return bucket;
197
- }
198
- if (this.syncRulesId == null) {
199
- throw new Error('Sync rules not configured - call updateSyncRules() first');
200
- }
201
- return `${this.syncRulesId}#${bucket}`;
202
- }
203
-
204
196
  async getBucketsDataBatch(buckets: Record<string, InternalOpId>, options?: { timeout?: number }) {
205
197
  let checkpoint = await this.getCheckpoint(options);
206
- const map = new Map<string, InternalOpId>(
207
- Object.entries(buckets).map(([bucket, opId]) => [this.resolveBucketName(bucket), opId])
208
- );
198
+ const syncRules = this.getSyncRulesContent();
199
+ const map = Object.entries(buckets).map(([bucket, start]) => bucketRequest(syncRules, bucket, start));
209
200
  return test_utils.fromAsync(this.storage!.getBucketDataBatch(checkpoint, map));
210
201
  }
211
202
 
@@ -217,9 +208,9 @@ export class WalStreamTestContext implements AsyncDisposable {
217
208
  if (typeof start == 'string') {
218
209
  start = BigInt(start);
219
210
  }
220
- const resolvedBucket = this.resolveBucketName(bucket);
211
+ const syncRules = this.getSyncRulesContent();
221
212
  const checkpoint = await this.getCheckpoint(options);
222
- const map = new Map<string, InternalOpId>([[resolvedBucket, start]]);
213
+ let map = [bucketRequest(syncRules, bucket, start)];
223
214
  let data: OplogEntry[] = [];
224
215
  while (true) {
225
216
  const batch = this.storage!.getBucketDataBatch(checkpoint, map);
@@ -229,19 +220,20 @@ export class WalStreamTestContext implements AsyncDisposable {
229
220
  if (batches.length == 0 || !batches[0]!.chunkData.has_more) {
230
221
  break;
231
222
  }
232
- map.set(resolvedBucket, BigInt(batches[0]!.chunkData.next_after));
223
+ map = [bucketRequest(syncRules, bucket, BigInt(batches[0]!.chunkData.next_after))];
233
224
  }
234
225
  return data;
235
226
  }
236
227
 
237
228
  async getChecksums(buckets: string[], options?: { timeout?: number }) {
238
229
  const checkpoint = await this.getCheckpoint(options);
239
- const versionedBuckets = buckets.map((bucket) => this.resolveBucketName(bucket));
230
+ const syncRules = this.getSyncRulesContent();
231
+ const versionedBuckets = buckets.map((bucket) => bucketRequest(syncRules, bucket, 0n));
240
232
  const checksums = await this.storage!.getChecksums(checkpoint, versionedBuckets);
241
233
 
242
234
  const unversioned = new Map();
243
235
  for (let i = 0; i < buckets.length; i++) {
244
- unversioned.set(buckets[i], checksums.get(versionedBuckets[i])!);
236
+ unversioned.set(buckets[i], checksums.get(versionedBuckets[i].bucket)!);
245
237
  }
246
238
 
247
239
  return unversioned;
@@ -260,9 +252,9 @@ export class WalStreamTestContext implements AsyncDisposable {
260
252
  if (typeof start == 'string') {
261
253
  start = BigInt(start);
262
254
  }
263
- const resolvedBucket = this.resolveBucketName(bucket);
255
+ const syncRules = this.getSyncRulesContent();
264
256
  const { checkpoint } = await this.storage!.getCheckpoint();
265
- const map = new Map<string, InternalOpId>([[resolvedBucket, start]]);
257
+ const map = [bucketRequest(syncRules, bucket, start)];
266
258
  const batch = this.storage!.getBucketDataBatch(checkpoint, map);
267
259
  const batches = await test_utils.fromAsync(batch);
268
260
  return batches[0]?.chunkData.data ?? [];