@powersync/service-module-postgres 0.18.0 → 0.19.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,12 +1,12 @@
1
1
  import { MissingReplicationSlotError } from '@module/replication/WalStream.js';
2
2
  import { METRICS_HELPER, putOp, removeOp } from '@powersync/service-core-tests';
3
3
  import { pgwireRows } from '@powersync/service-jpgwire';
4
+ import { JSONBig } from '@powersync/service-jsonbig';
4
5
  import { ReplicationMetric } from '@powersync/service-types';
5
6
  import * as crypto from 'crypto';
6
7
  import { describe, expect, test } from 'vitest';
7
8
  import { describeWithStorage, StorageVersionTestContext } from './util.js';
8
9
  import { WalStreamTestContext, withMaxWalSize } from './wal_stream_utils.js';
9
- import { JSONBig } from '@powersync/service-jsonbig';
10
10
 
11
11
  const BASIC_SYNC_RULES = `
12
12
  bucket_definitions:
@@ -105,7 +105,6 @@ bucket_definitions:
105
105
  );
106
106
 
107
107
  await context.replicateSnapshot();
108
- context.startStreaming();
109
108
 
110
109
  // Must be > 8kb after compression
111
110
  const largeDescription = crypto.randomBytes(20_000).toString('hex');
@@ -212,7 +211,6 @@ bucket_definitions:
212
211
  );
213
212
 
214
213
  await context.replicateSnapshot();
215
- context.startStreaming();
216
214
 
217
215
  const data = await context.getBucketData('global[]');
218
216
  expect(data).toMatchObject([putOp('test_data', { id: test_id, description: 'test1' })]);
@@ -244,8 +242,6 @@ bucket_definitions:
244
242
  params: [{ type: 'varchar', value: largeDescription }]
245
243
  });
246
244
 
247
- context.startStreaming();
248
-
249
245
  const data = await context.getBucketData('global[]');
250
246
  expect(data.length).toEqual(1);
251
247
  const row = JSON.parse(data[0].data as string);
@@ -297,7 +293,6 @@ bucket_definitions:
297
293
  `INSERT INTO test_data(id, description) VALUES('8133cd37-903b-4937-a022-7c8294015a3a', 'test1') returning id as test_id`
298
294
  );
299
295
  await context.replicateSnapshot();
300
- context.startStreaming();
301
296
 
302
297
  const data = await context.getBucketData('global[]');
303
298
 
@@ -322,15 +317,12 @@ bucket_definitions:
322
317
 
323
318
  await context.loadActiveSyncRules();
324
319
 
325
- // Previously, the `replicateSnapshot` call picked up on this error.
326
- // Now, we have removed that check, this only comes up when we start actually streaming.
327
- // We don't get the streaming response directly here, but getCheckpoint() checks for that.
328
- await context.replicateSnapshot();
329
- context.startStreaming();
320
+ // Note: The actual error may be thrown either in replicateSnapshot(), or in getCheckpoint().
330
321
 
331
322
  if (serverVersion!.compareMain('18.0.0') >= 0) {
332
323
  // No error expected in Postgres 18. Replication keeps on working despite the
333
324
  // publication being re-created.
325
+ await context.replicateSnapshot();
334
326
  await context.getCheckpoint();
335
327
  } else {
336
328
  // await context.getCheckpoint();
@@ -338,9 +330,9 @@ bucket_definitions:
338
330
  // In the service, this error is handled in WalStreamReplicationJob,
339
331
  // creating a new replication slot.
340
332
  await expect(async () => {
333
+ await context.replicateSnapshot();
341
334
  await context.getCheckpoint();
342
335
  }).rejects.toThrowError(MissingReplicationSlotError);
343
- context.clearStreamError();
344
336
  }
345
337
  }
346
338
  });
@@ -362,7 +354,6 @@ bucket_definitions:
362
354
  `INSERT INTO test_data(id, description) VALUES('8133cd37-903b-4937-a022-7c8294015a3a', 'test1') returning id as test_id`
363
355
  );
364
356
  await context.replicateSnapshot();
365
- context.startStreaming();
366
357
 
367
358
  const data = await context.getBucketData('global[]');
368
359
 
@@ -425,7 +416,6 @@ bucket_definitions:
425
416
  `INSERT INTO test_data(id, description) VALUES('8133cd37-903b-4937-a022-7c8294015a3a', 'test1') returning id as test_id`
426
417
  );
427
418
  await context.replicateSnapshot();
428
- context.startStreaming();
429
419
 
430
420
  const data = await context.getBucketData('global[]');
431
421
 
@@ -512,7 +502,7 @@ config:
512
502
  await context.initializeReplication();
513
503
  await pool.query(`INSERT INTO test_data(id, description) VALUES ('t1', '2025-09-10 15:17:14+02')`);
514
504
 
515
- const data = await context.getBucketData('1#stream|0[]');
505
+ const data = await context.getBucketData('stream|0[]');
516
506
  expect(data).toMatchObject([putOp('test_data', { id: 't1', description: '2025-09-10T13:17:14.000000Z' })]);
517
507
  });
518
508
 
@@ -544,7 +534,7 @@ config:
544
534
  `INSERT INTO test_data(id, description, ts) VALUES ('t2', ROW(TRUE, 2)::composite, '2025-11-17T09:12:00Z')`
545
535
  );
546
536
 
547
- const data = await context.getBucketData('1#stream|0[]');
537
+ const data = await context.getBucketData('stream|0[]');
548
538
  expect(data).toMatchObject([
549
539
  putOp('test_data', { id: 't1', description: '{"foo":1,"bar":1}', ts: '2025-11-17T09:11:00.000000Z' }),
550
540
  putOp('test_data', { id: 't2', description: '{"foo":1,"bar":2}', ts: '2025-11-17T09:12:00.000000Z' })
@@ -571,7 +561,7 @@ config:
571
561
  await context.initializeReplication();
572
562
  await pool.query(`INSERT INTO test_data(id) VALUES ('t1')`);
573
563
 
574
- const data = await context.getBucketData('1#stream|0[]');
564
+ const data = await context.getBucketData('stream|0[]');
575
565
  expect(data).toMatchObject([putOp('test_data', { id: 't1' })]);
576
566
  });
577
567
 
@@ -593,7 +583,6 @@ config:
593
583
  );
594
584
 
595
585
  await context.replicateSnapshot();
596
- context.startStreaming();
597
586
 
598
587
  await pool.query(`UPDATE test_data SET description = 'test2' WHERE id = '${test_id}'`);
599
588
 
@@ -1,6 +1,6 @@
1
1
  import { PgManager } from '@module/replication/PgManager.js';
2
2
  import { PUBLICATION_NAME, WalStream, WalStreamOptions } from '@module/replication/WalStream.js';
3
- import { CustomTypeRegistry } from '@module/types/registry.js';
3
+ import { ReplicationAbortedError } from '@powersync/lib-services-framework';
4
4
  import {
5
5
  BucketStorageFactory,
6
6
  createCoreReplicationMetrics,
@@ -8,23 +8,24 @@ import {
8
8
  InternalOpId,
9
9
  LEGACY_STORAGE_VERSION,
10
10
  OplogEntry,
11
- STORAGE_VERSION_CONFIG,
11
+ settledPromise,
12
12
  storage,
13
+ STORAGE_VERSION_CONFIG,
13
14
  SyncRulesBucketStorage,
15
+ unsettledPromise,
14
16
  updateSyncRulesFromYaml
15
17
  } from '@powersync/service-core';
16
- import { METRICS_HELPER, test_utils } from '@powersync/service-core-tests';
18
+ import { bucketRequest, METRICS_HELPER, test_utils } from '@powersync/service-core-tests';
17
19
  import * as pgwire from '@powersync/service-jpgwire';
18
20
  import { clearTestDb, getClientCheckpoint, TEST_CONNECTION_OPTIONS } from './util.js';
19
21
 
20
22
  export class WalStreamTestContext implements AsyncDisposable {
21
23
  private _walStream?: WalStream;
22
24
  private abortController = new AbortController();
23
- private streamPromise?: Promise<void>;
24
25
  private syncRulesId?: number;
26
+ private syncRulesContent?: storage.PersistedSyncRulesContent;
25
27
  public storage?: SyncRulesBucketStorage;
26
- private replicationConnection?: pgwire.PgConnection;
27
- private snapshotPromise?: Promise<void>;
28
+ private settledReplicationPromise?: Promise<PromiseSettledResult<void>>;
28
29
 
29
30
  /**
30
31
  * Tests operating on the wal stream need to configure the stream and manage asynchronous
@@ -64,21 +65,10 @@ export class WalStreamTestContext implements AsyncDisposable {
64
65
  await this.dispose();
65
66
  }
66
67
 
67
- /**
68
- * Clear any errors from startStream, to allow for a graceful dispose when streaming errors
69
- * were expected.
70
- */
71
- async clearStreamError() {
72
- if (this.streamPromise != null) {
73
- this.streamPromise = this.streamPromise.catch((e) => {});
74
- }
75
- }
76
-
77
68
  async dispose() {
78
69
  this.abortController.abort();
79
70
  try {
80
- await this.snapshotPromise;
81
- await this.streamPromise;
71
+ await this.settledReplicationPromise;
82
72
  await this.connectionManager.destroy();
83
73
  await this.factory?.[Symbol.asyncDispose]();
84
74
  } catch (e) {
@@ -108,6 +98,7 @@ export class WalStreamTestContext implements AsyncDisposable {
108
98
  updateSyncRulesFromYaml(content, { validate: true, storageVersion: this.storageVersion })
109
99
  );
110
100
  this.syncRulesId = syncRules.id;
101
+ this.syncRulesContent = syncRules;
111
102
  this.storage = this.factory.getInstance(syncRules);
112
103
  return this.storage!;
113
104
  }
@@ -119,6 +110,7 @@ export class WalStreamTestContext implements AsyncDisposable {
119
110
  }
120
111
 
121
112
  this.syncRulesId = syncRules.id;
113
+ this.syncRulesContent = syncRules;
122
114
  this.storage = this.factory.getInstance(syncRules);
123
115
  return this.storage!;
124
116
  }
@@ -130,10 +122,18 @@ export class WalStreamTestContext implements AsyncDisposable {
130
122
  }
131
123
 
132
124
  this.syncRulesId = syncRules.id;
125
+ this.syncRulesContent = syncRules;
133
126
  this.storage = this.factory.getInstance(syncRules);
134
127
  return this.storage!;
135
128
  }
136
129
 
130
+ private getSyncRulesContent(): storage.PersistedSyncRulesContent {
131
+ if (this.syncRulesContent == null) {
132
+ throw new Error('Sync rules not configured - call updateSyncRules() first');
133
+ }
134
+ return this.syncRulesContent;
135
+ }
136
+
137
137
  get walStream() {
138
138
  if (this.storage == null) {
139
139
  throw new Error('updateSyncRules() first');
@@ -157,55 +157,46 @@ export class WalStreamTestContext implements AsyncDisposable {
157
157
  */
158
158
  async initializeReplication() {
159
159
  await this.replicateSnapshot();
160
- this.startStreaming();
161
160
  // Make sure we're up to date
162
161
  await this.getCheckpoint();
163
162
  }
164
163
 
164
+ /**
165
+ * Replicate the initial snapshot, and start streaming.
166
+ */
165
167
  async replicateSnapshot() {
166
- const promise = (async () => {
167
- this.replicationConnection = await this.connectionManager.replicationConnection();
168
- await this.walStream.initReplication(this.replicationConnection);
169
- })();
170
- this.snapshotPromise = promise.catch((e) => e);
171
- await promise;
172
- }
173
-
174
- startStreaming() {
175
- if (this.replicationConnection == null) {
176
- throw new Error('Call replicateSnapshot() before startStreaming()');
168
+ // Use a settledPromise to avoid unhandled rejections
169
+ this.settledReplicationPromise = settledPromise(this.walStream.replicate());
170
+ try {
171
+ await Promise.race([unsettledPromise(this.settledReplicationPromise), this.walStream.waitForInitialSnapshot()]);
172
+ } catch (e) {
173
+ if (e instanceof ReplicationAbortedError && e.cause != null) {
174
+ // Edge case for tests: replicate() can throw an error, but we'd receive the ReplicationAbortedError from
175
+ // waitForInitialSnapshot() first. In that case, prioritize the cause, e.g. MissingReplicationSlotError.
176
+ // This is not a concern for production use, since we only use waitForInitialSnapshot() in tests.
177
+ throw e.cause;
178
+ }
179
+ throw e;
177
180
  }
178
- this.streamPromise = this.walStream.streamChanges(this.replicationConnection!);
179
181
  }
180
182
 
181
183
  async getCheckpoint(options?: { timeout?: number }) {
182
184
  let checkpoint = await Promise.race([
183
185
  getClientCheckpoint(this.pool, this.factory, { timeout: options?.timeout ?? 15_000 }),
184
- this.streamPromise
186
+ unsettledPromise(this.settledReplicationPromise!)
185
187
  ]);
186
188
  if (checkpoint == null) {
187
- // This indicates an issue with the test setup - streamingPromise completed instead
189
+ // This indicates an issue with the test setup - replicationPromise completed instead
188
190
  // of getClientCheckpoint()
189
- throw new Error('Test failure - streamingPromise completed');
191
+ throw new Error('Test failure - replicationPromise completed');
190
192
  }
191
193
  return checkpoint;
192
194
  }
193
195
 
194
- private resolveBucketName(bucket: string) {
195
- if (!this.versionedBuckets || /^\d+#/.test(bucket)) {
196
- return bucket;
197
- }
198
- if (this.syncRulesId == null) {
199
- throw new Error('Sync rules not configured - call updateSyncRules() first');
200
- }
201
- return `${this.syncRulesId}#${bucket}`;
202
- }
203
-
204
196
  async getBucketsDataBatch(buckets: Record<string, InternalOpId>, options?: { timeout?: number }) {
205
197
  let checkpoint = await this.getCheckpoint(options);
206
- const map = new Map<string, InternalOpId>(
207
- Object.entries(buckets).map(([bucket, opId]) => [this.resolveBucketName(bucket), opId])
208
- );
198
+ const syncRules = this.getSyncRulesContent();
199
+ const map = Object.entries(buckets).map(([bucket, start]) => bucketRequest(syncRules, bucket, start));
209
200
  return test_utils.fromAsync(this.storage!.getBucketDataBatch(checkpoint, map));
210
201
  }
211
202
 
@@ -217,9 +208,9 @@ export class WalStreamTestContext implements AsyncDisposable {
217
208
  if (typeof start == 'string') {
218
209
  start = BigInt(start);
219
210
  }
220
- const resolvedBucket = this.resolveBucketName(bucket);
211
+ const syncRules = this.getSyncRulesContent();
221
212
  const checkpoint = await this.getCheckpoint(options);
222
- const map = new Map<string, InternalOpId>([[resolvedBucket, start]]);
213
+ let map = [bucketRequest(syncRules, bucket, start)];
223
214
  let data: OplogEntry[] = [];
224
215
  while (true) {
225
216
  const batch = this.storage!.getBucketDataBatch(checkpoint, map);
@@ -229,19 +220,20 @@ export class WalStreamTestContext implements AsyncDisposable {
229
220
  if (batches.length == 0 || !batches[0]!.chunkData.has_more) {
230
221
  break;
231
222
  }
232
- map.set(resolvedBucket, BigInt(batches[0]!.chunkData.next_after));
223
+ map = [bucketRequest(syncRules, bucket, BigInt(batches[0]!.chunkData.next_after))];
233
224
  }
234
225
  return data;
235
226
  }
236
227
 
237
228
  async getChecksums(buckets: string[], options?: { timeout?: number }) {
238
229
  const checkpoint = await this.getCheckpoint(options);
239
- const versionedBuckets = buckets.map((bucket) => this.resolveBucketName(bucket));
230
+ const syncRules = this.getSyncRulesContent();
231
+ const versionedBuckets = buckets.map((bucket) => bucketRequest(syncRules, bucket, 0n));
240
232
  const checksums = await this.storage!.getChecksums(checkpoint, versionedBuckets);
241
233
 
242
234
  const unversioned = new Map();
243
235
  for (let i = 0; i < buckets.length; i++) {
244
- unversioned.set(buckets[i], checksums.get(versionedBuckets[i])!);
236
+ unversioned.set(buckets[i], checksums.get(versionedBuckets[i].bucket)!);
245
237
  }
246
238
 
247
239
  return unversioned;
@@ -260,9 +252,9 @@ export class WalStreamTestContext implements AsyncDisposable {
260
252
  if (typeof start == 'string') {
261
253
  start = BigInt(start);
262
254
  }
263
- const resolvedBucket = this.resolveBucketName(bucket);
255
+ const syncRules = this.getSyncRulesContent();
264
256
  const { checkpoint } = await this.storage!.getCheckpoint();
265
- const map = new Map<string, InternalOpId>([[resolvedBucket, start]]);
257
+ const map = [bucketRequest(syncRules, bucket, start)];
266
258
  const batch = this.storage!.getBucketDataBatch(checkpoint, map);
267
259
  const batches = await test_utils.fromAsync(batch);
268
260
  return batches[0]?.chunkData.data ?? [];