@powersync/service-module-postgres 0.18.0 → 0.19.1

This diff shows the changes between publicly released versions of this package. The content is provided for informational purposes only and reflects the package versions as published to their respective public registries.
@@ -3,9 +3,17 @@ import { populateData } from '../../dist/utils/populate_test_data.js';
3
3
  import { env } from './env.js';
4
4
  import { describeWithStorage, StorageVersionTestContext, TEST_CONNECTION_OPTIONS } from './util.js';
5
5
  import { WalStreamTestContext } from './wal_stream_utils.js';
6
+ import { CURRENT_STORAGE_VERSION } from '@powersync/service-core';
6
7
 
7
8
  describe.skipIf(!(env.CI || env.SLOW_TESTS))('batch replication', function () {
8
- describeWithStorage({ timeout: 240_000 }, defineBatchTests);
9
+ describeWithStorage(
10
+ {
11
+ timeout: 240_000,
12
+ // These tests are slow, so only test the current storage version
13
+ storageVersions: [CURRENT_STORAGE_VERSION]
14
+ },
15
+ defineBatchTests
16
+ );
9
17
  });
10
18
 
11
19
  const BASIC_SYNC_RULES = `bucket_definitions:
@@ -40,9 +48,7 @@ function defineBatchTests({ factory, storageVersion }: StorageVersionTestContext
40
48
 
41
49
  const start = Date.now();
42
50
 
43
- context.startStreaming();
44
-
45
- const checksum = await context.getChecksums(['global[]'], { timeout: 50_000 });
51
+ const checksum = await context.getChecksums(['global[]'], { timeout: 100_000 });
46
52
  const duration = Date.now() - start;
47
53
  const used = Math.round(process.memoryUsage().heapUsed / 1024 / 1024);
48
54
  expect(checksum.get('global[]')!.count).toEqual(operation_count);
@@ -87,7 +93,6 @@ function defineBatchTests({ factory, storageVersion }: StorageVersionTestContext
87
93
  const start = Date.now();
88
94
 
89
95
  await context.replicateSnapshot();
90
- context.startStreaming();
91
96
 
92
97
  const checksum = await context.getChecksums(['global[]'], { timeout: 100_000 });
93
98
  const duration = Date.now() - start;
@@ -138,8 +143,6 @@ function defineBatchTests({ factory, storageVersion }: StorageVersionTestContext
138
143
 
139
144
  const start = Date.now();
140
145
 
141
- context.startStreaming();
142
-
143
146
  const checksum = await context.getChecksums(['global[]']);
144
147
  const duration = Date.now() - start;
145
148
  const used = Math.round(process.memoryUsage().heapUsed / 1024 / 1024);
@@ -222,7 +225,6 @@ function defineBatchTests({ factory, storageVersion }: StorageVersionTestContext
222
225
  });
223
226
  await context.replicateSnapshot();
224
227
 
225
- context.startStreaming();
226
228
  const checksum = await context.getChecksums(['global[]'], { timeout: 50_000 });
227
229
  expect(checksum.get('global[]')!.count).toEqual((numDocs + 2) * 4);
228
230
  });
@@ -1,20 +1,20 @@
1
- import type { LookupFunction } from 'node:net';
1
+ import { WalStream } from '@module/replication/WalStream.js';
2
+ import { PostgresTypeResolver } from '@module/types/resolver.js';
2
3
  import * as dns from 'node:dns';
4
+ import type { LookupFunction } from 'node:net';
3
5
 
4
6
  import * as pgwire from '@powersync/service-jpgwire';
5
7
  import {
6
8
  applyRowContext,
7
9
  CompatibilityContext,
8
- SqliteInputRow,
10
+ CompatibilityEdition,
9
11
  DateTimeValue,
12
+ SqliteInputRow,
10
13
  TimeValue,
11
- CompatibilityEdition,
12
14
  TimeValuePrecision
13
15
  } from '@powersync/service-sync-rules';
14
16
  import { describe, expect, Mock, test, vi } from 'vitest';
15
17
  import { clearTestDb, connectPgPool, connectPgWire, TEST_CONNECTION_OPTIONS, TEST_URI } from './util.js';
16
- import { WalStream } from '@module/replication/WalStream.js';
17
- import { PostgresTypeResolver } from '@module/types/resolver.js';
18
18
 
19
19
  describe('connection options', () => {
20
20
  test('uses custom lookup', async () => {
@@ -1,11 +1,10 @@
1
+ import { METRICS_HELPER } from '@powersync/service-core-tests';
2
+ import { ReplicationMetric } from '@powersync/service-types';
3
+ import * as timers from 'node:timers/promises';
1
4
  import { describe, expect, test } from 'vitest';
2
5
  import { env } from './env.js';
3
6
  import { describeWithStorage, StorageVersionTestContext } from './util.js';
4
7
  import { WalStreamTestContext } from './wal_stream_utils.js';
5
- import { METRICS_HELPER } from '@powersync/service-core-tests';
6
- import { ReplicationMetric } from '@powersync/service-types';
7
- import * as timers from 'node:timers/promises';
8
- import { ReplicationAbortedError } from '@powersync/lib-services-framework';
9
8
 
10
9
  describe.skipIf(!(env.CI || env.SLOW_TESTS))('batch replication', function () {
11
10
  describeWithStorage({ timeout: 240_000 }, function ({ factory, storageVersion }) {
@@ -80,8 +79,7 @@ async function testResumingReplication(
80
79
  await context.dispose();
81
80
  })();
82
81
  // This confirms that initial replication was interrupted
83
- const error = await p.catch((e) => e);
84
- expect(error).toBeInstanceOf(ReplicationAbortedError);
82
+ await expect(p).rejects.toThrowError();
85
83
  done = true;
86
84
  } finally {
87
85
  done = true;
@@ -111,7 +109,6 @@ async function testResumingReplication(
111
109
  await context2.loadNextSyncRules();
112
110
  await context2.replicateSnapshot();
113
111
 
114
- context2.startStreaming();
115
112
  const data = await context2.getBucketData('global[]', undefined, {});
116
113
 
117
114
  const deletedRowOps = data.filter(
@@ -134,14 +131,14 @@ async function testResumingReplication(
134
131
  // so it's not in the resulting ops at all.
135
132
  }
136
133
 
137
- expect(updatedRowOps.length).toEqual(2);
134
+ expect(updatedRowOps.length).toBeGreaterThanOrEqual(2);
138
135
  // description for the first op could be 'foo' or 'update1'.
139
136
  // We only test the final version.
140
- expect(JSON.parse(updatedRowOps[1].data as string).description).toEqual('update1');
137
+ expect(JSON.parse(updatedRowOps[updatedRowOps.length - 1].data as string).description).toEqual('update1');
141
138
 
142
- expect(insertedRowOps.length).toEqual(2);
139
+ expect(insertedRowOps.length).toBeGreaterThanOrEqual(1);
143
140
  expect(JSON.parse(insertedRowOps[0].data as string).description).toEqual('insert1');
144
- expect(JSON.parse(insertedRowOps[1].data as string).description).toEqual('insert1');
141
+ expect(JSON.parse(insertedRowOps[insertedRowOps.length - 1].data as string).description).toEqual('insert1');
145
142
 
146
143
  // 1000 of test_data1 during first replication attempt.
147
144
  // N >= 1000 of test_data2 during first replication attempt.
@@ -152,12 +149,12 @@ async function testResumingReplication(
152
149
  // This adds 2 ops.
153
150
  // We expect this to be 11002 for stopAfter: 2000, and 11004 for stopAfter: 8000.
154
151
  // However, this is not deterministic.
155
- const expectedCount = 11002 + deletedRowOps.length;
152
+ const expectedCount = 11000 - 2 + insertedRowOps.length + updatedRowOps.length + deletedRowOps.length;
156
153
  expect(data.length).toEqual(expectedCount);
157
154
 
158
155
  const replicatedCount =
159
156
  ((await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0) - startRowCount;
160
157
 
161
158
  // With resumable replication, there should be no need to re-replicate anything.
162
- expect(replicatedCount).toEqual(expectedCount);
159
+ expect(replicatedCount).toBeGreaterThanOrEqual(expectedCount);
163
160
  }
@@ -1,7 +1,7 @@
1
- import { describe, expect, test } from 'vitest';
2
- import { clearTestDb, connectPgPool } from './util.js';
3
1
  import { PostgresRouteAPIAdapter } from '@module/api/PostgresRouteAPIAdapter.js';
4
2
  import { TYPE_INTEGER, TYPE_REAL, TYPE_TEXT } from '@powersync/service-sync-rules';
3
+ import { describe, expect, test } from 'vitest';
4
+ import { clearTestDb, connectPgPool } from './util.js';
5
5
 
6
6
  describe('PostgresRouteAPIAdapter tests', () => {
7
7
  test('infers connection schema', async () => {
@@ -39,7 +39,6 @@ function defineTests({ factory, storageVersion }: StorageVersionTestContext) {
39
39
  await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
40
40
 
41
41
  await context.replicateSnapshot();
42
- context.startStreaming();
43
42
 
44
43
  await pool.query(`INSERT INTO test_data(id, description) VALUES('t2', 'test2')`);
45
44
 
@@ -62,13 +61,17 @@ function defineTests({ factory, storageVersion }: StorageVersionTestContext) {
62
61
  // Truncate - order doesn't matter
63
62
  expect(data.slice(2, 4).sort(compareIds)).toMatchObject([REMOVE_T1, REMOVE_T2]);
64
63
 
65
- expect(data.slice(4)).toMatchObject([
66
- // Snapshot insert
67
- PUT_T3,
68
- // Replicated insert
69
- // We may eventually be able to de-duplicate this
64
+ expect(data.slice(4, 5)).toMatchObject([
65
+ // Snapshot and/or replication insert
70
66
  PUT_T3
71
67
  ]);
68
+
69
+ if (data.length > 5) {
70
+ expect(data.slice(5)).toMatchObject([
71
+ // Replicated insert (optional duplication)
72
+ PUT_T3
73
+ ]);
74
+ }
72
75
  });
73
76
 
74
77
  test('add table', async () => {
@@ -78,7 +81,6 @@ function defineTests({ factory, storageVersion }: StorageVersionTestContext) {
78
81
  const { pool } = context;
79
82
 
80
83
  await context.replicateSnapshot();
81
- context.startStreaming();
82
84
 
83
85
  await pool.query(`CREATE TABLE test_data(id text primary key, description text)`);
84
86
  await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
@@ -86,17 +88,10 @@ function defineTests({ factory, storageVersion }: StorageVersionTestContext) {
86
88
  const data = await context.getBucketData('global[]');
87
89
 
88
90
  // "Reduce" the bucket to get a stable output to test.
91
+ // The specific operation sequence may vary depending on storage implementation, so just check the end result.
89
92
  // slice(1) to skip the CLEAR op.
90
93
  const reduced = reduceBucket(data).slice(1);
91
94
  expect(reduced.sort(compareIds)).toMatchObject([PUT_T1]);
92
-
93
- expect(data).toMatchObject([
94
- // Snapshot insert
95
- PUT_T1,
96
- // Replicated insert
97
- // We may eventually be able to de-duplicate this
98
- PUT_T1
99
- ]);
100
95
  });
101
96
 
102
97
  test('rename table (1)', async () => {
@@ -110,7 +105,6 @@ function defineTests({ factory, storageVersion }: StorageVersionTestContext) {
110
105
  await pool.query(`INSERT INTO test_data_old(id, description) VALUES('t1', 'test1')`);
111
106
 
112
107
  await context.replicateSnapshot();
113
- context.startStreaming();
114
108
 
115
109
  await pool.query(
116
110
  { statement: `ALTER TABLE test_data_old RENAME TO test_data` },
@@ -130,11 +124,13 @@ function defineTests({ factory, storageVersion }: StorageVersionTestContext) {
130
124
  PUT_T1,
131
125
  PUT_T2
132
126
  ]);
133
- expect(data.slice(2)).toMatchObject([
134
- // Replicated insert
135
- // We may eventually be able to de-duplicate this
136
- PUT_T2
137
- ]);
127
+ if (data.length > 2) {
128
+ expect(data.slice(2)).toMatchObject([
129
+ // Replicated insert
130
+ // May be de-duplicated
131
+ PUT_T2
132
+ ]);
133
+ }
138
134
  });
139
135
 
140
136
  test('rename table (2)', async () => {
@@ -153,7 +149,6 @@ function defineTests({ factory, storageVersion }: StorageVersionTestContext) {
153
149
  await pool.query(`INSERT INTO test_data1(id, description) VALUES('t1', 'test1')`);
154
150
 
155
151
  await context.replicateSnapshot();
156
- context.startStreaming();
157
152
 
158
153
  await pool.query(
159
154
  { statement: `ALTER TABLE test_data1 RENAME TO test_data2` },
@@ -183,11 +178,13 @@ function defineTests({ factory, storageVersion }: StorageVersionTestContext) {
183
178
  putOp('test_data2', { id: 't1', description: 'test1' }),
184
179
  putOp('test_data2', { id: 't2', description: 'test2' })
185
180
  ]);
186
- expect(data.slice(4)).toMatchObject([
187
- // Replicated insert
188
- // We may eventually be able to de-duplicate this
189
- putOp('test_data2', { id: 't2', description: 'test2' })
190
- ]);
181
+ if (data.length > 4) {
182
+ expect(data.slice(4)).toMatchObject([
183
+ // Replicated insert
184
+ // This may be de-duplicated
185
+ putOp('test_data2', { id: 't2', description: 'test2' })
186
+ ]);
187
+ }
191
188
  });
192
189
 
193
190
  test('rename table (3)', async () => {
@@ -202,7 +199,6 @@ function defineTests({ factory, storageVersion }: StorageVersionTestContext) {
202
199
  await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
203
200
 
204
201
  await context.replicateSnapshot();
205
- context.startStreaming();
206
202
 
207
203
  await pool.query(
208
204
  { statement: `ALTER TABLE test_data RENAME TO test_data_na` },
@@ -237,7 +233,6 @@ function defineTests({ factory, storageVersion }: StorageVersionTestContext) {
237
233
  await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
238
234
 
239
235
  await context.replicateSnapshot();
240
- context.startStreaming();
241
236
 
242
237
  await pool.query(
243
238
  { statement: `ALTER TABLE test_data REPLICA IDENTITY FULL` },
@@ -262,11 +257,13 @@ function defineTests({ factory, storageVersion }: StorageVersionTestContext) {
262
257
  // Snapshot - order doesn't matter
263
258
  expect(data.slice(2, 4).sort(compareIds)).toMatchObject([PUT_T1, PUT_T2]);
264
259
 
265
- expect(data.slice(4).sort(compareIds)).toMatchObject([
266
- // Replicated insert
267
- // We may eventually be able to de-duplicate this
268
- PUT_T2
269
- ]);
260
+ if (data.length > 4) {
261
+ expect(data.slice(4).sort(compareIds)).toMatchObject([
262
+ // Replicated insert
263
+ // This may be de-duplicated
264
+ PUT_T2
265
+ ]);
266
+ }
270
267
  });
271
268
 
272
269
  test('change full replica id by adding column', async () => {
@@ -283,7 +280,6 @@ function defineTests({ factory, storageVersion }: StorageVersionTestContext) {
283
280
  await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
284
281
 
285
282
  await context.replicateSnapshot();
286
- context.startStreaming();
287
283
 
288
284
  await pool.query(
289
285
  { statement: `ALTER TABLE test_data ADD COLUMN other TEXT` },
@@ -305,11 +301,13 @@ function defineTests({ factory, storageVersion }: StorageVersionTestContext) {
305
301
  putOp('test_data', { id: 't2', description: 'test2', other: null })
306
302
  ]);
307
303
 
308
- expect(data.slice(4).sort(compareIds)).toMatchObject([
309
- // Replicated insert
310
- // We may eventually be able to de-duplicate this
311
- putOp('test_data', { id: 't2', description: 'test2', other: null })
312
- ]);
304
+ if (data.length > 4) {
305
+ expect(data.slice(4).sort(compareIds)).toMatchObject([
306
+ // Replicated insert
307
+ // This may be de-duplicated
308
+ putOp('test_data', { id: 't2', description: 'test2', other: null })
309
+ ]);
310
+ }
313
311
  });
314
312
 
315
313
  test('change default replica id by changing column type', async () => {
@@ -323,7 +321,6 @@ function defineTests({ factory, storageVersion }: StorageVersionTestContext) {
323
321
  await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
324
322
 
325
323
  await context.replicateSnapshot();
326
- context.startStreaming();
327
324
 
328
325
  await pool.query(
329
326
  { statement: `ALTER TABLE test_data ALTER COLUMN id TYPE varchar` },
@@ -342,11 +339,13 @@ function defineTests({ factory, storageVersion }: StorageVersionTestContext) {
342
339
  // Snapshot - order doesn't matter
343
340
  expect(data.slice(2, 4).sort(compareIds)).toMatchObject([PUT_T1, PUT_T2]);
344
341
 
345
- expect(data.slice(4).sort(compareIds)).toMatchObject([
346
- // Replicated insert
347
- // We may eventually be able to de-duplicate this
348
- PUT_T2
349
- ]);
342
+ if (data.length > 4) {
343
+ expect(data.slice(4).sort(compareIds)).toMatchObject([
344
+ // Replicated insert
345
+ // May be de-duplicated
346
+ PUT_T2
347
+ ]);
348
+ }
350
349
  });
351
350
 
352
351
  test('change index id by changing column type', async () => {
@@ -365,7 +364,6 @@ function defineTests({ factory, storageVersion }: StorageVersionTestContext) {
365
364
  await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
366
365
 
367
366
  await context.replicateSnapshot();
368
- context.startStreaming();
369
367
 
370
368
  await pool.query(`INSERT INTO test_data(id, description) VALUES('t2', 'test2')`);
371
369
 
@@ -388,21 +386,7 @@ function defineTests({ factory, storageVersion }: StorageVersionTestContext) {
388
386
  const reduced = reduceBucket(data).slice(1);
389
387
  expect(reduced.sort(compareIds)).toMatchObject([PUT_T1, PUT_T2, PUT_T3]);
390
388
 
391
- // Previously had more specific tests, but this varies too much based on timing:
392
- // expect(data.slice(2, 4).sort(compareIds)).toMatchObject([
393
- // // Truncate - any order
394
- // REMOVE_T1,
395
- // REMOVE_T2
396
- // ]);
397
-
398
- // // Snapshot - order doesn't matter
399
- // expect(data.slice(4, 7).sort(compareIds)).toMatchObject([PUT_T1, PUT_T2, PUT_T3]);
400
-
401
- // expect(data.slice(7).sort(compareIds)).toMatchObject([
402
- // // Replicated insert
403
- // // We may eventually be able to de-duplicate this
404
- // PUT_T3
405
- // ]);
389
+ // Previously had more specific tests, but this varies too much based on timing.
406
390
  });
407
391
 
408
392
  test('add to publication', async () => {
@@ -420,7 +404,6 @@ function defineTests({ factory, storageVersion }: StorageVersionTestContext) {
420
404
  await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
421
405
 
422
406
  await context.replicateSnapshot();
423
- context.startStreaming();
424
407
 
425
408
  await pool.query(`INSERT INTO test_data(id, description) VALUES('t2', 'test2')`);
426
409
 
@@ -436,11 +419,13 @@ function defineTests({ factory, storageVersion }: StorageVersionTestContext) {
436
419
  PUT_T3
437
420
  ]);
438
421
 
439
- expect(data.slice(3)).toMatchObject([
440
- // Replicated insert
441
- // We may eventually be able to de-duplicate this
442
- PUT_T3
443
- ]);
422
+ if (data.length > 3) {
423
+ expect(data.slice(3)).toMatchObject([
424
+ // Replicated insert
425
+ // May be de-duplicated
426
+ PUT_T3
427
+ ]);
428
+ }
444
429
 
445
430
  // "Reduce" the bucket to get a stable output to test.
446
431
  // slice(1) to skip the CLEAR op.
@@ -464,7 +449,6 @@ function defineTests({ factory, storageVersion }: StorageVersionTestContext) {
464
449
  await pool.query(`INSERT INTO test_other(id, description) VALUES('t1', 'test1')`);
465
450
 
466
451
  await context.replicateSnapshot();
467
- context.startStreaming();
468
452
 
469
453
  await pool.query(`INSERT INTO test_other(id, description) VALUES('t2', 'test2')`);
470
454
 
@@ -489,7 +473,6 @@ function defineTests({ factory, storageVersion }: StorageVersionTestContext) {
489
473
  await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
490
474
 
491
475
  await context.replicateSnapshot();
492
- context.startStreaming();
493
476
 
494
477
  await pool.query(`INSERT INTO test_data(id, description) VALUES('t2', 'test2')`);
495
478
 
@@ -532,7 +515,6 @@ function defineTests({ factory, storageVersion }: StorageVersionTestContext) {
532
515
  await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
533
516
 
534
517
  await context.replicateSnapshot();
535
- context.startStreaming();
536
518
 
537
519
  await pool.query(`INSERT INTO test_data(id, description) VALUES('t2', 'test2')`);
538
520
 
@@ -586,7 +568,6 @@ function defineTests({ factory, storageVersion }: StorageVersionTestContext) {
586
568
  await pool.query(`INSERT INTO test_data_old(id, num) VALUES('t2', 0)`);
587
569
 
588
570
  await context.replicateSnapshot();
589
- context.startStreaming();
590
571
 
591
572
  await pool.query(
592
573
  { statement: `ALTER TABLE test_data_old RENAME TO test_data` },
@@ -658,7 +639,6 @@ config:
658
639
  await pool.query(`INSERT INTO test_data(id) VALUES ('t1')`);
659
640
 
660
641
  await context.replicateSnapshot();
661
- context.startStreaming();
662
642
 
663
643
  await pool.query(
664
644
  { statement: `CREATE TYPE composite AS (foo bool, bar int4);` },
@@ -666,7 +646,7 @@ config:
666
646
  { statement: `UPDATE test_data SET other = ROW(TRUE, 2)::composite;` }
667
647
  );
668
648
 
669
- const data = await context.getBucketData('1#stream|0[]');
649
+ const data = await context.getBucketData('stream|0[]');
670
650
  expect(data).toMatchObject([
671
651
  putOp('test_data', { id: 't1' }),
672
652
  putOp('test_data', { id: 't1', other: '{"foo":1,"bar":2}' })