@powersync/service-module-postgres 0.7.1 → 0.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +17 -0
- package/package.json +8 -8
- package/test/src/checkpoints.test.ts +13 -9
- package/test/src/slow_tests.test.ts +102 -114
- package/test/src/util.ts +4 -3
- package/test/src/wal_stream_utils.ts +0 -1
- package/tsconfig.tsbuildinfo +1 -1
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,22 @@
 # @powersync/service-module-postgres
 
+## 0.8.0
+
+### Minor Changes
+
+- 436eee6: Minor optimizations to new checkpoint calculations.
+
+### Patch Changes
+
+- Updated dependencies [436eee6]
+- Updated dependencies [15283d4]
+- Updated dependencies [88d4cb3]
+- Updated dependencies [f55e36a]
+  - @powersync/service-core@1.7.0
+  - @powersync/service-sync-rules@0.24.0
+  - @powersync/lib-services-framework@0.5.2
+  - @powersync/lib-service-postgres@0.3.2
+
 ## 0.7.1
 
 ### Patch Changes
package/package.json
CHANGED
@@ -5,7 +5,7 @@
   "publishConfig": {
     "access": "public"
   },
-  "version": "0.7.1",
+  "version": "0.8.0",
   "main": "dist/index.js",
   "license": "FSL-1.1-Apache-2.0",
   "type": "module",
@@ -28,20 +28,20 @@
     "ts-codec": "^1.3.0",
     "uri-js": "^4.4.1",
     "uuid": "^9.0.1",
-    "@powersync/lib-service-postgres": "0.3.
-    "@powersync/lib-services-framework": "0.5.
-    "@powersync/service-core": "
+    "@powersync/lib-service-postgres": "0.3.2",
+    "@powersync/lib-services-framework": "0.5.2",
+    "@powersync/service-core": "1.7.0",
     "@powersync/service-jpgwire": "0.19.0",
     "@powersync/service-jsonbig": "0.17.10",
-    "@powersync/service-sync-rules": "0.
+    "@powersync/service-sync-rules": "0.24.0",
     "@powersync/service-types": "0.8.0"
   },
   "devDependencies": {
     "@types/semver": "^7.5.4",
     "@types/uuid": "^9.0.4",
-    "@powersync/service-core-tests": "0.
-    "@powersync/service-module-mongodb-storage": "0.
-    "@powersync/service-module-postgres-storage": "0.
+    "@powersync/service-core-tests": "0.5.0",
+    "@powersync/service-module-mongodb-storage": "0.6.0",
+    "@powersync/service-module-postgres-storage": "0.4.0"
   },
   "scripts": {
     "build": "tsc -b",
package/test/src/checkpoints.test.ts
CHANGED
@@ -3,6 +3,7 @@ import { checkpointUserId, createWriteCheckpoint } from '@powersync/service-core
 import { describe, test } from 'vitest';
 import { INITIALIZED_MONGO_STORAGE_FACTORY } from './util.js';
 import { WalStreamTestContext } from './wal_stream_utils.js';
+import { env } from './env.js';
 
 import timers from 'node:timers/promises';
 
@@ -11,8 +12,8 @@ const BASIC_SYNC_RULES = `bucket_definitions:
     data:
       - SELECT id, description, other FROM "test_data"`;
 
-describe('checkpoint tests', () => {
-  test('write checkpoints', { timeout:
+describe.skipIf(!(env.CI || env.SLOW_TESTS))('checkpoint tests', () => {
+  test('write checkpoints', { timeout: 50_000 }, async () => {
     const factory = INITIALIZED_MONGO_STORAGE_FACTORY;
     await using context = await WalStreamTestContext.open(factory);
 
@@ -25,13 +26,14 @@ describe('checkpoint tests', () => {
     await context.replicateSnapshot();
 
     context.startStreaming();
+    const storage = context.storage!;
 
     const controller = new AbortController();
     try {
-      const stream =
-        checkpointUserId('test_user', 'test_client'),
-        controller.signal
-      );
+      const stream = storage.watchWriteCheckpoint({
+        user_id: checkpointUserId('test_user', 'test_client'),
+        signal: controller.signal
+      });
 
       let lastWriteCheckpoint: bigint | null = null;
 
@@ -57,10 +59,12 @@ describe('checkpoint tests', () => {
 
       const start = Date.now();
       while (lastWriteCheckpoint == null || lastWriteCheckpoint < BigInt(cp.writeCheckpoint)) {
-        if (Date.now() - start >
-          throw new Error(
+        if (Date.now() - start > 5_000) {
+          throw new Error(
+            `Timeout while waiting for checkpoint. last: ${lastWriteCheckpoint}, waiting for: ${cp.writeCheckpoint}`
+          );
         }
-        await timers.setTimeout(
+        await timers.setTimeout(5, undefined, { signal: controller.signal });
       }
     }
   } finally {
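The updated test passes a single options object to watchWriteCheckpoint and polls with an abortable delay from node:timers/promises. A minimal sketch of that polling pattern, using only standard Node APIs (waitForWriteCheckpoint and its read callback are illustrative stand-ins, not part of @powersync/service-core):

    import timers from 'node:timers/promises';

    // Poll until the observed write checkpoint reaches a target, giving up
    // after a fixed timeout. `read` is a hypothetical accessor for the latest
    // value seen on the checkpoint stream.
    async function waitForWriteCheckpoint(
      read: () => bigint | null,
      target: bigint,
      signal: AbortSignal
    ): Promise<void> {
      const start = Date.now();
      while ((read() ?? -1n) < target) {
        if (Date.now() - start > 5_000) {
          throw new Error(`Timeout while waiting for checkpoint ${target}`);
        }
        // timers.setTimeout rejects with an AbortError as soon as `signal`
        // fires, so the loop cannot outlive its AbortController.
        await timers.setTimeout(5, undefined, { signal });
      }
    }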
package/test/src/slow_tests.test.ts
CHANGED
@@ -71,21 +71,13 @@ function defineSlowTests(factory: storage.TestStorageFactory) {
   // Past issues that this could reproduce intermittently:
   // * Skipping LSNs after a keepalive message
   // * Skipping LSNs when source transactions overlap
-  test(
-    'repeated replication - basic',
-    async () => {
-      await testRepeatedReplication({ compact: false, maxBatchSize: 50, numBatches: 5 });
-    },
-    { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }
-  );
-
-  test(
-    'repeated replication - compacted',
-    async () => {
-      await testRepeatedReplication({ compact: true, maxBatchSize: 100, numBatches: 2 });
-    },
-    { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }
-  );
+  test('repeated replication - basic', { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }, async () => {
+    await testRepeatedReplication({ compact: false, maxBatchSize: 50, numBatches: 5 });
+  });
+
+  test('repeated replication - compacted', { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }, async () => {
+    await testRepeatedReplication({ compact: true, maxBatchSize: 100, numBatches: 2 });
+  });
 
 async function testRepeatedReplication(testOptions: { compact: boolean; maxBatchSize: number; numBatches: number }) {
   const connections = new PgManager(TEST_CONNECTION_OPTIONS, {});
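Both tests above move from vitest's test(name, fn, options) argument order to test(name, options, fn); the bodies are unchanged. A minimal before/after sketch of the two call shapes (TIMEOUT_MS is an illustrative constant, not taken from the diff):

    import { test } from 'vitest';

    const TIMEOUT_MS = 30_000;

    // Older order: the options object trails the callback.
    test('example - old order', async () => { /* ... */ }, { timeout: TIMEOUT_MS });

    // Newer order, as used in this diff: options precede the callback.
    test('example - new order', { timeout: TIMEOUT_MS }, async () => { /* ... */ });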
@@ -101,7 +93,7 @@ bucket_definitions:
       - SELECT * FROM "test_data"
   `;
   const syncRules = await f.updateSyncRules({ content: syncRuleContent });
-
+  const storage = f.getInstance(syncRules);
   abortController = new AbortController();
   const options: WalStreamOptions = {
     abort_signal: abortController.signal,
@@ -314,116 +306,112 @@ bucket_definitions:
   //
   // If the first LSN does not correctly match with the first replication transaction,
   // we may miss some updates.
-  test(
-
-
-
-
-
-
-    const syncRuleContent = `
+  test('repeated initial replication', { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }, async () => {
+    const pool = await connectPgPool();
+    await clearTestDb(pool);
+    await using f = await factory();
+
+    const syncRuleContent = `
 bucket_definitions:
   global:
     data:
       - SELECT id, description FROM "test_data"
   `;
-
-
+    const syncRules = await f.updateSyncRules({ content: syncRuleContent });
+    const storage = f.getInstance(syncRules);
 
-
-
+    // 1. Setup some base data that will be replicated in initial replication
+    await pool.query(`CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`);
 
-
+    let statements: pgwire.Statement[] = [];
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    const n = Math.floor(Math.random() * 200);
+    for (let i = 0; i < n; i++) {
+      statements.push({
+        statement: `INSERT INTO test_data(description) VALUES('test_init')`
+      });
+    }
+    await pool.query(...statements);
+
+    const start = Date.now();
+    let i = 0;
+
+    while (Date.now() - start < TEST_DURATION_MS) {
+      // 2. Each iteration starts with a clean slate
+      await pool.query(`SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots WHERE active = FALSE`);
+      i += 1;
+
+      const connections = new PgManager(TEST_CONNECTION_OPTIONS, {});
+      const replicationConnection = await connections.replicationConnection();
+
+      abortController = new AbortController();
+      const options: WalStreamOptions = {
+        abort_signal: abortController.signal,
+        connections,
+        storage: storage
+      };
+      walStream = new WalStream(options);
+
+      await storage.clear();
+
+      // 3. Start initial replication, then streaming, but don't wait for any of this
+      let initialReplicationDone = false;
+      streamPromise = (async () => {
+        await walStream.initReplication(replicationConnection);
+        await storage.autoActivate();
+        initialReplicationDone = true;
+        await walStream.streamChanges(replicationConnection);
+      })()
+        .catch((e) => {
           initialReplicationDone = true;
-
-        })
-
-
-
-        })
-        .then((v) => {
-          return v;
-        });
+          throw e;
+        })
+        .then((v) => {
+          return v;
+        });
 
-
-
-
-
-
-
-
-
-
-      });
-    }
-    const results = await pool.query(...statements);
-    const ids = results.results.map((sub) => {
-      return sub.rows[0][0] as string;
-    });
-    await new Promise((resolve) => setTimeout(resolve, Math.random() * 30));
-    const deleteStatements: pgwire.Statement[] = ids.map((id) => {
-      return {
-        statement: `DELETE FROM test_data WHERE id = $1`,
-        params: [{ type: 'uuid', value: id }]
-      };
+      // 4. While initial replication is still running, write more changes
+      while (!initialReplicationDone) {
+        let statements: pgwire.Statement[] = [];
+        const n = Math.floor(Math.random() * 10) + 1;
+        for (let i = 0; i < n; i++) {
+          const description = `test${i}`;
+          statements.push({
+            statement: `INSERT INTO test_data(description) VALUES('test1') returning id as test_id`,
+            params: [{ type: 'varchar', value: description }]
          });
-    await pool.query(...deleteStatements);
-    await new Promise((resolve) => setTimeout(resolve, Math.random() * 10));
-  }
-
-  // 5. Once initial replication is done, wait for the streaming changes to complete syncing.
-  // getClientCheckpoint() effectively waits for the above replication to complete
-  // Race with streamingPromise to catch replication errors here.
-  let checkpoint = await Promise.race([
-    getClientCheckpoint(pool, storage.factory, { timeout: TIMEOUT_MARGIN_MS }),
-    streamPromise
-  ]);
-  if (typeof checkpoint == undefined) {
-    // This indicates an issue with the test setup - streamingPromise completed instead
-    // of getClientCheckpoint()
-    throw new Error('Test failure - streamingPromise completed');
        }
+        const results = await pool.query(...statements);
+        const ids = results.results.map((sub) => {
+          return sub.rows[0][0] as string;
+        });
+        await new Promise((resolve) => setTimeout(resolve, Math.random() * 30));
+        const deleteStatements: pgwire.Statement[] = ids.map((id) => {
+          return {
+            statement: `DELETE FROM test_data WHERE id = $1`,
+            params: [{ type: 'uuid', value: id }]
+          };
+        });
+        await pool.query(...deleteStatements);
+        await new Promise((resolve) => setTimeout(resolve, Math.random() * 10));
+      }
 
-
-
-
+      // 5. Once initial replication is done, wait for the streaming changes to complete syncing.
+      // getClientCheckpoint() effectively waits for the above replication to complete
+      // Race with streamingPromise to catch replication errors here.
+      let checkpoint = await Promise.race([
+        getClientCheckpoint(pool, storage.factory, { timeout: TIMEOUT_MARGIN_MS }),
+        streamPromise
+      ]);
+      if (typeof checkpoint == undefined) {
+        // This indicates an issue with the test setup - streamingPromise completed instead
+        // of getClientCheckpoint()
+        throw new Error('Test failure - streamingPromise completed');
      }
-
-
-
+
+      abortController.abort();
+      await streamPromise;
+      await connections.end();
+    }
+  });
 }
package/test/src/util.ts
CHANGED
@@ -62,7 +62,7 @@ export function connectPgPool() {
 
 export async function getClientCheckpoint(
   db: pgwire.PgClient,
-
+  storageFactory: BucketStorageFactory,
   options?: { timeout?: number }
 ): Promise<OpId> {
   const start = Date.now();
@@ -77,8 +77,9 @@ export async function getClientCheckpoint(
 
   logger.info(`Waiting for LSN checkpoint: ${lsn}`);
   while (Date.now() - start < timeout) {
-    const
-
+    const storage = await storageFactory.getActiveStorage();
+    const cp = await storage?.getCheckpoint();
+    if (cp == null) {
       throw new Error('No sync rules available');
     }
     if (cp.lsn && cp.lsn >= lsn) {