@powersync/service-module-postgres 0.13.0 → 0.14.0
This diff shows the changes between two publicly released versions of this package, as published to the supported registries. It is provided for informational purposes only.
- package/CHANGELOG.md +35 -0
- package/dist/api/PostgresRouteAPIAdapter.d.ts +1 -1
- package/dist/api/PostgresRouteAPIAdapter.js +5 -1
- package/dist/api/PostgresRouteAPIAdapter.js.map +1 -1
- package/dist/replication/SnapshotQuery.d.ts +78 -0
- package/dist/replication/SnapshotQuery.js +175 -0
- package/dist/replication/SnapshotQuery.js.map +1 -0
- package/dist/replication/WalStream.d.ts +37 -4
- package/dist/replication/WalStream.js +318 -91
- package/dist/replication/WalStream.js.map +1 -1
- package/dist/replication/WalStreamReplicationJob.d.ts +2 -0
- package/dist/replication/WalStreamReplicationJob.js +14 -3
- package/dist/replication/WalStreamReplicationJob.js.map +1 -1
- package/dist/replication/WalStreamReplicator.d.ts +1 -0
- package/dist/replication/WalStreamReplicator.js +22 -0
- package/dist/replication/WalStreamReplicator.js.map +1 -1
- package/dist/replication/replication-utils.d.ts +4 -0
- package/dist/replication/replication-utils.js +46 -2
- package/dist/replication/replication-utils.js.map +1 -1
- package/package.json +11 -10
- package/src/api/PostgresRouteAPIAdapter.ts +5 -1
- package/src/replication/SnapshotQuery.ts +209 -0
- package/src/replication/WalStream.ts +373 -98
- package/src/replication/WalStreamReplicationJob.ts +15 -3
- package/src/replication/WalStreamReplicator.ts +26 -0
- package/src/replication/replication-utils.ts +60 -2
- package/test/src/__snapshots__/schema_changes.test.ts.snap +2 -2
- package/test/src/checkpoints.test.ts +17 -7
- package/test/src/chunked_snapshots.test.ts +156 -0
- package/test/src/large_batch.test.ts +5 -154
- package/test/src/resuming_snapshots.test.ts +150 -0
- package/test/src/schema_changes.test.ts +5 -10
- package/test/src/slow_tests.test.ts +13 -30
- package/test/src/util.ts +12 -1
- package/test/src/validation.test.ts +0 -1
- package/test/src/wal_stream.test.ts +4 -9
- package/test/src/wal_stream_utils.ts +15 -7
- package/tsconfig.tsbuildinfo +1 -1
--- package/src/replication/WalStreamReplicationJob.ts
+++ package/src/replication/WalStreamReplicationJob.ts
@@ -1,4 +1,4 @@
-import { container } from '@powersync/lib-services-framework';
+import { container, logger, ReplicationAbortedError } from '@powersync/lib-services-framework';
 import { PgManager } from './PgManager.js';
 import { MissingReplicationSlotError, sendKeepAlive, WalStream } from './WalStream.js';

@@ -12,9 +12,11 @@ export interface WalStreamReplicationJobOptions extends replication.AbstractRepl
 export class WalStreamReplicationJob extends replication.AbstractReplicationJob {
   private connectionFactory: ConnectionManagerFactory;
   private readonly connectionManager: PgManager;
+  private lastStream: WalStream | null = null;

   constructor(options: WalStreamReplicationJobOptions) {
     super(options);
+    this.logger = logger.child({ prefix: `[${this.slotName}] ` });
     this.connectionFactory = options.connectionFactory;
     this.connectionManager = this.connectionFactory.create({
       // Pool connections are only used intermittently.
@@ -57,7 +59,7 @@ export class WalStreamReplicationJob extends replication.AbstractReplicationJob
         replication_slot: this.slotName
       }
     });
-    this.logger.error(`Replication failed
+    this.logger.error(`Replication failed`, e);

     if (e instanceof MissingReplicationSlotError) {
       // This stops replication on this slot and restarts with a new slot
@@ -93,14 +95,20 @@ export class WalStreamReplicationJob extends replication.AbstractReplicationJob
         return;
       }
       const stream = new WalStream({
+        logger: this.logger,
         abort_signal: this.abortController.signal,
         storage: this.options.storage,
         metrics: this.options.metrics,
         connections: connectionManager
       });
+      this.lastStream = stream;
       await stream.replicate();
     } catch (e) {
-
+      if (this.isStopped && e instanceof ReplicationAbortedError) {
+        // Ignore aborted errors
+        return;
+      }
+      this.logger.error(`Replication error`, e);
       if (e.cause != null) {
         // Example:
         // PgError.conn_ended: Unable to do postgres query on ended connection
@@ -140,4 +148,8 @@ export class WalStreamReplicationJob extends replication.AbstractReplicationJob
       await connectionManager.end();
     }
   }
+
+  async getReplicationLagMillis(): Promise<number | undefined> {
+    return this.lastStream?.getReplicationLagMillis();
+  }
 }
--- package/src/replication/WalStreamReplicator.ts
+++ package/src/replication/WalStreamReplicator.ts
@@ -48,4 +48,30 @@ export class WalStreamReplicator extends replication.AbstractReplicator<WalStrea
   async testConnection() {
     return await PostgresModule.testConnection(this.connectionFactory.dbConnectionConfig);
   }
+
+  async getReplicationLagMillis(): Promise<number | undefined> {
+    const lag = await super.getReplicationLagMillis();
+    if (lag != null) {
+      return lag;
+    }
+
+    // Booting or in an error loop. Check last active replication status.
+    // This includes sync rules in an ERROR state.
+    const content = await this.storage.getActiveSyncRulesContent();
+    if (content == null) {
+      return undefined;
+    }
+    // Measure the lag from the last commit or keepalive timestamp.
+    // This is not 100% accurate since it is the commit time in the storage db rather than
+    // the source db, but it's the best we have for postgres.
+
+    const checkpointTs = content.last_checkpoint_ts?.getTime() ?? 0;
+    const keepaliveTs = content.last_keepalive_ts?.getTime() ?? 0;
+    const latestTs = Math.max(checkpointTs, keepaliveTs);
+    if (latestTs != 0) {
+      return Date.now() - latestTs;
+    }
+
+    return undefined;
+  }
 }
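The fallback added to WalStreamReplicator.getReplicationLagMillis() above reduces to a small calculation: when no active stream can report lag, it is derived from the newer of the last checkpoint and last keepalive timestamps stored with the active sync rules. A minimal standalone sketch of that calculation (the SyncRulesContentLike shape below is assumed from the two fields the diff reads; it is not an API of the package):

    // Sketch of the fallback lag computation from the diff above.
    interface SyncRulesContentLike {
      last_checkpoint_ts?: Date | null;
      last_keepalive_ts?: Date | null;
    }

    function lagFromStoredTimestamps(content: SyncRulesContentLike, now: number = Date.now()): number | undefined {
      const checkpointTs = content.last_checkpoint_ts?.getTime() ?? 0;
      const keepaliveTs = content.last_keepalive_ts?.getTime() ?? 0;
      const latestTs = Math.max(checkpointTs, keepaliveTs);
      // If neither timestamp has been recorded yet, the lag is unknown.
      return latestTs !== 0 ? now - latestTs : undefined;
    }

As the diff's own comments note, this measures lag against the storage database's commit time rather than the source database, so it is an approximation.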
--- package/src/replication/replication-utils.ts
+++ package/src/replication/replication-utils.ts
@@ -1,7 +1,7 @@
 import * as pgwire from '@powersync/service-jpgwire';

 import * as lib_postgres from '@powersync/lib-service-postgres';
-import { ErrorCode, logger, ServiceError } from '@powersync/lib-services-framework';
+import { ErrorCode, logger, ServiceAssertionError, ServiceError } from '@powersync/lib-services-framework';
 import { PatternResult, storage } from '@powersync/service-core';
 import * as sync_rules from '@powersync/service-sync-rules';
 import * as service_types from '@powersync/service-types';
@@ -136,6 +136,61 @@ $$ LANGUAGE plpgsql;`
   }
 }

+export async function checkTableRls(
+  db: pgwire.PgClient,
+  relationId: number
+): Promise<{ canRead: boolean; message?: string }> {
+  const rs = await lib_postgres.retriedQuery(db, {
+    statement: `
+      WITH user_info AS (
+        SELECT
+          current_user as username,
+          r.rolsuper,
+          r.rolbypassrls
+        FROM pg_roles r
+        WHERE r.rolname = current_user
+      )
+      SELECT
+        c.relname as tablename,
+        c.relrowsecurity as rls_enabled,
+        u.username as username,
+        u.rolsuper as is_superuser,
+        u.rolbypassrls as bypasses_rls
+      FROM pg_class c
+      CROSS JOIN user_info u
+      WHERE c.oid = $1::oid;
+    `,
+    params: [{ type: 'int4', value: relationId }]
+  });
+
+  const rows = pgwire.pgwireRows<{
+    rls_enabled: boolean;
+    tablename: string;
+    username: string;
+    is_superuser: boolean;
+    bypasses_rls: boolean;
+  }>(rs);
+  if (rows.length == 0) {
+    // Not expected, since we already got the oid
+    throw new ServiceAssertionError(`Table with OID ${relationId} does not exist.`);
+  }
+  const row = rows[0];
+  if (row.is_superuser || row.bypasses_rls) {
+    // Bypasses RLS automatically.
+    return { canRead: true };
+  }
+
+  if (row.rls_enabled) {
+    // Don't skip, since we _may_ still be able to get results.
+    return {
+      canRead: false,
+      message: `[${ErrorCode.PSYNC_S1145}] Row Level Security is enabled on table "${row.tablename}". To make sure that ${row.username} can read the table, run: 'ALTER ROLE ${row.username} BYPASSRLS'.`
+    };
+  }
+
+  return { canRead: true };
+}
+
 export interface GetDebugTablesInfoOptions {
   db: pgwire.PgClient;
   publicationName: string;
@@ -309,6 +364,9 @@ export async function getDebugTableInfo(options: GetDebugTableInfoOptions): Prom
     };
   }

+  const rlsCheck = await checkTableRls(db, relationId);
+  const rlsError = rlsCheck.canRead ? null : { message: rlsCheck.message!, level: 'warning' };
+
   return {
     schema: schema,
     name: name,
@@ -316,7 +374,7 @@ export async function getDebugTableInfo(options: GetDebugTableInfoOptions): Prom
     replication_id: id_columns.map((c) => c.name),
     data_queries: syncData,
     parameter_queries: syncParameters,
-    errors: [id_columns_error, selectError, replicateError].filter(
+    errors: [id_columns_error, selectError, replicateError, rlsError].filter(
       (error) => error != null
     ) as service_types.ReplicationError[]
   };
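The new checkTableRls() helper only reports whether the current role can read a table under Row Level Security; getDebugTableInfo() turns a negative result into a 'warning'-level replication error. A hypothetical usage sketch, assuming a connected pgwire.PgClient and a table OID are already available (the wrapper function name is invented for illustration):

    import * as pgwire from '@powersync/service-jpgwire';
    import { checkTableRls } from './replication-utils.js';

    // Illustrative only: surface the RLS warning for a single table.
    async function warnIfRlsBlocksTable(db: pgwire.PgClient, relationId: number): Promise<void> {
      const result = await checkTableRls(db, relationId);
      if (!result.canRead) {
        // The message suggests the documented fix: ALTER ROLE <user> BYPASSRLS.
        console.warn(result.message);
      }
    }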
--- package/test/src/__snapshots__/schema_changes.test.ts.snap
+++ package/test/src/__snapshots__/schema_changes.test.ts.snap
@@ -1,5 +1,5 @@
 // Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html

-exports[`schema changes
+exports[`schema changes > mongodb storage > add to publication (not in sync rules) 1`] = `0`;

-exports[`schema changes
+exports[`schema changes > postgres storage > add to publication (not in sync rules) 1`] = `16384`;
--- package/test/src/checkpoints.test.ts
+++ package/test/src/checkpoints.test.ts
@@ -1,9 +1,8 @@
 import { PostgresRouteAPIAdapter } from '@module/api/PostgresRouteAPIAdapter.js';
-import { checkpointUserId, createWriteCheckpoint } from '@powersync/service-core';
+import { checkpointUserId, createWriteCheckpoint, TestStorageFactory } from '@powersync/service-core';
 import { describe, test } from 'vitest';
-import {
+import { describeWithStorage } from './util.js';
 import { WalStreamTestContext } from './wal_stream_utils.js';
-import { env } from './env.js';

 import timers from 'node:timers/promises';

@@ -12,14 +11,25 @@ const BASIC_SYNC_RULES = `bucket_definitions:
     data:
       - SELECT id, description, other FROM "test_data"`;

-describe
+describe('checkpoint tests', () => {
+  describeWithStorage({}, checkpointTests);
+});
+
+const checkpointTests = (factory: TestStorageFactory) => {
   test('write checkpoints', { timeout: 50_000 }, async () => {
-    const factory = INITIALIZED_MONGO_STORAGE_FACTORY;
     await using context = await WalStreamTestContext.open(factory);

     await context.updateSyncRules(BASIC_SYNC_RULES);
     const { pool } = context;
     const api = new PostgresRouteAPIAdapter(pool);
+    const serverVersion = await context.connectionManager.getServerVersion();
+    if (serverVersion!.compareMain('13.0.0') < 0) {
+      // The test is not stable on Postgres 11 or 12. See the notes on
+      // PostgresRouteAPIAdapter.createReplicationHead() for details.
+      // Postgres 12 is already EOL, so not worth finding a fix - just skip the tests.
+      console.log('Skipping write checkpoint test on Postgres < 13.0.0');
+      return;
+    }

     await pool.query(`CREATE TABLE test_data(id text primary key, description text, other text)`);

@@ -59,7 +69,7 @@ describe.skipIf(!(env.CI || env.SLOW_TESTS))('checkpoint tests', () => {

     const start = Date.now();
     while (lastWriteCheckpoint == null || lastWriteCheckpoint < BigInt(cp.writeCheckpoint)) {
-      if (Date.now() - start >
+      if (Date.now() - start > 3_000) {
         throw new Error(
           `Timeout while waiting for checkpoint. last: ${lastWriteCheckpoint}, waiting for: ${cp.writeCheckpoint}`
         );
@@ -71,4 +81,4 @@ describe.skipIf(!(env.CI || env.SLOW_TESTS))('checkpoint tests', () => {
     controller.abort();
   }
 });
-}
+};
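The checkpoint tests above now use the describeWithStorage helper from ./util.js so the same suite runs against each configured storage backend instead of hard-coding a single factory. A hypothetical minimal test file following the same pattern (the options object and the per-backend behavior are assumed from the usages visible in this diff):

    import { TestStorageFactory } from '@powersync/service-core';
    import { describe, test } from 'vitest';
    import { describeWithStorage } from './util.js';
    import { WalStreamTestContext } from './wal_stream_utils.js';

    describe('my feature', () => {
      // Invoked once per storage factory (e.g. mongodb and postgres storage).
      describeWithStorage({ timeout: 60_000 }, (factory: TestStorageFactory) => {
        test('replicates a row', async () => {
          await using context = await WalStreamTestContext.open(factory);
          // ... exercise replication through `context` here ...
        });
      });
    });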
--- /dev/null
+++ package/test/src/chunked_snapshots.test.ts
@@ -0,0 +1,156 @@
+import { reduceBucket, TestStorageFactory } from '@powersync/service-core';
+import { METRICS_HELPER } from '@powersync/service-core-tests';
+import { SqliteJsonValue } from '@powersync/service-sync-rules';
+import * as crypto from 'node:crypto';
+import * as timers from 'timers/promises';
+import { describe, expect, test } from 'vitest';
+import { describeWithStorage } from './util.js';
+import { WalStreamTestContext } from './wal_stream_utils.js';
+
+describe('chunked snapshots', () => {
+  describeWithStorage({ timeout: 120_000 }, defineBatchTests);
+});
+
+function defineBatchTests(factory: TestStorageFactory) {
+  // We need to test every supported type, since chunking could be quite sensitive to
+  // how each specific type is handled.
+  test('chunked snapshot edge case (int2)', async () => {
+    await testChunkedSnapshot({
+      idType: 'int2',
+      genId: 'i',
+      lastId: '2000',
+      moveTo: '0',
+      moveToJs: 0
+    });
+  });
+
+  test('chunked snapshot edge case (int4)', async () => {
+    await testChunkedSnapshot({
+      idType: 'int4',
+      genId: 'i',
+      lastId: '2000',
+      moveTo: '0',
+      moveToJs: 0
+    });
+  });
+
+  test('chunked snapshot edge case (int8)', async () => {
+    await testChunkedSnapshot({
+      idType: 'int8',
+      genId: 'i',
+      lastId: '2000',
+      moveTo: '0',
+      moveToJs: 0
+    });
+  });
+
+  test('chunked snapshot edge case (text)', async () => {
+    await testChunkedSnapshot({
+      idType: 'text',
+      genId: `to_char(i, 'fm0000')`,
+      lastId: `'2000'`,
+      moveTo: `'0000'`,
+      moveToJs: '0000'
+    });
+  });
+
+  test('chunked snapshot edge case (varchar)', async () => {
+    await testChunkedSnapshot({
+      idType: 'varchar',
+      genId: `to_char(i, 'fm0000')`,
+      lastId: `'2000'`,
+      moveTo: `'0000'`,
+      moveToJs: '0000'
+    });
+  });
+
+  test('chunked snapshot edge case (uuid)', async () => {
+    await testChunkedSnapshot({
+      idType: 'uuid',
+      // Generate a uuid by using the first part of a uuid and appending a 4-digit number.
+      genId: `('00000000-0000-4000-8000-00000000' || to_char(i, 'fm0000')) :: uuid`,
+      lastId: `'00000000-0000-4000-8000-000000002000'`,
+      moveTo: `'00000000-0000-4000-8000-000000000000'`,
+      moveToJs: '00000000-0000-4000-8000-000000000000'
+    });
+  });
+
+  async function testChunkedSnapshot(options: {
+    idType: string;
+    genId: string;
+    lastId: string;
+    moveTo: string;
+    moveToJs: SqliteJsonValue;
+  }) {
+    // 1. Start with 2k rows, one row with id = 2000, and a large TOAST value in another column.
+    // 2. Replicate one batch of rows (id < 2000).
+    // 3. `UPDATE table SET id = 0 WHERE id = 2000`
+    // 4. Replicate the rest of the table.
+    // 5. Logical replication picks up the UPDATE above, but it is missing the TOAST column.
+    // 6. We end up with a row that has a missing TOAST column.
+
+    await using context = await WalStreamTestContext.open(factory, {
+      // We need to use a smaller chunk size here, so that we can run a query in between chunks
+      walStreamOptions: { snapshotChunkLength: 100 }
+    });
+
+    await context.updateSyncRules(`bucket_definitions:
+  global:
+    data:
+      - SELECT * FROM test_data`);
+    const { pool } = context;
+
+    await pool.query(`CREATE TABLE test_data(id ${options.idType} primary key, description text)`);
+
+    // 1. Start with 2k rows, one row with id = 2000...
+    await pool.query({
+      statement: `INSERT INTO test_data(id, description) SELECT ${options.genId}, 'foo' FROM generate_series(1, 2000) i`
+    });
+
+    // ...and a large TOAST value in another column.
+    // Toast value, must be > 8kb after compression
+    const largeDescription = crypto.randomBytes(20_000).toString('hex');
+    await pool.query({
+      statement: `UPDATE test_data SET description = $1 WHERE id = ${options.lastId} :: ${options.idType}`,
+      params: [{ type: 'varchar', value: largeDescription }]
+    });
+
+    // 2. Replicate one batch of rows (id < 100).
+    // Our "stopping point" here is not quite deterministic.
+    const p = context.replicateSnapshot();
+
+    const stopAfter = 100;
+    const startRowCount = (await METRICS_HELPER.getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
+
+    while (true) {
+      const count =
+        ((await METRICS_HELPER.getMetricValueForTests('powersync_rows_replicated_total')) ?? 0) - startRowCount;
+
+      if (count >= stopAfter) {
+        break;
+      }
+      await timers.setTimeout(1);
+    }
+
+    // 3. `UPDATE table SET id = 0 WHERE id = 2000`
+    const rs = await pool.query(
+      `UPDATE test_data SET id = ${options.moveTo} WHERE id = ${options.lastId} RETURNING id`
+    );
+    expect(rs.rows.length).toEqual(1);
+
+    // 4. Replicate the rest of the table.
+    await p;
+
+    // 5. Logical replication picks up the UPDATE above, but it is missing the TOAST column.
+    context.startStreaming();
+
+    // 6. If all went well, the "resnapshot" process would take care of this.
+    const data = await context.getBucketData('global[]', undefined, {});
+    const reduced = reduceBucket(data);
+
+    const movedRow = reduced.find((row) => row.object_id == String(options.moveToJs));
+    expect(movedRow?.data).toEqual(JSON.stringify({ id: options.moveToJs, description: largeDescription }));
+
+    expect(reduced.length).toEqual(2001);
+  }
+}
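The new chunked snapshot test pauses mid-snapshot by polling the powersync_rows_replicated_total metric until a fixed number of rows has been replicated. That wait loop can be read as a small helper; a sketch extracted from the code above (the helper name is illustrative, not part of the package):

    import { METRICS_HELPER } from '@powersync/service-core-tests';
    import * as timers from 'timers/promises';

    // Resolve once at least `stopAfter` rows have been replicated since `startRowCount`.
    async function waitForReplicatedRows(startRowCount: number, stopAfter: number): Promise<void> {
      while (true) {
        const count =
          ((await METRICS_HELPER.getMetricValueForTests('powersync_rows_replicated_total')) ?? 0) - startRowCount;
        if (count >= stopAfter) {
          return;
        }
        // Poll frequently; the snapshot keeps running in the background while we wait.
        await timers.setTimeout(1);
      }
    }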
--- package/test/src/large_batch.test.ts
+++ package/test/src/large_batch.test.ts
@@ -1,37 +1,14 @@
 import { storage } from '@powersync/service-core';
-import * as timers from 'timers/promises';
 import { describe, expect, test } from 'vitest';
 import { populateData } from '../../dist/utils/populate_test_data.js';
 import { env } from './env.js';
-import {
-  INITIALIZED_MONGO_STORAGE_FACTORY,
-  INITIALIZED_POSTGRES_STORAGE_FACTORY,
-  TEST_CONNECTION_OPTIONS
-} from './util.js';
+import { describeWithStorage, TEST_CONNECTION_OPTIONS } from './util.js';
 import { WalStreamTestContext } from './wal_stream_utils.js';
-import { METRICS_HELPER } from '@powersync/service-core-tests';
-import { ReplicationMetric } from '@powersync/service-types';
-
-describe.skipIf(!env.TEST_MONGO_STORAGE)('batch replication tests - mongodb', { timeout: 120_000 }, function () {
-  // These are slow but consistent tests.
-  // Not run on every test run, but we do run on CI, or when manually debugging issues.
-  if (env.CI || env.SLOW_TESTS) {
-    defineBatchTests(INITIALIZED_MONGO_STORAGE_FACTORY);
-  } else {
-    // Need something in this file.
-    test('no-op', () => {});
-  }
-});

-describe.skipIf(!env.
-
-
-
-    defineBatchTests(INITIALIZED_POSTGRES_STORAGE_FACTORY);
-  } else {
-    // Need something in this file.
-    test('no-op', () => {});
-  }
+describe.skipIf(!(env.CI || env.SLOW_TESTS))('batch replication', function () {
+  describeWithStorage({ timeout: 240_000 }, function (factory) {
+    defineBatchTests(factory);
+  });
 });

 const BASIC_SYNC_RULES = `bucket_definitions:
@@ -257,132 +234,6 @@ function defineBatchTests(factory: storage.TestStorageFactory) {
     expect(checksum.get('global[]')!.count).toEqual((numDocs + 2) * 4);
   });

-  test('resuming initial replication (1)', async () => {
-    // Stop early - likely to not include deleted row in first replication attempt.
-    await testResumingReplication(2000);
-  });
-  test('resuming initial replication (2)', async () => {
-    // Stop late - likely to include deleted row in first replication attempt.
-    await testResumingReplication(8000);
-  });
-
-  async function testResumingReplication(stopAfter: number) {
-    // This tests interrupting and then resuming initial replication.
-    // We interrupt replication after test_data1 has fully replicated, and
-    // test_data2 has partially replicated.
-    // This test relies on interval behavior that is not 100% deterministic:
-    // 1. We attempt to abort initial replication once a certain number of
-    //    rows have been replicated, but this is not exact. Our only requirement
-    //    is that we have not fully replicated test_data2 yet.
-    // 2. Order of replication is not deterministic, so which specific rows
-    //    have been / have not been replicated at that point is not deterministic.
-    //    We do allow for some variation in the test results to account for this.
-
-    await using context = await WalStreamTestContext.open(factory);
-
-    await context.updateSyncRules(`bucket_definitions:
-  global:
-    data:
-      - SELECT * FROM test_data1
-      - SELECT * FROM test_data2`);
-    const { pool } = context;
-
-    await pool.query(`CREATE TABLE test_data1(id serial primary key, description text)`);
-    await pool.query(`CREATE TABLE test_data2(id serial primary key, description text)`);
-
-    await pool.query(
-      {
-        statement: `INSERT INTO test_data1(description) SELECT 'foo' FROM generate_series(1, 1000) i`
-      },
-      {
-        statement: `INSERT INTO test_data2( description) SELECT 'foo' FROM generate_series(1, 10000) i`
-      }
-    );
-
-    const p = context.replicateSnapshot();
-
-    let done = false;
-
-    const startRowCount = (await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0;
-    try {
-      (async () => {
-        while (!done) {
-          const count =
-            ((await METRICS_HELPER.getMetricValueForTests(ReplicationMetric.ROWS_REPLICATED)) ?? 0) - startRowCount;
-
-          if (count >= stopAfter) {
-            break;
-          }
-          await timers.setTimeout(1);
-        }
-        // This interrupts initial replication
-        await context.dispose();
-      })();
-      // This confirms that initial replication was interrupted
-      await expect(p).rejects.toThrowError();
-      done = true;
-    } finally {
-      done = true;
-    }
-
-    // Bypass the usual "clear db on factory open" step.
-    await using context2 = await WalStreamTestContext.open(factory, { doNotClear: true });
-
-    // This delete should be using one of the ids already replicated
-    const {
-      rows: [[id1]]
-    } = await context2.pool.query(`DELETE FROM test_data2 WHERE id = (SELECT id FROM test_data2 LIMIT 1) RETURNING id`);
-    // This update should also be using one of the ids already replicated
-    const {
-      rows: [[id2]]
-    } = await context2.pool.query(
-      `UPDATE test_data2 SET description = 'update1' WHERE id = (SELECT id FROM test_data2 LIMIT 1) RETURNING id`
-    );
-    const {
-      rows: [[id3]]
-    } = await context2.pool.query(`INSERT INTO test_data2(description) SELECT 'insert1' RETURNING id`);
-
-    await context2.loadNextSyncRules();
-    await context2.replicateSnapshot();
-
-    context2.startStreaming();
-    const data = await context2.getBucketData('global[]', undefined, {});
-
-    const deletedRowOps = data.filter((row) => row.object_type == 'test_data2' && row.object_id === String(id1));
-    const updatedRowOps = data.filter((row) => row.object_type == 'test_data2' && row.object_id === String(id2));
-    const insertedRowOps = data.filter((row) => row.object_type == 'test_data2' && row.object_id === String(id3));
-
-    if (deletedRowOps.length != 0) {
-      // The deleted row was part of the first replication batch,
-      // so it is removed by streaming replication.
-      expect(deletedRowOps.length).toEqual(2);
-      expect(deletedRowOps[1].op).toEqual('REMOVE');
-    } else {
-      // The deleted row was not part of the first replication batch,
-      // so it's not in the resulting ops at all.
-    }
-
-    expect(updatedRowOps.length).toEqual(2);
-    // description for the first op could be 'foo' or 'update1'.
-    // We only test the final version.
-    expect(JSON.parse(updatedRowOps[1].data as string).description).toEqual('update1');
-
-    expect(insertedRowOps.length).toEqual(2);
-    expect(JSON.parse(insertedRowOps[0].data as string).description).toEqual('insert1');
-    expect(JSON.parse(insertedRowOps[1].data as string).description).toEqual('insert1');
-
-    // 1000 of test_data1 during first replication attempt.
-    // N >= 1000 of test_data2 during first replication attempt.
-    // 10000 - N - 1 + 1 of test_data2 during second replication attempt.
-    // An additional update during streaming replication (2x total for this row).
-    // An additional insert during streaming replication (2x total for this row).
-    // If the deleted row was part of the first replication batch, it's removed by streaming replication.
-    // This adds 2 ops.
-    // We expect this to be 11002 for stopAfter: 2000, and 11004 for stopAfter: 8000.
-    // However, this is not deterministic.
-    expect(data.length).toEqual(11002 + deletedRowOps.length);
-  }
-
   function printMemoryUsage() {
     const memoryUsage = process.memoryUsage();