@powersync/service-module-postgres 0.0.0-dev-20240918092408
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +18 -0
- package/LICENSE +67 -0
- package/README.md +3 -0
- package/dist/api/PostgresRouteAPIAdapter.d.ts +22 -0
- package/dist/api/PostgresRouteAPIAdapter.js +273 -0
- package/dist/api/PostgresRouteAPIAdapter.js.map +1 -0
- package/dist/auth/SupabaseKeyCollector.d.ts +22 -0
- package/dist/auth/SupabaseKeyCollector.js +64 -0
- package/dist/auth/SupabaseKeyCollector.js.map +1 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.js +4 -0
- package/dist/index.js.map +1 -0
- package/dist/module/PostgresModule.d.ts +14 -0
- package/dist/module/PostgresModule.js +108 -0
- package/dist/module/PostgresModule.js.map +1 -0
- package/dist/replication/ConnectionManagerFactory.d.ts +10 -0
- package/dist/replication/ConnectionManagerFactory.js +21 -0
- package/dist/replication/ConnectionManagerFactory.js.map +1 -0
- package/dist/replication/PgManager.d.ts +25 -0
- package/dist/replication/PgManager.js +60 -0
- package/dist/replication/PgManager.js.map +1 -0
- package/dist/replication/PgRelation.d.ts +6 -0
- package/dist/replication/PgRelation.js +27 -0
- package/dist/replication/PgRelation.js.map +1 -0
- package/dist/replication/PostgresErrorRateLimiter.d.ts +11 -0
- package/dist/replication/PostgresErrorRateLimiter.js +43 -0
- package/dist/replication/PostgresErrorRateLimiter.js.map +1 -0
- package/dist/replication/WalStream.d.ts +53 -0
- package/dist/replication/WalStream.js +536 -0
- package/dist/replication/WalStream.js.map +1 -0
- package/dist/replication/WalStreamReplicationJob.d.ts +27 -0
- package/dist/replication/WalStreamReplicationJob.js +131 -0
- package/dist/replication/WalStreamReplicationJob.js.map +1 -0
- package/dist/replication/WalStreamReplicator.d.ts +13 -0
- package/dist/replication/WalStreamReplicator.js +36 -0
- package/dist/replication/WalStreamReplicator.js.map +1 -0
- package/dist/replication/replication-index.d.ts +5 -0
- package/dist/replication/replication-index.js +6 -0
- package/dist/replication/replication-index.js.map +1 -0
- package/dist/replication/replication-utils.d.ts +32 -0
- package/dist/replication/replication-utils.js +272 -0
- package/dist/replication/replication-utils.js.map +1 -0
- package/dist/types/types.d.ts +76 -0
- package/dist/types/types.js +110 -0
- package/dist/types/types.js.map +1 -0
- package/dist/utils/migration_lib.d.ts +11 -0
- package/dist/utils/migration_lib.js +64 -0
- package/dist/utils/migration_lib.js.map +1 -0
- package/dist/utils/pgwire_utils.d.ts +16 -0
- package/dist/utils/pgwire_utils.js +70 -0
- package/dist/utils/pgwire_utils.js.map +1 -0
- package/dist/utils/populate_test_data.d.ts +8 -0
- package/dist/utils/populate_test_data.js +65 -0
- package/dist/utils/populate_test_data.js.map +1 -0
- package/package.json +49 -0
- package/src/api/PostgresRouteAPIAdapter.ts +307 -0
- package/src/auth/SupabaseKeyCollector.ts +70 -0
- package/src/index.ts +5 -0
- package/src/module/PostgresModule.ts +122 -0
- package/src/replication/ConnectionManagerFactory.ts +28 -0
- package/src/replication/PgManager.ts +70 -0
- package/src/replication/PgRelation.ts +31 -0
- package/src/replication/PostgresErrorRateLimiter.ts +44 -0
- package/src/replication/WalStream.ts +639 -0
- package/src/replication/WalStreamReplicationJob.ts +142 -0
- package/src/replication/WalStreamReplicator.ts +45 -0
- package/src/replication/replication-index.ts +5 -0
- package/src/replication/replication-utils.ts +329 -0
- package/src/types/types.ts +159 -0
- package/src/utils/migration_lib.ts +79 -0
- package/src/utils/pgwire_utils.ts +73 -0
- package/src/utils/populate_test_data.ts +77 -0
- package/test/src/__snapshots__/pg_test.test.ts.snap +256 -0
- package/test/src/env.ts +7 -0
- package/test/src/large_batch.test.ts +195 -0
- package/test/src/pg_test.test.ts +450 -0
- package/test/src/schema_changes.test.ts +543 -0
- package/test/src/setup.ts +7 -0
- package/test/src/slow_tests.test.ts +335 -0
- package/test/src/util.ts +105 -0
- package/test/src/validation.test.ts +64 -0
- package/test/src/wal_stream.test.ts +319 -0
- package/test/src/wal_stream_utils.ts +121 -0
- package/test/tsconfig.json +28 -0
- package/tsconfig.json +31 -0
- package/tsconfig.tsbuildinfo +1 -0
- package/vitest.config.ts +9 -0
package/test/src/slow_tests.test.ts
ADDED
@@ -0,0 +1,335 @@
import * as bson from 'bson';
import { afterEach, describe, expect, test } from 'vitest';
import { WalStream, WalStreamOptions } from '../../src/replication/WalStream.js';
import { env } from './env.js';
import { TEST_CONNECTION_OPTIONS, clearTestDb, connectPgPool, getClientCheckpoint } from './util.js';

import * as pgwire from '@powersync/service-jpgwire';
import { SqliteRow } from '@powersync/service-sync-rules';

import { mapOpEntry, MongoBucketStorage } from '@/storage/storage-index.js';
import * as timers from 'node:timers/promises';
import { MONGO_STORAGE_FACTORY, StorageFactory } from '@core-tests/util.js';
import { PgManager } from '@module/replication/PgManager.js';
import { reduceBucket, validateCompactedBucket } from '@core-tests/bucket_validation.js';

describe('slow tests - mongodb', function () {
  // These are slow, inconsistent tests.
  // Not run on every test run, but we do run on CI, or when manually debugging issues.
  if (env.CI || env.SLOW_TESTS) {
    defineSlowTests(MONGO_STORAGE_FACTORY);
  } else {
    // Need something in this file.
    test('no-op', () => {});
  }
});

function defineSlowTests(factory: StorageFactory) {
  let walStream: WalStream | undefined;
  let connections: PgManager | undefined;
  let abortController: AbortController | undefined;
  let streamPromise: Promise<void> | undefined;

  afterEach(async () => {
    // This cleans up, similar to WalStreamTestContext.dispose().
    // These tests are a little more complex than what is supported by WalStreamTestContext.
    abortController?.abort();
    await streamPromise;
    streamPromise = undefined;
    connections?.destroy();

    connections = undefined;
    walStream = undefined;
    abortController = undefined;
  });

  const TEST_DURATION_MS = 15_000;
  const TIMEOUT_MARGIN_MS = env.CI ? 30_000 : 15_000;

  // Test repeatedly replicating inserts and deletes, then check that we get
  // consistent data out at the end.
  //
  // Past issues that this could reproduce intermittently:
  // * Skipping LSNs after a keepalive message
  // * Skipping LSNs when source transactions overlap
  test(
    'repeated replication - basic',
    async () => {
      await testRepeatedReplication({ compact: false, maxBatchSize: 50, numBatches: 5 });
    },
    { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }
  );

  test(
    'repeated replication - compacted',
    async () => {
      await testRepeatedReplication({ compact: true, maxBatchSize: 100, numBatches: 2 });
    },
    { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }
  );

  async function testRepeatedReplication(testOptions: { compact: boolean; maxBatchSize: number; numBatches: number }) {
    // Assign the shared variable (not a local const) so afterEach() can destroy the connection.
    connections = new PgManager(TEST_CONNECTION_OPTIONS, {});
    const replicationConnection = await connections.replicationConnection();
    const pool = connections.pool;
    await clearTestDb(pool);
    const f = (await factory()) as MongoBucketStorage;

    const syncRuleContent = `
bucket_definitions:
  global:
    data:
      - SELECT * FROM "test_data"
`;
    const syncRules = await f.updateSyncRules({ content: syncRuleContent });
    const storage = f.getInstance(syncRules);
    abortController = new AbortController();
    const options: WalStreamOptions = {
      abort_signal: abortController.signal,
      connections,
      storage: storage
    };
    walStream = new WalStream(options);

    await pool.query(
      `CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text, num decimal)`
    );
    await pool.query(`ALTER TABLE test_data REPLICA IDENTITY FULL`);

    await walStream.initReplication(replicationConnection);
    await storage.autoActivate();
    let abort = false;
    streamPromise = walStream.streamChanges(replicationConnection).finally(() => {
      abort = true;
    });
    const start = Date.now();

    while (!abort && Date.now() - start < TEST_DURATION_MS) {
      const bg = async () => {
        for (let j = 0; j < testOptions.numBatches && !abort; j++) {
          const n = Math.max(1, Math.floor(Math.random() * testOptions.maxBatchSize));
          let statements: pgwire.Statement[] = [];
          for (let i = 0; i < n; i++) {
            const description = `test${i}`;
            statements.push({
              statement: `INSERT INTO test_data(description, num) VALUES($1, $2) returning id as test_id`,
              params: [
                { type: 'varchar', value: description },
                { type: 'float8', value: Math.random() }
              ]
            });
          }
          const results = await pool.query(...statements);
          const ids = results.results.map((sub) => {
            return sub.rows[0][0] as string;
          });
          await new Promise((resolve) => setTimeout(resolve, Math.random() * 30));

          if (Math.random() > 0.5) {
            const updateStatements: pgwire.Statement[] = ids.map((id) => {
              return {
                statement: `UPDATE test_data SET num = $2 WHERE id = $1`,
                params: [
                  { type: 'uuid', value: id },
                  { type: 'float8', value: Math.random() }
                ]
              };
            });

            await pool.query(...updateStatements);
            if (Math.random() > 0.5) {
              // Special case - an update that doesn't change data
              await pool.query(...updateStatements);
            }
          }

          const deleteStatements: pgwire.Statement[] = ids.map((id) => {
            return {
              statement: `DELETE FROM test_data WHERE id = $1`,
              params: [{ type: 'uuid', value: id }]
            };
          });
          await pool.query(...deleteStatements);

          await new Promise((resolve) => setTimeout(resolve, Math.random() * 10));
        }
      };

      let compactController = new AbortController();

      const bgCompact = async () => {
        // Repeatedly compact, and check that the compact conditions hold
        while (!compactController.signal.aborted) {
          const delay = Math.random() * 50;
          try {
            await timers.setTimeout(delay, undefined, { signal: compactController.signal });
          } catch (e) {
            break;
          }

          const checkpoint = BigInt((await storage.getCheckpoint()).checkpoint);
          const opsBefore = (await f.db.bucket_data.find().sort({ _id: 1 }).toArray())
            .filter((row) => row._id.o <= checkpoint)
            .map(mapOpEntry);
          await storage.compact({ maxOpId: checkpoint });
          const opsAfter = (await f.db.bucket_data.find().sort({ _id: 1 }).toArray())
            .filter((row) => row._id.o <= checkpoint)
            .map(mapOpEntry);

          validateCompactedBucket(opsBefore, opsAfter);
        }
      };

      // Call the above loop multiple times concurrently
      const promises = [1, 2, 3].map((i) => bg());
      const compactPromise = testOptions.compact ? bgCompact() : null;
      await Promise.all(promises);
      compactController.abort();
      await compactPromise;

      // Wait for replication to finish
      let checkpoint = await getClientCheckpoint(pool, storage.factory, { timeout: TIMEOUT_MARGIN_MS });

      // Check that all inserts have been deleted again
      const docs = await f.db.current_data.find().toArray();
      const transformed = docs.map((doc) => {
        return bson.deserialize(doc.data.buffer) as SqliteRow;
      });
      expect(transformed).toEqual([]);

      // Check that each PUT has a REMOVE
      const ops = await f.db.bucket_data.find().sort({ _id: 1 }).toArray();

      // All a single bucket in this test
      const bucket = ops.map((op) => mapOpEntry(op));
      const reduced = reduceBucket(bucket);
      expect(reduced).toMatchObject([
        {
          op_id: '0',
          op: 'CLEAR'
        }
        // Should contain no additional data
      ]);
    }

    abortController.abort();
    await streamPromise;
  }

  // Test repeatedly performing initial replication.
  //
  // If the first LSN does not correctly match with the first replication transaction,
  // we may miss some updates.
  test(
    'repeated initial replication',
    async () => {
      const pool = await connectPgPool();
      await clearTestDb(pool);
      const f = await factory();

      const syncRuleContent = `
bucket_definitions:
  global:
    data:
      - SELECT id, description FROM "test_data"
`;
      const syncRules = await f.updateSyncRules({ content: syncRuleContent });
      const storage = f.getInstance(syncRules);

      // 1. Setup some base data that will be replicated in initial replication
      await pool.query(`CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`);

      let statements: pgwire.Statement[] = [];

      const n = Math.floor(Math.random() * 200);
      for (let i = 0; i < n; i++) {
        statements.push({
          statement: `INSERT INTO test_data(description) VALUES('test_init')`
        });
      }
      await pool.query(...statements);

      const start = Date.now();
      let i = 0;

      while (Date.now() - start < TEST_DURATION_MS) {
        // 2. Each iteration starts with a clean slate
        await pool.query(`SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots WHERE active = FALSE`);
        i += 1;

        const connections = new PgManager(TEST_CONNECTION_OPTIONS, {});
        const replicationConnection = await connections.replicationConnection();

        abortController = new AbortController();
        const options: WalStreamOptions = {
          abort_signal: abortController.signal,
          connections,
          storage: storage
        };
        walStream = new WalStream(options);

        await storage.clear();

        // 3. Start initial replication, then streaming, but don't wait for any of this
        let initialReplicationDone = false;
        streamPromise = (async () => {
          await walStream.initReplication(replicationConnection);
          await storage.autoActivate();
          initialReplicationDone = true;
          await walStream.streamChanges(replicationConnection);
        })()
          .catch((e) => {
            initialReplicationDone = true;
            throw e;
          })
          .then((v) => {
            return v;
          });

        // 4. While initial replication is still running, write more changes
        while (!initialReplicationDone) {
          let statements: pgwire.Statement[] = [];
          const n = Math.floor(Math.random() * 10) + 1;
          for (let i = 0; i < n; i++) {
            const description = `test${i}`;
            statements.push({
              statement: `INSERT INTO test_data(description) VALUES($1) returning id as test_id`,
              params: [{ type: 'varchar', value: description }]
            });
          }
          const results = await pool.query(...statements);
          const ids = results.results.map((sub) => {
            return sub.rows[0][0] as string;
          });
          await new Promise((resolve) => setTimeout(resolve, Math.random() * 30));
          const deleteStatements: pgwire.Statement[] = ids.map((id) => {
            return {
              statement: `DELETE FROM test_data WHERE id = $1`,
              params: [{ type: 'uuid', value: id }]
            };
          });
          await pool.query(...deleteStatements);
          await new Promise((resolve) => setTimeout(resolve, Math.random() * 10));
        }

        // 5. Once initial replication is done, wait for the streaming changes to complete syncing.
        // getClientCheckpoint() effectively waits for the above replication to complete.
        // Race with streamPromise to catch replication errors here.
        let checkpoint = await Promise.race([
          getClientCheckpoint(pool, storage.factory, { timeout: TIMEOUT_MARGIN_MS }),
          streamPromise
        ]);
        if (typeof checkpoint == 'undefined') {
          // This indicates an issue with the test setup - streamPromise completed instead
          // of getClientCheckpoint()
          throw new Error('Test failure - streamPromise completed');
        }

        abortController.abort();
        await streamPromise;
        await connections.end();
      }
    },
    { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }
  );
}
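
An aside on the final assertion in testRepeatedReplication above: reduceBucket collapses a bucket's oplog so that a REMOVE cancels any earlier PUT for the same row, and a compacted bucket starts from a synthetic CLEAR at op_id '0'. Since every row the test inserts is later deleted, the reduced bucket must contain nothing but that CLEAR entry. A minimal sketch of the invariant, assuming simplified op shapes; this is illustrative only, not the implementation from @core-tests/bucket_validation.js:

// Illustrative types only; the real ops come from mapOpEntry().
type OplogOp = { op: 'CLEAR' | 'PUT' | 'REMOVE'; op_id: string; object_id?: string };

function reduceBucketSketch(ops: OplogOp[]): OplogOp[] {
  // Track the latest surviving PUT per row; REMOVE deletes it, CLEAR resets everything.
  const state = new Map<string, OplogOp>();
  for (const op of ops) {
    if (op.op === 'CLEAR') {
      state.clear();
    } else if (op.op === 'PUT') {
      state.set(op.object_id!, op);
    } else {
      state.delete(op.object_id!);
    }
  }
  // A reduced bucket starts with a CLEAR at op_id '0', followed by surviving PUTs.
  return [{ op: 'CLEAR', op_id: '0' }, ...state.values()];
}

With no surviving rows, the sketch yields exactly [{ op: 'CLEAR', op_id: '0' }], which is what the toMatchObject assertion checks.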
package/test/src/util.ts
ADDED
@@ -0,0 +1,105 @@
import * as types from '@module/types/types.js';
import * as pg_utils from '@module/utils/pgwire_utils.js';
import { BucketStorageFactory, Metrics, MongoBucketStorage, OpId } from '@powersync/service-core';
import * as pgwire from '@powersync/service-jpgwire';
import { env } from './env.js';
import { pgwireRows } from '@powersync/service-jpgwire';
import { logger } from '@powersync/lib-services-framework';
import { connectMongo } from '@core-tests/util.js';

// The metrics need to be initialized before they can be used
await Metrics.initialise({
  disable_telemetry_sharing: true,
  powersync_instance_id: 'test',
  internal_metrics_endpoint: 'unused.for.tests.com'
});
Metrics.getInstance().resetCounters();

export const TEST_URI = env.PG_TEST_URL;

export const TEST_CONNECTION_OPTIONS = types.normalizeConnectionConfig({
  type: 'postgresql',
  uri: TEST_URI,
  sslmode: 'disable'
});

export type StorageFactory = () => Promise<BucketStorageFactory>;

export const INITIALIZED_MONGO_STORAGE_FACTORY: StorageFactory = async () => {
  const db = await connectMongo();

  // None of the PG tests insert data into this collection, so it was never created
  if (!(await db.db.listCollections({ name: db.bucket_parameters.collectionName }).hasNext())) {
    await db.db.createCollection('bucket_parameters');
  }

  await db.clear();

  return new MongoBucketStorage(db, { slot_name_prefix: 'test_' });
};

export async function clearTestDb(db: pgwire.PgClient) {
  await db.query(
    "select pg_drop_replication_slot(slot_name) from pg_replication_slots where active = false and slot_name like 'test_%'"
  );

  await db.query(`CREATE EXTENSION IF NOT EXISTS "uuid-ossp"`);
  try {
    await db.query(`DROP PUBLICATION powersync`);
  } catch (e) {
    // Ignore
  }

  await db.query(`CREATE PUBLICATION powersync FOR ALL TABLES`);

  const tableRows = pgwire.pgwireRows(
    await db.query(`SELECT table_name FROM information_schema.tables where table_schema = 'public'`)
  );
  for (let row of tableRows) {
    const name = row.table_name;
    if (name.startsWith('test_')) {
      await db.query(`DROP TABLE public.${pg_utils.escapeIdentifier(name)}`);
    }
  }
}

export async function connectPgWire(type?: 'replication' | 'standard') {
  const db = await pgwire.connectPgWire(TEST_CONNECTION_OPTIONS, { type });
  return db;
}

export function connectPgPool() {
  const db = pgwire.connectPgWirePool(TEST_CONNECTION_OPTIONS);
  return db;
}

export async function getClientCheckpoint(
  db: pgwire.PgClient,
  bucketStorage: BucketStorageFactory,
  options?: { timeout?: number }
): Promise<OpId> {
  const start = Date.now();

  const [{ lsn }] = pgwireRows(await db.query(`SELECT pg_logical_emit_message(false, 'powersync', 'ping') as lsn`));

  // This old API needs a persisted checkpoint id.
  // Since we don't use LSNs anymore, the only way to get that is to wait.

  const timeout = options?.timeout ?? 50_000;

  logger.info(`Waiting for LSN checkpoint: ${lsn}`);
  while (Date.now() - start < timeout) {
    const cp = await bucketStorage.getActiveCheckpoint();
    if (!cp.hasSyncRules()) {
      throw new Error('No sync rules available');
    }
    if (cp.lsn && cp.lsn >= lsn) {
      logger.info(`Got write checkpoint: ${lsn} : ${cp.checkpoint}`);
      return cp.checkpoint;
    }

    await new Promise((resolve) => setTimeout(resolve, 30));
  }

  throw new Error('Timeout while waiting for checkpoint');
}
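
Note how getClientCheckpoint() doubles as a replication barrier: pg_logical_emit_message() advances the WAL without touching any table, and the poll loop returns once the active checkpoint's LSN has passed that marker, so every earlier commit is guaranteed to be reflected in bucket storage. A hypothetical call site, mirroring how the slow tests use it (the helper name is illustrative, not part of this package):

import { BucketStorageFactory } from '@powersync/service-core';
import * as pgwire from '@powersync/service-jpgwire';
import { getClientCheckpoint } from './util.js';

// Block until all writes committed so far have replicated, then return the
// checkpoint id at which that holds (throws on timeout).
async function waitForReplication(pool: pgwire.PgClient, factory: BucketStorageFactory) {
  return await getClientCheckpoint(pool, factory, { timeout: 30_000 });
}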
package/test/src/validation.test.ts
ADDED
@@ -0,0 +1,64 @@
import { MONGO_STORAGE_FACTORY } from '@core-tests/util.js';
import { expect, test } from 'vitest';
import { walStreamTest } from './wal_stream_utils.js';
import { getDebugTablesInfo } from '@module/replication/replication-utils.js';

// Not quite a walStreamTest, but it helps to manage the connection
test(
  'validate tables',
  walStreamTest(MONGO_STORAGE_FACTORY, async (context) => {
    const { pool } = context;

    await pool.query(`CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`);

    const syncRuleContent = `
bucket_definitions:
  global:
    data:
      - SELECT id, description FROM "test_data"
      - SELECT * FROM "other"
      - SELECT * FROM "other%"
`;

    const syncRules = await context.factory.updateSyncRules({ content: syncRuleContent });

    const tablePatterns = syncRules.parsed({ defaultSchema: 'public' }).sync_rules.getSourceTables();
    const tableInfo = await getDebugTablesInfo({
      db: pool,
      publicationName: context.publicationName,
      connectionTag: context.connectionTag,
      tablePatterns: tablePatterns,
      syncRules: syncRules.parsed({ defaultSchema: 'public' }).sync_rules
    });
    expect(tableInfo).toEqual([
      {
        schema: 'public',
        pattern: 'test_data',
        wildcard: false,
        table: {
          schema: 'public',
          name: 'test_data',
          replication_id: ['id'],
          pattern: undefined,
          data_queries: true,
          parameter_queries: false,
          errors: []
        }
      },
      {
        schema: 'public',
        pattern: 'other',
        wildcard: false,
        table: {
          schema: 'public',
          name: 'other',
          replication_id: [],
          data_queries: true,
          parameter_queries: false,
          errors: [{ level: 'warning', message: 'Table "public"."other" not found.' }]
        }
      },
      { schema: 'public', pattern: 'other%', wildcard: true, tables: [] }
    ]);
  })
);
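
The expected value distinguishes the two pattern shapes: an exact name like "other" resolves to a single table entry (with a warning when the table is missing), while a trailing % marks a wildcard that resolves to a tables array, empty here since nothing matches other%. A rough sketch of the prefix matching a trailing % implies; this is an illustration, not the resolution logic from replication-utils.ts:

// Illustrative only: a trailing '%' matches any table sharing the prefix.
function matchesTablePattern(pattern: string, tableName: string): boolean {
  if (pattern.endsWith('%')) {
    return tableName.startsWith(pattern.slice(0, -1));
  }
  return tableName === pattern;
}

// matchesTablePattern('other%', 'other_archive') === true
// matchesTablePattern('other%', 'test_data') === false
// matchesTablePattern('test_data', 'test_data') === true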