@powersync/service-module-postgres 0.19.2 → 0.19.4
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
- package/dist/api/PostgresRouteAPIAdapter.d.ts +1 -1
- package/dist/api/PostgresRouteAPIAdapter.js +63 -72
- package/dist/api/PostgresRouteAPIAdapter.js.map +1 -1
- package/dist/module/PostgresModule.js.map +1 -1
- package/dist/replication/MissingReplicationSlotError.d.ts +41 -0
- package/dist/replication/MissingReplicationSlotError.js +33 -0
- package/dist/replication/MissingReplicationSlotError.js.map +1 -0
- package/dist/replication/PostgresErrorRateLimiter.js +1 -1
- package/dist/replication/PostgresErrorRateLimiter.js.map +1 -1
- package/dist/replication/SnapshotQuery.js +2 -2
- package/dist/replication/SnapshotQuery.js.map +1 -1
- package/dist/replication/WalStream.d.ts +37 -14
- package/dist/replication/WalStream.js +145 -41
- package/dist/replication/WalStream.js.map +1 -1
- package/dist/replication/WalStreamReplicationJob.d.ts +1 -1
- package/dist/replication/WalStreamReplicationJob.js +7 -4
- package/dist/replication/WalStreamReplicationJob.js.map +1 -1
- package/dist/replication/WalStreamReplicator.d.ts +0 -1
- package/dist/replication/WalStreamReplicator.js +0 -22
- package/dist/replication/WalStreamReplicator.js.map +1 -1
- package/dist/replication/replication-index.d.ts +3 -1
- package/dist/replication/replication-index.js +3 -1
- package/dist/replication/replication-index.js.map +1 -1
- package/dist/replication/replication-utils.d.ts +3 -11
- package/dist/replication/replication-utils.js +101 -164
- package/dist/replication/replication-utils.js.map +1 -1
- package/dist/replication/wal-budget-utils.d.ts +23 -0
- package/dist/replication/wal-budget-utils.js +57 -0
- package/dist/replication/wal-budget-utils.js.map +1 -0
- package/dist/types/registry.js +1 -1
- package/dist/types/registry.js.map +1 -1
- package/package.json +15 -11
- package/sql/check-source-configuration.plpgsql +13 -0
- package/sql/debug-tables-info-batched.plpgsql +230 -0
- package/CHANGELOG.md +0 -843
- package/src/api/PostgresRouteAPIAdapter.ts +0 -356
- package/src/index.ts +0 -1
- package/src/module/PostgresModule.ts +0 -122
- package/src/replication/ConnectionManagerFactory.ts +0 -33
- package/src/replication/PgManager.ts +0 -122
- package/src/replication/PgRelation.ts +0 -41
- package/src/replication/PostgresErrorRateLimiter.ts +0 -48
- package/src/replication/SnapshotQuery.ts +0 -213
- package/src/replication/WalStream.ts +0 -1157
- package/src/replication/WalStreamReplicationJob.ts +0 -138
- package/src/replication/WalStreamReplicator.ts +0 -79
- package/src/replication/replication-index.ts +0 -5
- package/src/replication/replication-utils.ts +0 -398
- package/src/types/registry.ts +0 -275
- package/src/types/resolver.ts +0 -227
- package/src/types/types.ts +0 -44
- package/src/utils/application-name.ts +0 -8
- package/src/utils/migration_lib.ts +0 -80
- package/src/utils/populate_test_data.ts +0 -37
- package/src/utils/populate_test_data_worker.ts +0 -53
- package/src/utils/postgres_version.ts +0 -8
- package/test/src/checkpoints.test.ts +0 -86
- package/test/src/chunked_snapshots.test.ts +0 -161
- package/test/src/env.ts +0 -11
- package/test/src/large_batch.test.ts +0 -241
- package/test/src/pg_test.test.ts +0 -729
- package/test/src/resuming_snapshots.test.ts +0 -160
- package/test/src/route_api_adapter.test.ts +0 -62
- package/test/src/schema_changes.test.ts +0 -655
- package/test/src/setup.ts +0 -12
- package/test/src/slow_tests.test.ts +0 -519
- package/test/src/storage_combination.test.ts +0 -35
- package/test/src/types/registry.test.ts +0 -149
- package/test/src/util.ts +0 -151
- package/test/src/validation.test.ts +0 -63
- package/test/src/wal_stream.test.ts +0 -607
- package/test/src/wal_stream_utils.ts +0 -284
- package/test/tsconfig.json +0 -27
- package/tsconfig.json +0 -34
- package/tsconfig.tsbuildinfo +0 -1
- package/vitest.config.ts +0 -3
package/test/src/slow_tests.test.ts
@@ -1,519 +0,0 @@
-import * as bson from 'bson';
-import { afterEach, beforeAll, describe, expect, test } from 'vitest';
-import { WalStream, WalStreamOptions } from '../../src/replication/WalStream.js';
-import { env } from './env.js';
-import {
-  clearTestDb,
-  connectPgPool,
-  describeWithStorage,
-  getClientCheckpoint,
-  StorageVersionTestContext,
-  TEST_CONNECTION_OPTIONS
-} from './util.js';
-
-import * as pgwire from '@powersync/service-jpgwire';
-import { SqliteRow } from '@powersync/service-sync-rules';
-
-import { PgManager } from '@module/replication/PgManager.js';
-import { ReplicationAbortedError } from '@powersync/lib-services-framework';
-import {
-  createCoreReplicationMetrics,
-  CURRENT_STORAGE_VERSION,
-  initializeCoreReplicationMetrics,
-  reduceBucket,
-  updateSyncRulesFromYaml
-} from '@powersync/service-core';
-import { METRICS_HELPER, test_utils } from '@powersync/service-core-tests';
-import * as mongo_storage from '@powersync/service-module-mongodb-storage';
-import * as postgres_storage from '@powersync/service-module-postgres-storage';
-import * as timers from 'node:timers/promises';
-import { WalStreamTestContext } from './wal_stream_utils.js';
-
-describe.skipIf(!(env.CI || env.SLOW_TESTS))('slow tests', function () {
-  describeWithStorage(
-    {
-      timeout: 120_000,
-      // These tests are slow, so only test the current storage version
-      storageVersions: [CURRENT_STORAGE_VERSION]
-    },
-    function ({ factory, storageVersion }) {
-      defineSlowTests({ factory, storageVersion });
-    }
-  );
-});
-
-function defineSlowTests({ factory, storageVersion }: StorageVersionTestContext) {
-  let walStream: WalStream | undefined;
-  let connections: PgManager | undefined;
-  let abortController: AbortController | undefined;
-  let streamPromise: Promise<void> | undefined;
-
-  beforeAll(async () => {
-    createCoreReplicationMetrics(METRICS_HELPER.metricsEngine);
-    initializeCoreReplicationMetrics(METRICS_HELPER.metricsEngine);
-  });
-
-  afterEach(async () => {
-    // This cleans up, similar to WalStreamTestContext.dispose().
-    // These tests are a little more complex than what is supported by WalStreamTestContext.
-    abortController?.abort();
-    await streamPromise?.catch((_) => {});
-    streamPromise = undefined;
-    connections?.destroy();
-
-    connections = undefined;
-    walStream = undefined;
-    abortController = undefined;
-  });
-
-  const TEST_DURATION_MS = 15_000;
-  const TIMEOUT_MARGIN_MS = env.CI ? 30_000 : 15_000;
-
-  // Test repeatedly replicating inserts and deletes, then check that we get
-  // consistent data out at the end.
-  //
-  // Past issues that this could reproduce intermittently:
-  // * Skipping LSNs after a keepalive message
-  // * Skipping LSNs when source transactions overlap
-  test('repeated replication - basic', { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }, async () => {
-    await testRepeatedReplication({ compact: false, maxBatchSize: 50, numBatches: 5 });
-  });
-
-  test('repeated replication - compacted', { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }, async () => {
-    await testRepeatedReplication({ compact: true, maxBatchSize: 100, numBatches: 2 });
-  });
-
-  async function testRepeatedReplication(testOptions: { compact: boolean; maxBatchSize: number; numBatches: number }) {
-    const connections = new PgManager(TEST_CONNECTION_OPTIONS, {});
-    const pool = connections.pool;
-    await clearTestDb(pool);
-    await using f = await factory();
-
-    const syncRuleContent = `
-bucket_definitions:
-  global:
-    data:
-      - SELECT * FROM "test_data"
-`;
-    const syncRules = await f.updateSyncRules(updateSyncRulesFromYaml(syncRuleContent, { storageVersion }));
-    const storage = f.getInstance(syncRules);
-    abortController = new AbortController();
-    const options: WalStreamOptions = {
-      abort_signal: abortController.signal,
-      connections,
-      storage: storage,
-      metrics: METRICS_HELPER.metricsEngine
-    };
-    walStream = new WalStream(options);
-
-    await pool.query(
-      `CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text, num decimal)`
-    );
-    await pool.query(`ALTER TABLE test_data REPLICA IDENTITY FULL`);
-
-    let abort = false;
-    streamPromise = walStream.replicate().finally(() => {
-      abort = true;
-    });
-    await walStream.waitForInitialSnapshot();
-    const start = Date.now();
-
-    while (!abort && Date.now() - start < TEST_DURATION_MS) {
-      const bg = async () => {
-        for (let j = 0; j < testOptions.numBatches && !abort; j++) {
-          const n = Math.max(1, Math.floor(Math.random() * testOptions.maxBatchSize));
-          let statements: pgwire.Statement[] = [];
-          for (let i = 0; i < n; i++) {
-            const description = `test${i}`;
-            statements.push({
-              statement: `INSERT INTO test_data(description, num) VALUES($1, $2) returning id as test_id`,
-              params: [
-                { type: 'varchar', value: description },
-                { type: 'float8', value: Math.random() }
-              ]
-            });
-          }
-          const results = await pool.query(...statements);
-          const ids = results.results.map((sub) => {
-            return sub.rows[0].decodeWithoutCustomTypes(0) as string;
-          });
-          await new Promise((resolve) => setTimeout(resolve, Math.random() * 30));
-
-          if (Math.random() > 0.5) {
-            const updateStatements: pgwire.Statement[] = ids.map((id) => {
-              return {
-                statement: `UPDATE test_data SET num = $2 WHERE id = $1`,
-                params: [
-                  { type: 'uuid', value: id },
-                  { type: 'float8', value: Math.random() }
-                ]
-              };
-            });
-
-            await pool.query(...updateStatements);
-            if (Math.random() > 0.5) {
-              // Special case - an update that doesn't change data
-              await pool.query(...updateStatements);
-            }
-          }
-
-          const deleteStatements: pgwire.Statement[] = ids.map((id) => {
-            return {
-              statement: `DELETE FROM test_data WHERE id = $1`,
-              params: [{ type: 'uuid', value: id }]
-            };
-          });
-          await pool.query(...deleteStatements);
-
-          await new Promise((resolve) => setTimeout(resolve, Math.random() * 10));
-        }
-      };
-
-      let compactController = new AbortController();
-
-      const bgCompact = async () => {
-        // Repeatedly compact, and check that the compact conditions hold
-        while (!compactController.signal.aborted) {
-          const delay = Math.random() * 50;
-          try {
-            await timers.setTimeout(delay, undefined, { signal: compactController.signal });
-          } catch (e) {
-            break;
-          }
-
-          const checkpoint = (await storage.getCheckpoint()).checkpoint;
-          if (f instanceof mongo_storage.storage.MongoBucketStorage) {
-            const opsBefore = (await f.db.bucket_data.find().sort({ _id: 1 }).toArray())
-              .filter((row) => row._id.o <= checkpoint)
-              .map(mongo_storage.storage.mapOpEntry);
-            await storage.compact({ maxOpId: checkpoint });
-            const opsAfter = (await f.db.bucket_data.find().sort({ _id: 1 }).toArray())
-              .filter((row) => row._id.o <= checkpoint)
-              .map(mongo_storage.storage.mapOpEntry);
-
-            test_utils.validateCompactedBucket(opsBefore, opsAfter);
-          } else if (f instanceof postgres_storage.PostgresBucketStorageFactory) {
-            const { db } = f;
-            const opsBefore = (
-              await db.sql`
-                SELECT
-                  *
-                FROM
-                  bucket_data
-                WHERE
-                  op_id <= ${{ type: 'int8', value: checkpoint }}
-                ORDER BY
-                  op_id ASC
-              `
-                .decoded(postgres_storage.models.BucketData)
-                .rows()
-            ).map(postgres_storage.utils.mapOpEntry);
-            await storage.compact({ maxOpId: checkpoint });
-            const opsAfter = (
-              await db.sql`
-                SELECT
-                  *
-                FROM
-                  bucket_data
-                WHERE
-                  op_id <= ${{ type: 'int8', value: checkpoint }}
-                ORDER BY
-                  op_id ASC
-              `
-                .decoded(postgres_storage.models.BucketData)
-                .rows()
-            ).map(postgres_storage.utils.mapOpEntry);
-
-            test_utils.validateCompactedBucket(opsBefore, opsAfter);
-          }
-        }
-      };
-
-      // Call the above loop multiple times concurrently
-      const promises = [1, 2, 3].map((i) => bg());
-      const compactPromise = testOptions.compact ? bgCompact() : null;
-      await Promise.all(promises);
-      compactController.abort();
-      await compactPromise;
-
-      // Wait for replication to finish
-      await getClientCheckpoint(pool, storage.factory, { timeout: TIMEOUT_MARGIN_MS });
-
-      if (f instanceof mongo_storage.storage.MongoBucketStorage) {
-        // Check that all inserts have been deleted again
-        // Note: at this point, the pending_delete cleanup may not have run yet.
-        const docs = await f.db.current_data.find({ pending_delete: { $exists: false } }).toArray();
-        const transformed = docs.map((doc) => {
-          return bson.deserialize(doc.data.buffer) as SqliteRow;
-        });
-        expect(transformed).toEqual([]);
-
-        // Check that each PUT has a REMOVE
-        const ops = await f.db.bucket_data.find().sort({ _id: 1 }).toArray();
-
-        // All a single bucket in this test
-        const bucket = ops.map((op) => mongo_storage.storage.mapOpEntry(op));
-        const reduced = test_utils.reduceBucket(bucket);
-        expect(reduced).toMatchObject([
-          {
-            op_id: '0',
-            op: 'CLEAR'
-          }
-          // Should contain no additional data
-        ]);
-      } else if (f instanceof postgres_storage.storage.PostgresBucketStorageFactory) {
-        const { db } = f;
-        // Check that all inserts have been deleted again
-        // FIXME: handle different storage versions
-        const docs = await db.sql`
-          SELECT
-            *
-          FROM
-            current_data
-        `
-          .decoded(postgres_storage.models.V1CurrentData)
-          .rows();
-        const transformed = docs.map((doc) => {
-          return bson.deserialize(doc.data) as SqliteRow;
-        });
-        expect(transformed).toEqual([]);
-
-        // Check that each PUT has a REMOVE
-        const ops = await db.sql`
-          SELECT
-            *
-          FROM
-            bucket_data
-          ORDER BY
-            op_id ASC
-        `
-          .decoded(postgres_storage.models.BucketData)
-          .rows();
-
-        // All a single bucket in this test
-        const bucket = ops.map((op) => postgres_storage.utils.mapOpEntry(op));
-        const reduced = test_utils.reduceBucket(bucket);
-        expect(reduced).toMatchObject([
-          {
-            op_id: '0',
-            op: 'CLEAR'
-          }
-          // Should contain no additional data
-        ]);
-      }
-    }
-
-    abortController.abort();
-    await streamPromise.catch((e) => {
-      if (e instanceof ReplicationAbortedError) {
-        // Ignore
-      } else {
-        throw e;
-      }
-    });
-  }
-
-  // Test repeatedly performing initial replication.
-  //
-  // If the first LSN does not correctly match with the first replication transaction,
-  // we may miss some updates.
-  test('repeated initial replication (1)', { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }, async () => {
-    const pool = await connectPgPool();
-    await clearTestDb(pool);
-    await using f = await factory();
-
-    const syncRuleContent = `
-bucket_definitions:
-  global:
-    data:
-      - SELECT id, description FROM "test_data"
-`;
-
-    const syncRules = await f.updateSyncRules(updateSyncRulesFromYaml(syncRuleContent, { storageVersion }));
-    const storage = f.getInstance(syncRules);
-
-    // 1. Setup some base data that will be replicated in initial replication
-    await pool.query(`CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`);
-
-    let statements: pgwire.Statement[] = [];
-
-    const n = Math.floor(Math.random() * 200);
-    for (let i = 0; i < n; i++) {
-      statements.push({
-        statement: `INSERT INTO test_data(description) VALUES('test_init')`
-      });
-    }
-    await pool.query(...statements);
-
-    const start = Date.now();
-    let i = 0;
-
-    while (Date.now() - start < TEST_DURATION_MS) {
-      // 2. Each iteration starts with a clean slate
-      await pool.query(`SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots WHERE active = FALSE`);
-      i += 1;
-
-      const connections = new PgManager(TEST_CONNECTION_OPTIONS, {});
-
-      abortController = new AbortController();
-      const options: WalStreamOptions = {
-        abort_signal: abortController.signal,
-        connections,
-        storage: storage,
-        metrics: METRICS_HELPER.metricsEngine
-      };
-      walStream = new WalStream(options);
-
-      await storage.clear();
-
-      // 3. Start replication, but don't wait for it
-      let initialReplicationDone = false;
-      streamPromise = walStream.replicate();
-      walStream
-        .waitForInitialSnapshot()
-        .catch((_) => {})
-        .finally(() => {
-          initialReplicationDone = true;
-        });
-
-      // 4. While initial replication is still running, write more changes
-      while (!initialReplicationDone) {
-        let statements: pgwire.Statement[] = [];
-        const n = Math.floor(Math.random() * 10) + 1;
-        for (let i = 0; i < n; i++) {
-          const description = `test${i}`;
-          statements.push({
-            statement: `INSERT INTO test_data(description) VALUES('test1') returning id as test_id`,
-            params: [{ type: 'varchar', value: description }]
-          });
-        }
-        const results = await pool.query(...statements);
-        const ids = results.results.map((sub) => {
-          return sub.rows[0].decodeWithoutCustomTypes(0) as string;
-        });
-        await new Promise((resolve) => setTimeout(resolve, Math.random() * 30));
-        const deleteStatements: pgwire.Statement[] = ids.map((id) => {
-          return {
-            statement: `DELETE FROM test_data WHERE id = $1`,
-            params: [{ type: 'uuid', value: id }]
-          };
-        });
-        await pool.query(...deleteStatements);
-        await new Promise((resolve) => setTimeout(resolve, Math.random() * 10));
-      }
-
-      // 5. Once initial replication is done, wait for the streaming changes to complete syncing.
-      // getClientCheckpoint() effectively waits for the above replication to complete
-      // Race with streamingPromise to catch replication errors here.
-      let checkpoint = await Promise.race([
-        getClientCheckpoint(pool, storage.factory, { timeout: TIMEOUT_MARGIN_MS }),
-        streamPromise
-      ]);
-      if (checkpoint == null) {
-        // This indicates an issue with the test setup - streamingPromise completed instead
-        // of getClientCheckpoint()
-        throw new Error('Test failure - streamingPromise completed');
-      }
-
-      abortController.abort();
-      await streamPromise.catch((e) => {
-        if (e instanceof ReplicationAbortedError) {
-          // Ignore
-        } else {
-          throw e;
-        }
-      });
-      await connections.end();
-    }
-  });
-
-  // Test repeatedly performing initial replication while deleting data.
-  //
-  // This specifically checks for data in the initial snapshot being deleted while snapshotting.
-  test('repeated initial replication with deletes', { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }, async () => {
-    const syncRuleContent = `
-bucket_definitions:
-  global:
-    data:
-      - SELECT id, description FROM "test_data"
-`;
-
-    const start = Date.now();
-    let i = 0;
-
-    while (Date.now() - start < TEST_DURATION_MS) {
-      i += 1;
-
-      // 1. Each iteration starts with a clean slate
-      await using context = await WalStreamTestContext.open(factory, {
-        walStreamOptions: { snapshotChunkLength: 100 }
-      });
-      const pool = context.pool;
-
-      // Introduce an artificial delay in snapshot queries, to make it more likely to reproduce an
-      // issue.
-      const originalSnapshotConnectionFn = context.connectionManager.snapshotConnection;
-      context.connectionManager.snapshotConnection = async () => {
-        const conn = await originalSnapshotConnectionFn.call(context.connectionManager);
-        // Wrap streaming query to add delays to snapshots
-        const originalStream = conn.stream;
-        conn.stream = async function* (...args: any[]) {
-          const delay = Math.random() * 20;
-          yield* originalStream.call(this, ...args);
-          await new Promise((resolve) => setTimeout(resolve, delay));
-        };
-        return conn;
-      };
-
-      await pool.query(`CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`);
-      await context.updateSyncRules(syncRuleContent);
-
-      let statements: pgwire.Statement[] = [];
-
-      const n = Math.floor(Math.random() * 200);
-      for (let i = 0; i < n; i++) {
-        statements.push({
-          statement: `INSERT INTO test_data(description) VALUES('test_init') RETURNING id`
-        });
-      }
-      const results = await pool.query(...statements);
-      const ids = new Set(
-        results.results.map((sub) => {
-          return sub.rows[0].decodeWithoutCustomTypes(0) as string;
-        })
-      );
-
-      // 3. Start replication, but don't wait for it
-      let initialReplicationDone = false;
-
-      streamPromise = context.replicateSnapshot().finally(() => {
-        initialReplicationDone = true;
-      });
-
-      // 4. While initial replication is still running, delete random rows
-      while (!initialReplicationDone && ids.size > 0) {
-        let statements: pgwire.Statement[] = [];
-
-        const m = Math.floor(Math.random() * 10) + 1;
-        const idArray = Array.from(ids);
-        for (let i = 0; i < m; i++) {
-          const id = idArray[Math.floor(Math.random() * idArray.length)];
-          statements.push({
-            statement: `DELETE FROM test_data WHERE id = $1`,
-            params: [{ type: 'uuid', value: id }]
-          });
-          ids.delete(id);
-        }
-        await pool.query(...statements);
-        await new Promise((resolve) => setTimeout(resolve, Math.random() * 10));
-      }
-
-      await streamPromise;
-
-      // 5. Once initial replication is done, wait for the streaming changes to complete syncing.
-      const data = await context.getBucketData('global[]', 0n);
-      const normalized = reduceBucket(data).filter((op) => op.op !== 'CLEAR');
-      expect(normalized.length).toEqual(ids.size);
-    }
-  });
-}
package/test/src/storage_combination.test.ts
@@ -1,35 +0,0 @@
-import * as postgres_storage from '@powersync/service-module-postgres-storage';
-import { describe, expect, test } from 'vitest';
-import { env } from './env.js';
-import { WalStreamTestContext } from './wal_stream_utils.js';
-
-describe.skipIf(!env.TEST_POSTGRES_STORAGE)('replication storage combination - postgres', function () {
-  test('should allow the same Postgres cluster to be used for data and storage', async () => {
-    // Use the same cluster for the storage as the data source
-    await using context = await WalStreamTestContext.open(
-      postgres_storage.test_utils.postgresTestSetup({
-        url: env.PG_TEST_URL
-      }).factory,
-      { doNotClear: false }
-    );
-
-    await context.updateSyncRules(/* yaml */
-    ` bucket_definitions:
-        global:
-          data:
-            - SELECT * FROM "test_data" `);
-
-    const { pool, connectionManager } = context;
-
-    const sourceVersion = await connectionManager.getServerVersion();
-
-    await pool.query(`CREATE TABLE test_data(id text primary key, description text, other text)`);
-
-    if (sourceVersion!.compareMain('14.0.0') < 0) {
-      await expect(context.replicateSnapshot()).rejects.toThrow();
-    } else {
-      // Should resolve
-      await context.replicateSnapshot();
-    }
-  });
-});
package/test/src/types/registry.test.ts
@@ -1,149 +0,0 @@
-import { describe, expect, test, beforeEach } from 'vitest';
-import { CustomTypeRegistry } from '@module/types/registry.js';
-import { CHAR_CODE_COMMA, PgTypeOid } from '@powersync/service-jpgwire';
-import {
-  applyValueContext,
-  CompatibilityContext,
-  CompatibilityEdition,
-  toSyncRulesValue
-} from '@powersync/service-sync-rules';
-
-describe('custom type registry', () => {
-  let registry: CustomTypeRegistry;
-
-  beforeEach(() => {
-    registry = new CustomTypeRegistry();
-  });
-
-  function checkResult(raw: string, type: number, old: any, fixed: any) {
-    const input = registry.decodeDatabaseValue(raw, type);
-    const syncRulesValue = toSyncRulesValue(input);
-
-    expect(applyValueContext(syncRulesValue, CompatibilityContext.FULL_BACKWARDS_COMPATIBILITY)).toStrictEqual(old);
-    expect(
-      applyValueContext(syncRulesValue, new CompatibilityContext({ edition: CompatibilityEdition.SYNC_STREAMS }))
-    ).toStrictEqual(fixed);
-  }
-
-  test('domain types', () => {
-    registry.setDomainType(1337, PgTypeOid.INT4); // create domain wrapping integer
-    checkResult('12', 1337, '12', 12n); // Should be raw text value without fix, parsed as inner type if enabled
-  });
-
-  test('array of domain types', () => {
-    registry.setDomainType(1337, PgTypeOid.INT4);
-    registry.set(1338, { type: 'array', separatorCharCode: CHAR_CODE_COMMA, innerId: 1337, sqliteType: () => 'text' });
-
-    checkResult('{1,2,3}', 1338, '{1,2,3}', '[1,2,3]');
-  });
-
-  test('nested array through domain type', () => {
-    registry.setDomainType(1337, PgTypeOid.INT4);
-    registry.set(1338, { type: 'array', separatorCharCode: CHAR_CODE_COMMA, innerId: 1337, sqliteType: () => 'text' });
-    registry.setDomainType(1339, 1338);
-
-    checkResult('{1,2,3}', 1339, '{1,2,3}', '[1,2,3]');
-
-    registry.set(1400, { type: 'array', separatorCharCode: CHAR_CODE_COMMA, innerId: 1339, sqliteType: () => 'text' });
-    checkResult('{{1,2,3}}', 1400, '{{1,2,3}}', '[[1,2,3]]');
-  });
-
-  test('structure', () => {
-    // create type c1 AS (a bool, b integer, c text[]);
-    registry.set(1337, {
-      type: 'composite',
-      sqliteType: () => 'text',
-      members: [
-        { name: 'a', typeId: PgTypeOid.BOOL },
-        { name: 'b', typeId: PgTypeOid.INT4 },
-        { name: 'c', typeId: 1009 } // text array
-      ]
-    });
-
-    // SELECT (TRUE, 123, ARRAY['foo', 'bar'])::c1;
-    checkResult('(t,123,"{foo,bar}")', 1337, '(t,123,"{foo,bar}")', '{"a":1,"b":123,"c":["foo","bar"]}');
-  });
-
-  test('array of structure', () => {
-    // create type c1 AS (a bool, b integer, c text[]);
-    registry.set(1337, {
-      type: 'composite',
-      sqliteType: () => 'text',
-      members: [
-        { name: 'a', typeId: PgTypeOid.BOOL },
-        { name: 'b', typeId: PgTypeOid.INT4 },
-        { name: 'c', typeId: 1009 } // text array
-      ]
-    });
-    registry.set(1338, { type: 'array', separatorCharCode: CHAR_CODE_COMMA, innerId: 1337, sqliteType: () => 'text' });
-
-    // SELECT ARRAY[(TRUE, 123, ARRAY['foo', 'bar']),(FALSE, NULL, ARRAY[]::text[])]::c1[];
-    checkResult(
-      '{"(t,123,\\"{foo,bar}\\")","(f,,{})"}',
-      1338,
-      '{"(t,123,\\"{foo,bar}\\")","(f,,{})"}',
-      '[{"a":1,"b":123,"c":["foo","bar"]},{"a":0,"b":null,"c":[]}]'
-    );
-  });
-
-  test('domain type of structure', () => {
-    registry.set(1337, {
-      type: 'composite',
-      sqliteType: () => 'text',
-      members: [
-        { name: 'a', typeId: PgTypeOid.BOOL },
-        { name: 'b', typeId: PgTypeOid.INT4 }
-      ]
-    });
-    registry.setDomainType(1338, 1337);
-
-    checkResult('(t,123)', 1337, '(t,123)', '{"a":1,"b":123}');
-  });
-
-  test('structure of another structure', () => {
-    // CREATE TYPE c2 AS (a BOOLEAN, b INTEGER);
-    registry.set(1337, {
-      type: 'composite',
-      sqliteType: () => 'text',
-      members: [
-        { name: 'a', typeId: PgTypeOid.BOOL },
-        { name: 'b', typeId: PgTypeOid.INT4 }
-      ]
-    });
-    registry.set(1338, { type: 'array', separatorCharCode: CHAR_CODE_COMMA, innerId: 1337, sqliteType: () => 'text' });
-    // CREATE TYPE c3 (c c2[]);
-    registry.set(1339, {
-      type: 'composite',
-      sqliteType: () => 'text',
-      members: [{ name: 'c', typeId: 1338 }]
-    });
-
-    // SELECT ROW(ARRAY[(FALSE,2)]::c2[])::c3;
-    checkResult('("{""(f,2)""}")', 1339, '("{""(f,2)""}")', '{"c":[{"a":0,"b":2}]}');
-  });
-
-  test('range', () => {
-    registry.set(1337, {
-      type: 'range',
-      sqliteType: () => 'text',
-      innerId: PgTypeOid.INT2
-    });
-
-    checkResult('[1,2]', 1337, '[1,2]', '{"lower":1,"upper":2,"lower_exclusive":0,"upper_exclusive":0}');
-  });
-
-  test('multirange', () => {
-    registry.set(1337, {
-      type: 'multirange',
-      sqliteType: () => 'text',
-      innerId: PgTypeOid.INT2
-    });
-
-    checkResult(
-      '{[1,2),[3,4)}',
-      1337,
-      '{[1,2),[3,4)}',
-      '[{"lower":1,"upper":2,"lower_exclusive":0,"upper_exclusive":1},{"lower":3,"upper":4,"lower_exclusive":0,"upper_exclusive":1}]'
-    );
-  });
-});