@powersync/service-module-postgres 0.2.4 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +33 -0
- package/dist/api/PostgresRouteAPIAdapter.js +9 -9
- package/dist/api/PostgresRouteAPIAdapter.js.map +1 -1
- package/dist/auth/SupabaseKeyCollector.js +2 -2
- package/dist/auth/SupabaseKeyCollector.js.map +1 -1
- package/dist/index.d.ts +1 -0
- package/dist/index.js +1 -0
- package/dist/index.js.map +1 -1
- package/dist/module/PostgresModule.js +1 -1
- package/dist/module/PostgresModule.js.map +1 -1
- package/dist/replication/WalStream.js +2 -1
- package/dist/replication/WalStream.js.map +1 -1
- package/dist/replication/replication-utils.d.ts +1 -1
- package/dist/replication/replication-utils.js +11 -13
- package/dist/replication/replication-utils.js.map +1 -1
- package/dist/types/types.d.ts +52 -51
- package/dist/types/types.js +11 -95
- package/dist/types/types.js.map +1 -1
- package/dist/utils/pgwire_utils.d.ts +1 -5
- package/dist/utils/pgwire_utils.js +0 -45
- package/dist/utils/pgwire_utils.js.map +1 -1
- package/package.json +12 -8
- package/src/api/PostgresRouteAPIAdapter.ts +9 -10
- package/src/auth/SupabaseKeyCollector.ts +2 -2
- package/src/index.ts +2 -0
- package/src/module/PostgresModule.ts +1 -1
- package/src/replication/WalStream.ts +3 -1
- package/src/replication/replication-utils.ts +12 -14
- package/src/types/types.ts +18 -132
- package/src/utils/pgwire_utils.ts +1 -46
- package/test/src/__snapshots__/schema_changes.test.ts.snap +5 -0
- package/test/src/env.ts +5 -1
- package/test/src/large_batch.test.ts +21 -7
- package/test/src/schema_changes.test.ts +19 -13
- package/test/src/setup.ts +8 -2
- package/test/src/slow_tests.test.ts +132 -39
- package/test/src/util.ts +14 -30
- package/test/src/validation.test.ts +4 -2
- package/test/src/wal_stream.test.ts +12 -9
- package/test/src/wal_stream_utils.ts +7 -7
- package/tsconfig.json +3 -0
- package/tsconfig.tsbuildinfo +1 -1
package/src/utils/pgwire_utils.ts
CHANGED

@@ -1,9 +1,7 @@
 // Adapted from https://github.com/kagis/pgwire/blob/0dc927f9f8990a903f238737326e53ba1c8d094f/mod.js#L2218
 
 import * as pgwire from '@powersync/service-jpgwire';
-import {
-
-import { logger } from '@powersync/lib-services-framework';
+import { SqliteRow, toSyncRulesRow } from '@powersync/service-sync-rules';
 
 /**
  * pgwire message -> SQLite row.
@@ -28,46 +26,3 @@ export function constructBeforeRecord(message: pgwire.PgoutputDelete | pgwire.Pg
   const record = pgwire.decodeTuple(message.relation, rawData);
   return toSyncRulesRow(record);
 }
-
-export function escapeIdentifier(identifier: string) {
-  return `"${identifier.replace(/"/g, '""').replace(/\./g, '"."')}"`;
-}
-
-export function autoParameter(arg: SqliteJsonValue | boolean): pgwire.StatementParam {
-  if (arg == null) {
-    return { type: 'varchar', value: null };
-  } else if (typeof arg == 'string') {
-    return { type: 'varchar', value: arg };
-  } else if (typeof arg == 'number') {
-    if (Number.isInteger(arg)) {
-      return { type: 'int8', value: arg };
-    } else {
-      return { type: 'float8', value: arg };
-    }
-  } else if (typeof arg == 'boolean') {
-    return { type: 'bool', value: arg };
-  } else if (typeof arg == 'bigint') {
-    return { type: 'int8', value: arg };
-  } else {
-    throw new Error(`Unsupported query parameter: ${typeof arg}`);
-  }
-}
-
-export async function retriedQuery(db: pgwire.PgClient, ...statements: pgwire.Statement[]): Promise<pgwire.PgResult>;
-export async function retriedQuery(db: pgwire.PgClient, query: string): Promise<pgwire.PgResult>;
-
-/**
- * Retry a simple query - up to 2 attempts total.
- */
-export async function retriedQuery(db: pgwire.PgClient, ...args: any[]) {
-  for (let tries = 2; ; tries--) {
-    try {
-      return await db.query(...args);
-    } catch (e) {
-      if (tries == 1) {
-        throw e;
-      }
-      logger.warn('Query error, retrying', e);
-    }
-  }
-}
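The helpers deleted here do not simply disappear: a later hunk in this diff (`package/test/src/util.ts`) calls `lib_postgres.escapeIdentifier` from `@powersync/lib-service-postgres`, which suggests these utilities moved into the shared library. A minimal sketch of a migrated call site, assuming the shared export keeps the removed helper's signature:

```ts
// Assumed new home of escapeIdentifier, inferred from the util.ts hunk below.
import * as lib_postgres from '@powersync/lib-service-postgres';

// Same behavior as the removed helper: quote an identifier, doubling embedded
// quotes and splitting on '.' for schema-qualified names.
const stmt = `DROP TABLE public.${lib_postgres.escapeIdentifier('test_data')}`;
// -> DROP TABLE public."test_data"
```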
package/test/src/env.ts
CHANGED

@@ -2,6 +2,10 @@ import { utils } from '@powersync/lib-services-framework';
 
 export const env = utils.collectEnvironmentVariables({
   PG_TEST_URL: utils.type.string.default('postgres://postgres:postgres@localhost:5432/powersync_test'),
+  PG_STORAGE_TEST_URL: utils.type.string.default('postgres://postgres:postgres@localhost:5431/powersync_storage_test'),
+  MONGO_TEST_URL: utils.type.string.default('mongodb://localhost:27017/powersync_test'),
   CI: utils.type.boolean.default('false'),
-  SLOW_TESTS: utils.type.boolean.default('false')
+  SLOW_TESTS: utils.type.boolean.default('false'),
+  TEST_MONGO_STORAGE: utils.type.boolean.default('true'),
+  TEST_POSTGRES_STORAGE: utils.type.boolean.default('true')
 });
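The two new `TEST_*_STORAGE` flags gate which storage backends the suite exercises; the test files below wrap each backend's suite in `describe.skipIf(...)`. A sketch of the gating pattern, assuming the defaults above (running with `TEST_POSTGRES_STORAGE=false` would then skip the postgres suites):

```ts
import { describe, test } from 'vitest';
import { env } from './env.js';

// skipIf marks the whole suite as skipped (rather than absent) when the flag
// is off, so a local run can target a single backend.
describe.skipIf(!env.TEST_MONGO_STORAGE)('mongodb-backed suite', () => {
  test('runs only when the flag is on', () => {});
});
```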
package/test/src/large_batch.test.ts
CHANGED

@@ -1,17 +1,31 @@
-import {
+import { Metrics, storage } from '@powersync/service-core';
+import * as timers from 'timers/promises';
 import { describe, expect, test } from 'vitest';
 import { populateData } from '../../dist/utils/populate_test_data.js';
 import { env } from './env.js';
-import {
+import {
+  INITIALIZED_MONGO_STORAGE_FACTORY,
+  INITIALIZED_POSTGRES_STORAGE_FACTORY,
+  TEST_CONNECTION_OPTIONS
+} from './util.js';
 import { WalStreamTestContext } from './wal_stream_utils.js';
-import * as timers from 'timers/promises';
-import { Metrics } from '@powersync/service-core';
 
-describe('batch replication tests - mongodb', { timeout: 120_000 }, function () {
+describe.skipIf(!env.TEST_MONGO_STORAGE)('batch replication tests - mongodb', { timeout: 120_000 }, function () {
+  // These are slow but consistent tests.
+  // Not run on every test run, but we do run on CI, or when manually debugging issues.
+  if (env.CI || env.SLOW_TESTS) {
+    defineBatchTests(INITIALIZED_MONGO_STORAGE_FACTORY);
+  } else {
+    // Need something in this file.
+    test('no-op', () => {});
+  }
+});
+
+describe.skipIf(!env.TEST_POSTGRES_STORAGE)('batch replication tests - postgres', { timeout: 240_000 }, function () {
   // These are slow but consistent tests.
   // Not run on every test run, but we do run on CI, or when manually debugging issues.
   if (env.CI || env.SLOW_TESTS) {
-    defineBatchTests(
+    defineBatchTests(INITIALIZED_POSTGRES_STORAGE_FACTORY);
   } else {
     // Need something in this file.
     test('no-op', () => {});
@@ -23,7 +37,7 @@ const BASIC_SYNC_RULES = `bucket_definitions:
     data:
       - SELECT id, description, other FROM "test_data"`;
 
-function defineBatchTests(factory:
+function defineBatchTests(factory: storage.TestStorageFactory) {
   test('update large record', async () => {
     await using context = await WalStreamTestContext.open(factory);
     // This test generates a large transaction in MongoDB, despite the replicated data
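The pattern introduced here: the suite body is a function of an abstract `storage.TestStorageFactory`, registered once per backend with backend-specific timeouts. A condensed sketch using only names visible in this hunk (the factory constants are declared rather than defined, for brevity):

```ts
import { storage } from '@powersync/service-core';
import { describe, test } from 'vitest';

declare const INITIALIZED_MONGO_STORAGE_FACTORY: storage.TestStorageFactory;
declare const INITIALIZED_POSTGRES_STORAGE_FACTORY: storage.TestStorageFactory;

// Written once against the abstract factory...
function defineBatchTests(factory: storage.TestStorageFactory) {
  test('update large record', async () => {
    // ...open a test context with `factory` and assert on replicated data
  });
}

// ...registered once per backend; postgres gets a larger timeout (240s vs 120s).
describe('batch replication tests - mongodb', { timeout: 120_000 }, () => {
  defineBatchTests(INITIALIZED_MONGO_STORAGE_FACTORY);
});
describe('batch replication tests - postgres', { timeout: 240_000 }, () => {
  defineBatchTests(INITIALIZED_POSTGRES_STORAGE_FACTORY);
});
```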
package/test/src/schema_changes.test.ts
CHANGED

@@ -1,14 +1,20 @@
-import { compareIds, putOp, removeOp } from '@core-tests
-import
-import { setTimeout } from 'node:timers/promises';
+import { compareIds, putOp, reduceBucket, removeOp, test_utils } from '@powersync/service-core-tests';
+import * as timers from 'timers/promises';
 import { describe, expect, test } from 'vitest';
-
+
+import { storage } from '@powersync/service-core';
+import { env } from './env.js';
+import { INITIALIZED_MONGO_STORAGE_FACTORY, INITIALIZED_POSTGRES_STORAGE_FACTORY } from './util.js';
 import { WalStreamTestContext } from './wal_stream_utils.js';
 
-describe('schema changes', { timeout: 20_000 }, function () {
+describe.skipIf(!env.TEST_MONGO_STORAGE)('schema changes - mongodb', { timeout: 20_000 }, function () {
   defineTests(INITIALIZED_MONGO_STORAGE_FACTORY);
 });
 
+describe.skipIf(!env.TEST_POSTGRES_STORAGE)('schema changes - postgres', { timeout: 20_000 }, function () {
+  defineTests(INITIALIZED_POSTGRES_STORAGE_FACTORY);
+});
+
 const BASIC_SYNC_RULES = `
 bucket_definitions:
   global:
@@ -16,14 +22,14 @@ bucket_definitions:
       - SELECT id, * FROM "test_data"
 `;
 
-const PUT_T1 = putOp('test_data', { id: 't1', description: 'test1' });
-const PUT_T2 = putOp('test_data', { id: 't2', description: 'test2' });
-const PUT_T3 = putOp('test_data', { id: 't3', description: 'test3' });
+const PUT_T1 = test_utils.putOp('test_data', { id: 't1', description: 'test1' });
+const PUT_T2 = test_utils.putOp('test_data', { id: 't2', description: 'test2' });
+const PUT_T3 = test_utils.putOp('test_data', { id: 't3', description: 'test3' });
 
-const REMOVE_T1 = removeOp('test_data', 't1');
-const REMOVE_T2 = removeOp('test_data', 't2');
+const REMOVE_T1 = test_utils.removeOp('test_data', 't1');
+const REMOVE_T2 = test_utils.removeOp('test_data', 't2');
 
-function defineTests(factory:
+function defineTests(factory: storage.TestStorageFactory) {
   test('re-create table', async () => {
     await using context = await WalStreamTestContext.open(factory);
 
@@ -431,7 +437,7 @@ function defineTests(factory: StorageFactory) {
     expect(data).toMatchObject([]);
 
     const metrics = await storage.factory.getStorageMetrics();
-    expect(metrics.replication_size_bytes).
+    expect(metrics.replication_size_bytes).toMatchSnapshot();
   });
 
   test('replica identity nothing', async () => {
@@ -544,7 +550,7 @@ function defineTests(factory: StorageFactory) {
     );
 
     // Need some delay for the snapshot to be triggered
-    await setTimeout(5);
+    await timers.setTimeout(5);
 
     let stop = false;
 
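A small change worth noting in this file: `setTimeout` is no longer imported by name from 'node:timers/promises'; the promises API is used through a namespace import, which keeps the global callback-style `setTimeout` unshadowed. For example:

```ts
import * as timers from 'timers/promises';

async function delayExample() {
  // Promise-based delay from Node's timers/promises API; resolves after ~5 ms.
  await timers.setTimeout(5);
}
```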
package/test/src/setup.ts
CHANGED

@@ -1,7 +1,13 @@
 import { container } from '@powersync/lib-services-framework';
-import {
+import { test_utils } from '@powersync/service-core-tests';
+import { beforeAll, beforeEach } from 'vitest';
 
-beforeAll(() => {
+beforeAll(async () => {
   // Executes for every test file
   container.registerDefaults();
+  await test_utils.initMetrics();
+});
+
+beforeEach(async () => {
+  await test_utils.resetMetrics();
 });
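With metrics initialized in `beforeAll` and reset in `beforeEach`, counter assertions become per-test rather than cumulative across a file. This replaces the import-time `Metrics.getInstance().resetCounters()` call removed from `util.ts` later in this diff. A sketch of what the reset semantics amount to, assuming `test_utils.resetMetrics()` wraps something similar to the old call:

```ts
import { Metrics } from '@powersync/service-core';
import { beforeEach } from 'vitest';

// Equivalent effect to what the old util.ts did once at import time, but
// scoped per test: every test observes counters starting from zero.
beforeEach(() => {
  Metrics.getInstance().resetCounters();
});
```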
package/test/src/slow_tests.test.ts
CHANGED

@@ -2,30 +2,48 @@ import * as bson from 'bson';
 import { afterEach, describe, expect, test } from 'vitest';
 import { WalStream, WalStreamOptions } from '../../src/replication/WalStream.js';
 import { env } from './env.js';
-import {
+import {
+  clearTestDb,
+  connectPgPool,
+  getClientCheckpoint,
+  INITIALIZED_MONGO_STORAGE_FACTORY,
+  INITIALIZED_POSTGRES_STORAGE_FACTORY,
+  TEST_CONNECTION_OPTIONS
+} from './util.js';
 
 import * as pgwire from '@powersync/service-jpgwire';
 import { SqliteRow } from '@powersync/service-sync-rules';
 
-import { mapOpEntry, MongoBucketStorage } from '@/storage/storage-index.js';
-import { validateCompactedBucket } from '@core-tests/bucket_validation.js';
-import { MONGO_STORAGE_FACTORY, StorageFactory } from '@core-tests/util.js';
 import { PgManager } from '@module/replication/PgManager.js';
+import { storage } from '@powersync/service-core';
+import { test_utils } from '@powersync/service-core-tests';
+import * as mongo_storage from '@powersync/service-module-mongodb-storage';
+import * as postgres_storage from '@powersync/service-module-postgres-storage';
 import * as timers from 'node:timers/promises';
-import { reduceBucket } from '@powersync/service-core';
 
-describe('slow tests - mongodb', function () {
+describe.skipIf(!env.TEST_MONGO_STORAGE)('slow tests - mongodb', function () {
   // These are slow, inconsistent tests.
   // Not run on every test run, but we do run on CI, or when manually debugging issues.
   if (env.CI || env.SLOW_TESTS) {
-    defineSlowTests(
+    defineSlowTests(INITIALIZED_MONGO_STORAGE_FACTORY);
   } else {
     // Need something in this file.
     test('no-op', () => {});
   }
 });
 
-function
+describe.skipIf(!env.TEST_POSTGRES_STORAGE)('slow tests - postgres', function () {
+  // These are slow, inconsistent tests.
+  // Not run on every test run, but we do run on CI, or when manually debugging issues.
+  if (env.CI || env.SLOW_TESTS) {
+    defineSlowTests(INITIALIZED_POSTGRES_STORAGE_FACTORY);
+  } else {
+    // Need something in this file.
+    test('no-op', () => {});
+  }
+});
+
+function defineSlowTests(factory: storage.TestStorageFactory) {
   let walStream: WalStream | undefined;
   let connections: PgManager | undefined;
   let abortController: AbortController | undefined;
@@ -74,7 +92,7 @@ function defineSlowTests(factory: StorageFactory) {
     const replicationConnection = await connections.replicationConnection();
     const pool = connections.pool;
     await clearTestDb(pool);
-
+    await using f = await factory();
 
     const syncRuleContent = `
 bucket_definitions:
@@ -169,15 +187,50 @@ bucket_definitions:
       }
 
       const checkpoint = BigInt((await storage.getCheckpoint()).checkpoint);
-
-        .
-
-
-
-        .
-
-
-
+      if (f instanceof mongo_storage.storage.MongoBucketStorage) {
+        const opsBefore = (await f.db.bucket_data.find().sort({ _id: 1 }).toArray())
+          .filter((row) => row._id.o <= checkpoint)
+          .map(mongo_storage.storage.mapOpEntry);
+        await storage.compact({ maxOpId: checkpoint });
+        const opsAfter = (await f.db.bucket_data.find().sort({ _id: 1 }).toArray())
+          .filter((row) => row._id.o <= checkpoint)
+          .map(mongo_storage.storage.mapOpEntry);
+
+        test_utils.validateCompactedBucket(opsBefore, opsAfter);
+      } else if (f instanceof postgres_storage.PostgresBucketStorageFactory) {
+        const { db } = f;
+        const opsBefore = (
+          await db.sql`
+            SELECT
+              *
+            FROM
+              bucket_data
+            WHERE
+              op_id <= ${{ type: 'int8', value: checkpoint }}
+            ORDER BY
+              op_id ASC
+          `
+            .decoded(postgres_storage.models.BucketData)
+            .rows()
+        ).map(postgres_storage.utils.mapOpEntry);
+        await storage.compact({ maxOpId: checkpoint });
+        const opsAfter = (
+          await db.sql`
+            SELECT
+              *
+            FROM
+              bucket_data
+            WHERE
+              op_id <= ${{ type: 'int8', value: checkpoint }}
+            ORDER BY
+              op_id ASC
+          `
+            .decoded(postgres_storage.models.BucketData)
+            .rows()
+        ).map(postgres_storage.utils.mapOpEntry);
+
+        test_utils.validateCompactedBucket(opsBefore, opsAfter);
+      }
     }
   };
 
@@ -191,26 +244,66 @@ bucket_definitions:
     // Wait for replication to finish
     let checkpoint = await getClientCheckpoint(pool, storage.factory, { timeout: TIMEOUT_MARGIN_MS });
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+      if (f instanceof mongo_storage.storage.MongoBucketStorage) {
+        // Check that all inserts have been deleted again
+        const docs = await f.db.current_data.find().toArray();
+        const transformed = docs.map((doc) => {
+          return bson.deserialize(doc.data.buffer) as SqliteRow;
+        });
+        expect(transformed).toEqual([]);
+
+        // Check that each PUT has a REMOVE
+        const ops = await f.db.bucket_data.find().sort({ _id: 1 }).toArray();
+
+        // All a single bucket in this test
+        const bucket = ops.map((op) => mongo_storage.storage.mapOpEntry(op));
+        const reduced = test_utils.reduceBucket(bucket);
+        expect(reduced).toMatchObject([
+          {
+            op_id: '0',
+            op: 'CLEAR'
+          }
+          // Should contain no additional data
+        ]);
+      } else if (f instanceof postgres_storage.storage.PostgresBucketStorageFactory) {
+        const { db } = f;
+        // Check that all inserts have been deleted again
+        const docs = await db.sql`
+          SELECT
+            *
+          FROM
+            current_data
+        `
+          .decoded(postgres_storage.models.CurrentData)
+          .rows();
+        const transformed = docs.map((doc) => {
+          return bson.deserialize(doc.data) as SqliteRow;
+        });
+        expect(transformed).toEqual([]);
+
+        // Check that each PUT has a REMOVE
+        const ops = await db.sql`
+          SELECT
+            *
+          FROM
+            bucket_data
+          ORDER BY
+            op_id ASC
+        `
+          .decoded(postgres_storage.models.BucketData)
+          .rows();
+
+        // All a single bucket in this test
+        const bucket = ops.map((op) => postgres_storage.utils.mapOpEntry(op));
+        const reduced = test_utils.reduceBucket(bucket);
+        expect(reduced).toMatchObject([
+          {
+            op_id: '0',
+            op: 'CLEAR'
+          }
+          // Should contain no additional data
+        ]);
+      }
     }
 
     abortController.abort();
@@ -226,7 +319,7 @@ bucket_definitions:
     async () => {
       const pool = await connectPgPool();
       await clearTestDb(pool);
-
+      await using f = await factory();
 
       const syncRuleContent = `
 bucket_definitions:
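The postgres storage branches above read bucket data through a `db.sql` tagged template: interpolations are typed parameter objects (`${{ type: 'int8', value: checkpoint }}`) rather than string splices, and `.decoded(model)` maps raw rows into typed records before `.rows()` materializes them. A trimmed sketch of that query shape, reusing only names from the hunks above (the `db` handle is typed loosely here, as its concrete type is not shown in this diff):

```ts
import * as postgres_storage from '@powersync/service-module-postgres-storage';

// Fetch all ops up to a checkpoint, as the compaction test above does.
async function opsUpTo(db: any, checkpoint: bigint) {
  const rows = await db.sql`
    SELECT * FROM bucket_data
    WHERE op_id <= ${{ type: 'int8', value: checkpoint }}
    ORDER BY op_id ASC
  `
    .decoded(postgres_storage.models.BucketData)
    .rows();
  return rows.map(postgres_storage.utils.mapOpEntry);
}
```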
package/test/src/util.ts
CHANGED

@@ -1,22 +1,23 @@
-import {
+import { PostgresRouteAPIAdapter } from '@module/api/PostgresRouteAPIAdapter.js';
 import * as types from '@module/types/types.js';
-import * as
+import * as lib_postgres from '@powersync/lib-service-postgres';
 import { logger } from '@powersync/lib-services-framework';
-import { BucketStorageFactory,
+import { BucketStorageFactory, OpId } from '@powersync/service-core';
 import * as pgwire from '@powersync/service-jpgwire';
-import
+import * as mongo_storage from '@powersync/service-module-mongodb-storage';
+import * as postgres_storage from '@powersync/service-module-postgres-storage';
 import { env } from './env.js';
-import { PostgresRouteAPIAdapter } from '@module/api/PostgresRouteAPIAdapter.js';
 
-
-
-
-
-
+export const TEST_URI = env.PG_TEST_URL;
+
+export const INITIALIZED_MONGO_STORAGE_FACTORY = mongo_storage.MongoTestStorageFactoryGenerator({
+  url: env.MONGO_TEST_URL,
+  isCI: env.CI
 });
-Metrics.getInstance().resetCounters();
 
-export const
+export const INITIALIZED_POSTGRES_STORAGE_FACTORY = postgres_storage.PostgresTestStorageFactoryGenerator({
+  url: env.PG_STORAGE_TEST_URL
+});
 
 export const TEST_CONNECTION_OPTIONS = types.normalizeConnectionConfig({
   type: 'postgresql',
@@ -24,23 +25,6 @@ export const TEST_CONNECTION_OPTIONS = types.normalizeConnectionConfig({
   sslmode: 'disable'
 });
 
-export type StorageFactory = () => Promise<BucketStorageFactory>;
-
-export const INITIALIZED_MONGO_STORAGE_FACTORY: StorageFactory = async () => {
-  const db = await connectMongo();
-
-  // None of the PG tests insert data into this collection, so it was never created
-  if (!(await db.db.listCollections({ name: db.bucket_parameters.collectionName }).hasNext())) {
-    await db.db.createCollection('bucket_parameters');
-  }
-
-  await db.clear();
-
-  return new MongoBucketStorage(db, {
-    slot_name_prefix: 'test_'
-  });
-};
-
 export async function clearTestDb(db: pgwire.PgClient) {
   await db.query(
     "select pg_drop_replication_slot(slot_name) from pg_replication_slots where active = false and slot_name like 'test_%'"
@@ -61,7 +45,7 @@ export async function clearTestDb(db: pgwire.PgClient) {
   for (let row of tableRows) {
     const name = row.table_name;
     if (name.startsWith('test_')) {
-      await db.query(`DROP TABLE public.${
+      await db.query(`DROP TABLE public.${lib_postgres.escapeIdentifier(name)}`);
     }
   }
 }
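The hand-rolled Mongo factory (create `bucket_parameters`, `db.clear()`, construct `MongoBucketStorage`) is replaced by generator functions exported from each storage module, so backend-specific bootstrap lives with the backend. Call sites treat the result as an async factory; a sketch based on the call patterns elsewhere in this diff:

```ts
import * as mongo_storage from '@powersync/service-module-mongodb-storage';
import { env } from './env.js';

const factory = mongo_storage.MongoTestStorageFactoryGenerator({
  url: env.MONGO_TEST_URL,
  isCI: env.CI
});

async function example() {
  // Options appear optional at the call sites in this diff: wal_stream_utils
  // passes { doNotClear }, while slow_tests calls with no arguments.
  await using f = await factory();
  // ...use f; it is disposed automatically at scope exit
}
```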
package/test/src/validation.test.ts
CHANGED

@@ -1,10 +1,12 @@
-import { MONGO_STORAGE_FACTORY } from '@core-tests/util.js';
 import { getDebugTablesInfo } from '@module/replication/replication-utils.js';
 import { expect, test } from 'vitest';
+
+// Not quite a walStreamTest, but it helps to manage the connection
+import { INITIALIZED_MONGO_STORAGE_FACTORY } from './util.js';
 import { WalStreamTestContext } from './wal_stream_utils.js';
 
 test('validate tables', async () => {
-  await using context = await WalStreamTestContext.open(
+  await using context = await WalStreamTestContext.open(INITIALIZED_MONGO_STORAGE_FACTORY);
   const { pool } = context;
 
   await pool.query(`CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`);
package/test/src/wal_stream.test.ts
CHANGED

@@ -1,13 +1,12 @@
-import {
-import {
-import {
+import { MissingReplicationSlotError } from '@module/replication/WalStream.js';
+import { Metrics, storage } from '@powersync/service-core';
+import { putOp, removeOp } from '@powersync/service-core-tests';
 import { pgwireRows } from '@powersync/service-jpgwire';
 import * as crypto from 'crypto';
 import { describe, expect, test } from 'vitest';
+import { env } from './env.js';
+import { INITIALIZED_MONGO_STORAGE_FACTORY, INITIALIZED_POSTGRES_STORAGE_FACTORY } from './util.js';
 import { WalStreamTestContext } from './wal_stream_utils.js';
-import { MissingReplicationSlotError } from '@module/replication/WalStream.js';
-
-type StorageFactory = () => Promise<BucketStorageFactory>;
 
 const BASIC_SYNC_RULES = `
 bucket_definitions:
@@ -16,11 +15,15 @@ bucket_definitions:
       - SELECT id, description FROM "test_data"
 `;
 
-describe('wal stream - mongodb', { timeout: 20_000 }, function () {
-  defineWalStreamTests(
+describe.skipIf(!env.TEST_MONGO_STORAGE)('wal stream - mongodb', { timeout: 20_000 }, function () {
+  defineWalStreamTests(INITIALIZED_MONGO_STORAGE_FACTORY);
+});
+
+describe.skipIf(!env.TEST_POSTGRES_STORAGE)('wal stream - postgres', { timeout: 20_000 }, function () {
+  defineWalStreamTests(INITIALIZED_POSTGRES_STORAGE_FACTORY);
 });
 
-function defineWalStreamTests(factory:
+function defineWalStreamTests(factory: storage.TestStorageFactory) {
   test('replicating basic values', async () => {
     await using context = await WalStreamTestContext.open(factory);
     const { pool } = context;
package/test/src/wal_stream_utils.ts
CHANGED

@@ -1,10 +1,9 @@
-import { fromAsync } from '@core-tests/stream_utils.js';
 import { PgManager } from '@module/replication/PgManager.js';
 import { PUBLICATION_NAME, WalStream, WalStreamOptions } from '@module/replication/WalStream.js';
-import { BucketStorageFactory, OplogEntry, SyncRulesBucketStorage } from '@powersync/service-core';
+import { BucketStorageFactory, OplogEntry, storage, SyncRulesBucketStorage } from '@powersync/service-core';
+import { test_utils } from '@powersync/service-core-tests';
 import * as pgwire from '@powersync/service-jpgwire';
 import { clearTestDb, getClientCheckpoint, TEST_CONNECTION_OPTIONS } from './util.js';
-import { StorageOptions } from '@core-tests/util.js';
 
 export class WalStreamTestContext implements AsyncDisposable {
   private _walStream?: WalStream;
@@ -20,7 +19,7 @@ export class WalStreamTestContext implements AsyncDisposable {
    * This configures all the context, and tears it down afterwards.
    */
   static async open(
-    factory: (options:
+    factory: (options: storage.TestStorageOptions) => Promise<BucketStorageFactory>,
     options?: { doNotClear?: boolean }
   ) {
     const f = await factory({ doNotClear: options?.doNotClear });
@@ -47,6 +46,7 @@ export class WalStreamTestContext implements AsyncDisposable {
     await this.streamPromise;
     await this.connectionManager.destroy();
     this.storage?.[Symbol.dispose]();
+    await this.factory?.[Symbol.asyncDispose]();
   }
 
   get pool() {
@@ -132,7 +132,7 @@ export class WalStreamTestContext implements AsyncDisposable {
   async getBucketsDataBatch(buckets: Record<string, string>, options?: { timeout?: number }) {
     let checkpoint = await this.getCheckpoint(options);
     const map = new Map<string, string>(Object.entries(buckets));
-    return fromAsync(this.storage!.getBucketDataBatch(checkpoint, map));
+    return test_utils.fromAsync(this.storage!.getBucketDataBatch(checkpoint, map));
   }
 
   /**
@@ -146,7 +146,7 @@ export class WalStreamTestContext implements AsyncDisposable {
     while (true) {
       const batch = this.storage!.getBucketDataBatch(checkpoint, map);
 
-      const batches = await fromAsync(batch);
+      const batches = await test_utils.fromAsync(batch);
       data = data.concat(batches[0]?.batch.data ?? []);
       if (batches.length == 0 || !batches[0]!.batch.has_more) {
         break;
@@ -164,7 +164,7 @@ export class WalStreamTestContext implements AsyncDisposable {
     const { checkpoint } = await this.storage!.getCheckpoint();
     const map = new Map<string, string>([[bucket, start]]);
     const batch = this.storage!.getBucketDataBatch(checkpoint, map);
-    const batches = await fromAsync(batch);
+    const batches = await test_utils.fromAsync(batch);
     return batches[0]?.batch.data ?? [];
   }
 }