@powersync/service-module-postgres 0.19.3 → 0.19.4
- package/dist/api/PostgresRouteAPIAdapter.d.ts +1 -1
- package/dist/api/PostgresRouteAPIAdapter.js +63 -72
- package/dist/api/PostgresRouteAPIAdapter.js.map +1 -1
- package/dist/module/PostgresModule.js.map +1 -1
- package/dist/replication/MissingReplicationSlotError.d.ts +41 -0
- package/dist/replication/MissingReplicationSlotError.js +33 -0
- package/dist/replication/MissingReplicationSlotError.js.map +1 -0
- package/dist/replication/PostgresErrorRateLimiter.js +1 -1
- package/dist/replication/PostgresErrorRateLimiter.js.map +1 -1
- package/dist/replication/SnapshotQuery.js +2 -2
- package/dist/replication/SnapshotQuery.js.map +1 -1
- package/dist/replication/WalStream.d.ts +35 -3
- package/dist/replication/WalStream.js +135 -9
- package/dist/replication/WalStream.js.map +1 -1
- package/dist/replication/WalStreamReplicationJob.js +6 -3
- package/dist/replication/WalStreamReplicationJob.js.map +1 -1
- package/dist/replication/replication-index.d.ts +3 -1
- package/dist/replication/replication-index.js +3 -1
- package/dist/replication/replication-index.js.map +1 -1
- package/dist/replication/replication-utils.d.ts +3 -11
- package/dist/replication/replication-utils.js +101 -164
- package/dist/replication/replication-utils.js.map +1 -1
- package/dist/replication/wal-budget-utils.d.ts +23 -0
- package/dist/replication/wal-budget-utils.js +57 -0
- package/dist/replication/wal-budget-utils.js.map +1 -0
- package/dist/types/registry.js +1 -1
- package/dist/types/registry.js.map +1 -1
- package/package.json +15 -11
- package/sql/check-source-configuration.plpgsql +13 -0
- package/sql/debug-tables-info-batched.plpgsql +230 -0
- package/CHANGELOG.md +0 -858
- package/src/api/PostgresRouteAPIAdapter.ts +0 -356
- package/src/index.ts +0 -1
- package/src/module/PostgresModule.ts +0 -122
- package/src/replication/ConnectionManagerFactory.ts +0 -33
- package/src/replication/PgManager.ts +0 -122
- package/src/replication/PgRelation.ts +0 -41
- package/src/replication/PostgresErrorRateLimiter.ts +0 -48
- package/src/replication/SnapshotQuery.ts +0 -213
- package/src/replication/WalStream.ts +0 -1137
- package/src/replication/WalStreamReplicationJob.ts +0 -138
- package/src/replication/WalStreamReplicator.ts +0 -53
- package/src/replication/replication-index.ts +0 -5
- package/src/replication/replication-utils.ts +0 -398
- package/src/types/registry.ts +0 -275
- package/src/types/resolver.ts +0 -227
- package/src/types/types.ts +0 -44
- package/src/utils/application-name.ts +0 -8
- package/src/utils/migration_lib.ts +0 -80
- package/src/utils/populate_test_data.ts +0 -37
- package/src/utils/populate_test_data_worker.ts +0 -53
- package/src/utils/postgres_version.ts +0 -8
- package/test/src/checkpoints.test.ts +0 -86
- package/test/src/chunked_snapshots.test.ts +0 -161
- package/test/src/env.ts +0 -11
- package/test/src/large_batch.test.ts +0 -241
- package/test/src/pg_test.test.ts +0 -729
- package/test/src/resuming_snapshots.test.ts +0 -160
- package/test/src/route_api_adapter.test.ts +0 -62
- package/test/src/schema_changes.test.ts +0 -655
- package/test/src/setup.ts +0 -12
- package/test/src/slow_tests.test.ts +0 -519
- package/test/src/storage_combination.test.ts +0 -35
- package/test/src/types/registry.test.ts +0 -149
- package/test/src/util.ts +0 -151
- package/test/src/validation.test.ts +0 -63
- package/test/src/wal_stream.test.ts +0 -607
- package/test/src/wal_stream_utils.ts +0 -284
- package/test/tsconfig.json +0 -27
- package/tsconfig.json +0 -34
- package/tsconfig.tsbuildinfo +0 -1
- package/vitest.config.ts +0 -3
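
Every `package/src/**` and `package/test/**` entry above, along with `CHANGELOG.md` and the `tsconfig` files, shows `+0` added lines: 0.19.4 stops shipping the TypeScript sources, tests, and changelog in the published package. The compiled output gains new `dist/replication/MissingReplicationSlotError` and `dist/replication/wal-budget-utils` modules, plus two new `sql/*.plpgsql` scripts. The hunks below reproduce a subset of the deleted source files.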
package/src/utils/populate_test_data_worker.ts
DELETED
@@ -1,53 +0,0 @@
-import * as crypto from 'crypto';
-import { isMainThread, parentPort, workerData } from 'node:worker_threads';
-
-import * as pgwire from '@powersync/service-jpgwire';
-import type { PopulateDataOptions } from './populate_test_data.js';
-
-// This util is actually for tests only, but we need it compiled to JS for the service to work, so it's placed in the service.
-
-if (isMainThread || parentPort == null) {
-  // Must not be imported - only expected to run in a worker
-  throw new Error('Do not import this file');
-} else {
-  try {
-    const options = workerData as PopulateDataOptions;
-    if (options == null) {
-      throw new Error('loaded worker without options');
-    }
-
-    const result = await populateDataInner(options);
-    parentPort.postMessage(result);
-    process.exit(0);
-  } catch (e) {
-    // This is a bug, not a connection issue
-    console.error(e);
-    // Only closes the Worker thread
-    process.exit(2);
-  }
-}
-
-async function populateDataInner(options: PopulateDataOptions) {
-  // Dedicated connection so we can release the memory easily
-  const initialDb = await pgwire.connectPgWire(options.connection, {
-    type: 'standard',
-    applicationName: 'powersync-tests'
-  });
-  const largeDescription = crypto.randomBytes(options.size / 2).toString('hex');
-  let operation_count = 0;
-  for (let i = 0; i < options.num_transactions; i++) {
-    const prefix = `test${i}K`;
-
-    await initialDb.query({
-      statement: `INSERT INTO test_data(id, description, other) SELECT $1 || i, $2, 'foo' FROM generate_series(1, $3) i`,
-      params: [
-        { type: 'varchar', value: prefix },
-        { type: 'varchar', value: largeDescription },
-        { type: 'int4', value: options.per_transaction }
-      ]
-    });
-    operation_count += options.per_transaction;
-  }
-  await initialDb.end();
-  return operation_count;
-}
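
The worker above reads `workerData`, runs `populateDataInner`, and posts the operation count back before exiting. The main-thread side lived in the deleted `populate_test_data.ts`, which is not shown in this diff; the following is only a minimal sketch of how such a worker is typically spawned, with the options type inlined here as an assumption:

import { Worker } from 'node:worker_threads';

// Assumed shape, mirroring the fields the worker reads; the real type
// lived in the deleted populate_test_data.ts.
interface PopulateDataOptions {
  connection: unknown; // pgwire connection options in the real code
  num_transactions: number;
  per_transaction: number;
  size: number;
}

function populateData(options: PopulateDataOptions): Promise<number> {
  // Pass the options as workerData, matching `workerData as PopulateDataOptions`
  // in the worker file above.
  const worker = new Worker(new URL('./populate_test_data_worker.js', import.meta.url), {
    workerData: options
  });
  return new Promise((resolve, reject) => {
    // The worker posts its operation count before calling process.exit(0).
    worker.on('message', (operationCount: number) => resolve(operationCount));
    worker.on('error', reject);
    worker.on('exit', (code) => {
      if (code !== 0) {
        reject(new Error(`Worker exited with code ${code}`));
      }
    });
  });
}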
package/src/utils/postgres_version.ts
DELETED
@@ -1,8 +0,0 @@
-import * as pgwire from '@powersync/service-jpgwire';
-import semver, { type SemVer } from 'semver';
-
-export async function getServerVersion(db: pgwire.PgClient): Promise<SemVer | null> {
-  const result = await db.query(`SHOW server_version;`);
-  // The result is usually of the form "16.2 (Debian 16.2-1.pgdg120+2)"
-  return semver.coerce(result.rows[0].decodeWithoutCustomTypes(0).split(' ')[0]);
-}
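
`getServerVersion` coerces the `SHOW server_version` output into a `SemVer`, which callers can compare against feature cut-offs. A minimal usage sketch, assuming a connected `pgwire.PgClient` named `db` and a hypothetical `supportsFeature` wrapper (the same `compareMain` pattern appears in the checkpoints test below):

import * as pgwire from '@powersync/service-jpgwire';
import { getServerVersion } from './postgres_version.js';

async function supportsFeature(db: pgwire.PgClient): Promise<boolean> {
  const version = await getServerVersion(db);
  // compareMain ignores prerelease/build metadata and returns -1, 0 or 1.
  // Treat an unparseable version as unsupported.
  return version != null && version.compareMain('14.0.0') >= 0;
}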
package/test/src/checkpoints.test.ts
DELETED
@@ -1,86 +0,0 @@
-import { PostgresRouteAPIAdapter } from '@module/api/PostgresRouteAPIAdapter.js';
-import { checkpointUserId, createWriteCheckpoint } from '@powersync/service-core';
-import { describe, test } from 'vitest';
-import { describeWithStorage, StorageVersionTestContext } from './util.js';
-import { WalStreamTestContext } from './wal_stream_utils.js';
-
-import timers from 'node:timers/promises';
-
-const BASIC_SYNC_RULES = `bucket_definitions:
-  global:
-    data:
-      - SELECT id, description, other FROM "test_data"`;
-
-describe('checkpoint tests', () => {
-  describeWithStorage({}, checkpointTests);
-});
-
-const checkpointTests = ({ factory, storageVersion }: StorageVersionTestContext) => {
-  test('write checkpoints', { timeout: 50_000 }, async () => {
-    await using context = await WalStreamTestContext.open(factory, { storageVersion });
-
-    await context.updateSyncRules(BASIC_SYNC_RULES);
-    const { pool } = context;
-    const api = new PostgresRouteAPIAdapter(pool);
-    const serverVersion = await context.connectionManager.getServerVersion();
-    if (serverVersion!.compareMain('14.0.0') < 0) {
-      // The test is not stable on Postgres 11 or 12. See the notes on
-      // PostgresRouteAPIAdapter.createReplicationHead() for details.
-      // Postgres 12 is already EOL, so not worth finding a fix - just skip the tests.
-      // Postgres 13 fares a little better, but even there the test is still unstable.
-      // Postgres 14+ appears to have no issue.
-      console.log('Skipping write checkpoint test on Postgres < 14.0.0');
-      return;
-    }
-
-    await pool.query(`CREATE TABLE test_data(id text primary key, description text, other text)`);
-
-    // Wait for a consistent checkpoint before we start.
-    await context.initializeReplication();
-
-    const storage = context.storage!;
-
-    const controller = new AbortController();
-    try {
-      const stream = storage.watchCheckpointChanges({
-        user_id: checkpointUserId('test_user', 'test_client'),
-        signal: controller.signal
-      });
-
-      let lastWriteCheckpoint: bigint | null = null;
-
-      (async () => {
-        try {
-          for await (const cp of stream) {
-            lastWriteCheckpoint = cp.writeCheckpoint;
-          }
-        } catch (e) {
-          if (e.name != 'AbortError') {
-            throw e;
-          }
-        }
-      })();
-
-      for (let i = 0; i < 10; i++) {
-        const cp = await createWriteCheckpoint({
-          userId: 'test_user',
-          clientId: 'test_client',
-          api,
-          storage: context.factory
-        });
-
-        const start = Date.now();
-        while (lastWriteCheckpoint == null || lastWriteCheckpoint < BigInt(cp.writeCheckpoint)) {
-          if (Date.now() - start > 3_000) {
-            throw new Error(
-              `Timeout while waiting for checkpoint. last: ${lastWriteCheckpoint}, waiting for: ${cp.writeCheckpoint}`
-            );
-          }
-          await timers.setTimeout(5, undefined, { signal: controller.signal });
-        }
-      }
-    } finally {
-      controller.abort();
-    }
-  });
-};
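
The polling loop near the end of this test (wait until `lastWriteCheckpoint` catches up, with a 3-second deadline) generalizes into a small helper. A sketch with a hypothetical `waitFor` name, not part of the package:

import timers from 'node:timers/promises';

// Poll `condition` every `intervalMs` until it returns true, failing after `timeoutMs`.
async function waitFor(condition: () => boolean, timeoutMs: number, intervalMs = 5): Promise<void> {
  const start = Date.now();
  while (!condition()) {
    if (Date.now() - start > timeoutMs) {
      throw new Error(`Timeout after ${timeoutMs}ms waiting for condition`);
    }
    await timers.setTimeout(intervalMs);
  }
}

// Usage mirroring the test:
// await waitFor(() => lastWriteCheckpoint != null && lastWriteCheckpoint >= target, 3_000);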
package/test/src/chunked_snapshots.test.ts
DELETED
@@ -1,161 +0,0 @@
-import { reduceBucket } from '@powersync/service-core';
-import { METRICS_HELPER } from '@powersync/service-core-tests';
-import { SqliteJsonValue } from '@powersync/service-sync-rules';
-import * as crypto from 'node:crypto';
-import * as timers from 'timers/promises';
-import { describe, expect, test } from 'vitest';
-import { describeWithStorage, StorageVersionTestContext } from './util.js';
-import { WalStreamTestContext } from './wal_stream_utils.js';
-
-describe('chunked snapshots', () => {
-  describeWithStorage({ timeout: 120_000 }, defineBatchTests);
-});
-
-function defineBatchTests({ factory, storageVersion }: StorageVersionTestContext) {
-  const openContext = (options?: Parameters<typeof WalStreamTestContext.open>[1]) => {
-    return WalStreamTestContext.open(factory, { ...options, storageVersion });
-  };
-
-  // We need to test every supported type, since chunking could be quite sensitive to
-  // how each specific type is handled.
-  test('chunked snapshot edge case (int2)', async () => {
-    await testChunkedSnapshot({
-      idType: 'int2',
-      genId: 'i',
-      lastId: '2000',
-      moveTo: '0',
-      moveToJs: 0
-    });
-  });
-
-  test('chunked snapshot edge case (int4)', async () => {
-    await testChunkedSnapshot({
-      idType: 'int4',
-      genId: 'i',
-      lastId: '2000',
-      moveTo: '0',
-      moveToJs: 0
-    });
-  });
-
-  test('chunked snapshot edge case (int8)', async () => {
-    await testChunkedSnapshot({
-      idType: 'int8',
-      genId: 'i',
-      lastId: '2000',
-      moveTo: '0',
-      moveToJs: 0
-    });
-  });
-
-  test('chunked snapshot edge case (text)', async () => {
-    await testChunkedSnapshot({
-      idType: 'text',
-      genId: `to_char(i, 'fm0000')`,
-      lastId: `'2000'`,
-      moveTo: `'0000'`,
-      moveToJs: '0000'
-    });
-  });
-
-  test('chunked snapshot edge case (varchar)', async () => {
-    await testChunkedSnapshot({
-      idType: 'varchar',
-      genId: `to_char(i, 'fm0000')`,
-      lastId: `'2000'`,
-      moveTo: `'0000'`,
-      moveToJs: '0000'
-    });
-  });
-
-  test('chunked snapshot edge case (uuid)', async () => {
-    await testChunkedSnapshot({
-      idType: 'uuid',
-      // Generate a uuid by using the first part of a uuid and appending a 4-digit number.
-      genId: `('00000000-0000-4000-8000-00000000' || to_char(i, 'fm0000')) :: uuid`,
-      lastId: `'00000000-0000-4000-8000-000000002000'`,
-      moveTo: `'00000000-0000-4000-8000-000000000000'`,
-      moveToJs: '00000000-0000-4000-8000-000000000000'
-    });
-  });
-
-  async function testChunkedSnapshot(options: {
-    idType: string;
-    genId: string;
-    lastId: string;
-    moveTo: string;
-    moveToJs: SqliteJsonValue;
-  }) {
-    // 1. Start with 2k rows, one row with id = 2000, and a large TOAST value in another column.
-    // 2. Replicate one batch of rows (id < 2000).
-    // 3. `UPDATE table SET id = 0 WHERE id = 2000`
-    // 4. Replicate the rest of the table.
-    // 5. Logical replication picks up the UPDATE above, but it is missing the TOAST column.
-    // 6. We end up with a row that has a missing TOAST column.
-
-    await using context = await openContext({
-      // We need to use a smaller chunk size here, so that we can run a query in between chunks
-      walStreamOptions: { snapshotChunkLength: 100 }
-    });
-
-    await context.updateSyncRules(`bucket_definitions:
-  global:
-    data:
-      - SELECT * FROM test_data`);
-    const { pool } = context;
-
-    await pool.query(`CREATE TABLE test_data(id ${options.idType} primary key, description text)`);
-
-    // 1. Start with 2k rows, one row with id = 2000...
-    await pool.query({
-      statement: `INSERT INTO test_data(id, description) SELECT ${options.genId}, 'foo' FROM generate_series(1, 2000) i`
-    });
-
-    // ...and a large TOAST value in another column.
-    // Toast value, must be > 8kb after compression
-    const largeDescription = crypto.randomBytes(20_000).toString('hex');
-    await pool.query({
-      statement: `UPDATE test_data SET description = $1 WHERE id = ${options.lastId} :: ${options.idType}`,
-      params: [{ type: 'varchar', value: largeDescription }]
-    });
-
-    // 2. Replicate one batch of rows (id < 100).
-    // Our "stopping point" here is not quite deterministic.
-    const p = context.replicateSnapshot();
-
-    const stopAfter = 100;
-    const startRowCount = (await METRICS_HELPER.getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
-
-    while (true) {
-      const count =
-        ((await METRICS_HELPER.getMetricValueForTests('powersync_rows_replicated_total')) ?? 0) - startRowCount;
-
-      if (count >= stopAfter) {
-        break;
-      }
-      await timers.setTimeout(1);
-    }
-
-    // 3. `UPDATE table SET id = 0 WHERE id = 2000`
-    const rs = await pool.query(
-      `UPDATE test_data SET id = ${options.moveTo} WHERE id = ${options.lastId} RETURNING id`
-    );
-    expect(rs.rows.length).toEqual(1);
-
-    // 4. Replicate the rest of the table.
-    await p;
-
-    // 5. Logical replication picks up the UPDATE above, but it is missing the TOAST column.
-    // Note: logical replication now runs concurrently with the snapshot.
-    // TODO: re-check the test logic here.
-
-    // 6. If all went well, the "resnapshot" process would take care of this.
-    const data = await context.getBucketData('global[]', undefined, {});
-    const reduced = reduceBucket(data);
-
-    const movedRow = reduced.find((row) => row.object_id == String(options.moveToJs));
-    expect(movedRow?.data).toEqual(JSON.stringify({ id: options.moveToJs, description: largeDescription }));
-
-    expect(reduced.length).toEqual(2001);
-  }
-}
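
The edge case above depends on the snapshot paging through the table in primary-key order in `snapshotChunkLength`-sized chunks, so a row whose key moves from 2000 to 0 mid-snapshot lands behind the cursor and is only seen again via logical replication, without its TOAST column. A generic keyset-pagination sketch of that chunking idea, with assumed `Row`/`QueryFn` types rather than the actual implementation in the deleted SnapshotQuery.ts:

type Row = { id: number | string; description: string | null };
type QueryFn = (sql: string, params?: unknown[]) => Promise<Row[]>;

// Page through test_data in primary-key order. Rows that move to a key below
// `lastId` while the snapshot is running are never yielded here; logical
// replication must pick them up, and a re-snapshot fills in missing TOAST values.
async function* snapshotChunks(query: QueryFn, chunkLength: number): AsyncGenerator<Row[]> {
  let lastId: Row['id'] | null = null;
  for (;;) {
    const rows =
      lastId == null
        ? await query(`SELECT * FROM test_data ORDER BY id LIMIT ${chunkLength}`)
        : await query(`SELECT * FROM test_data WHERE id > $1 ORDER BY id LIMIT ${chunkLength}`, [lastId]);
    if (rows.length == 0) {
      return;
    }
    yield rows;
    lastId = rows[rows.length - 1].id;
  }
}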
package/test/src/env.ts
DELETED
@@ -1,11 +0,0 @@
-import { utils } from '@powersync/lib-services-framework';
-
-export const env = utils.collectEnvironmentVariables({
-  PG_TEST_URL: utils.type.string.default('postgres://postgres:postgres@localhost:5432/powersync_test'),
-  PG_STORAGE_TEST_URL: utils.type.string.default('postgres://postgres:postgres@localhost:5432/powersync_storage_test'),
-  MONGO_TEST_URL: utils.type.string.default('mongodb://localhost:27017/powersync_test'),
-  CI: utils.type.boolean.default('false'),
-  SLOW_TESTS: utils.type.boolean.default('false'),
-  TEST_MONGO_STORAGE: utils.type.boolean.default('true'),
-  TEST_POSTGRES_STORAGE: utils.type.boolean.default('true')
-});
package/test/src/large_batch.test.ts
DELETED
@@ -1,241 +0,0 @@
-import { describe, expect, test } from 'vitest';
-import { populateData } from '../../dist/utils/populate_test_data.js';
-import { env } from './env.js';
-import { describeWithStorage, StorageVersionTestContext, TEST_CONNECTION_OPTIONS } from './util.js';
-import { WalStreamTestContext } from './wal_stream_utils.js';
-import { CURRENT_STORAGE_VERSION } from '@powersync/service-core';
-
-describe.skipIf(!(env.CI || env.SLOW_TESTS))('batch replication', function () {
-  describeWithStorage(
-    {
-      timeout: 240_000,
-      // These tests are slow, so only test the current storage version
-      storageVersions: [CURRENT_STORAGE_VERSION]
-    },
-    defineBatchTests
-  );
-});
-
-const BASIC_SYNC_RULES = `bucket_definitions:
-  global:
-    data:
-      - SELECT id, description, other FROM "test_data"`;
-
-function defineBatchTests({ factory, storageVersion }: StorageVersionTestContext) {
-  const openContext = (options?: Parameters<typeof WalStreamTestContext.open>[1]) => {
-    return WalStreamTestContext.open(factory, { ...options, storageVersion });
-  };
-
-  test('update large record', async () => {
-    await using context = await openContext();
-    // This test generates a large transaction in MongoDB, despite the replicated data
-    // not being that large.
-    // If we don't limit transaction size, we could run into this error:
-    // > -31800: transaction is too large and will not fit in the storage engine cache
-    await context.updateSyncRules(BASIC_SYNC_RULES);
-    const { pool } = context;
-
-    await pool.query(`CREATE TABLE test_data(id text primary key, description text, other text)`);
-
-    await context.replicateSnapshot();
-
-    let operation_count = await populateData({
-      num_transactions: 1,
-      per_transaction: 80,
-      size: 4_000_000,
-      connection: TEST_CONNECTION_OPTIONS
-    });
-
-    const start = Date.now();
-
-    const checksum = await context.getChecksums(['global[]'], { timeout: 100_000 });
-    const duration = Date.now() - start;
-    const used = Math.round(process.memoryUsage().heapUsed / 1024 / 1024);
-    expect(checksum.get('global[]')!.count).toEqual(operation_count);
-    const perSecond = Math.round((operation_count / duration) * 1000);
-    console.log(`${operation_count} ops in ${duration}ms ${perSecond} ops/s. ${used}MB heap`);
-  });
-
-  test('initial replication performance', async () => {
-    await using context = await openContext();
-    // Manual test to check initial replication performance and memory usage
-    await context.updateSyncRules(BASIC_SYNC_RULES);
-    const { pool } = context;
-
-    await pool.query(`CREATE TABLE test_data(id text primary key, description text, other text)`);
-
-    // Some stats (varies a lot):
-    // Old 'postgres' driver, using cursor(2)
-    // 15 ops in 19559ms 1 ops/s. 354MB RSS, 115MB heap, 137MB external
-    // 25 ops in 42984ms 1 ops/s. 377MB RSS, 129MB heap, 137MB external
-    // 35 ops in 41337ms 1 ops/s. 365MB RSS, 115MB heap, 137MB external
-
-    // streaming with pgwire
-    // 15 ops in 26423ms 1 ops/s. 379MB RSS, 128MB heap, 182MB external, 165MB ArrayBuffers
-    // 35 ops in 78897ms 0 ops/s. 539MB RSS, 52MB heap, 87MB external, 83MB ArrayBuffers
-
-    let operation_count = await populateData({
-      num_transactions: 1,
-      per_transaction: 35,
-      size: 14_000_000,
-      connection: TEST_CONNECTION_OPTIONS
-    });
-
-    global.gc?.();
-
-    // Note that we could already have high memory usage at this point
-    printMemoryUsage();
-
-    let interval = setInterval(() => {
-      printMemoryUsage();
-    }, 2000);
-    try {
-      const start = Date.now();
-
-      await context.replicateSnapshot();
-
-      const checksum = await context.getChecksums(['global[]'], { timeout: 100_000 });
-      const duration = Date.now() - start;
-      expect(checksum.get('global[]')!.count).toEqual(operation_count);
-      const perSecond = Math.round((operation_count / duration) * 1000);
-      console.log(`${operation_count} ops in ${duration}ms ${perSecond} ops/s.`);
-      printMemoryUsage();
-    } finally {
-      clearInterval(interval);
-    }
-  });
-
-  test('large number of operations', async () => {
-    await using context = await openContext();
-    // This just tests performance of a large number of operations inside a transaction.
-    await context.updateSyncRules(BASIC_SYNC_RULES);
-    const { pool } = context;
-
-    await pool.query(`CREATE TABLE test_data(id text primary key, description text, other text)`);
-
-    await context.replicateSnapshot();
-
-    const numTransactions = 20;
-    const perTransaction = 1500;
-    let operationCount = 0;
-
-    const description = 'description';
-
-    for (let i = 0; i < numTransactions; i++) {
-      const prefix = `test${i}K`;
-
-      await pool.query(
-        {
-          statement: `INSERT INTO test_data(id, description, other) SELECT $1 || i, $2 || i, 'foo' FROM generate_series(1, $3) i`,
-          params: [
-            { type: 'varchar', value: prefix },
-            { type: 'varchar', value: description },
-            { type: 'int4', value: perTransaction }
-          ]
-        },
-        {
-          statement: `UPDATE test_data SET other = other || '#' WHERE id LIKE $1 || '%'`,
-          params: [{ type: 'varchar', value: prefix }]
-        }
-      );
-      operationCount += perTransaction * 2;
-    }
-
-    const start = Date.now();
-
-    const checksum = await context.getChecksums(['global[]']);
-    const duration = Date.now() - start;
-    const used = Math.round(process.memoryUsage().heapUsed / 1024 / 1024);
-    expect(checksum.get('global[]')!.count).toEqual(operationCount);
-    const perSecond = Math.round((operationCount / duration) * 1000);
-    // This number depends on the test machine, so we keep the test significantly
-    // lower than expected numbers.
-    expect(perSecond).toBeGreaterThan(1000);
-    console.log(`${operationCount} ops in ${duration}ms ${perSecond} ops/s. ${used}MB heap`);
-
-    // Truncating is fast (~10k ops/second).
-    // We'd need a really large set of data to actually run into limits when truncating,
-    // but we just test with the data we have here.
-    const truncateStart = Date.now();
-    await pool.query(`TRUNCATE test_data`);
-
-    const checksum2 = await context.getChecksums(['global[]'], { timeout: 20_000 });
-    const truncateDuration = Date.now() - truncateStart;
-    const truncateCount = checksum2.get('global[]')!.count - checksum.get('global[]')!.count;
-    expect(truncateCount).toEqual(numTransactions * perTransaction);
-    const truncatePerSecond = Math.round((truncateCount / truncateDuration) * 1000);
-    console.log(`Truncated ${truncateCount} ops in ${truncateDuration}ms ${truncatePerSecond} ops/s. ${used}MB heap`);
-  });
-
-  test('large number of bucket_data docs', async () => {
-    // This tests that we don't run into this error:
-    // MongoBulkWriteError: BSONObj size: 16814023 (0x1008FC7) is invalid. Size must be between 0 and 16793600(16MB) First element: insert: "bucket_data"
-    // The test is quite sensitive to internals, since we need to
-    // generate an internal batch that is just below 16MB.
-    //
-    // For the test to work, we need a:
-    // 1. Large number of documents in the batch.
-    // 2. More bucket_data documents than current_data documents,
-    //    otherwise other batch limiting thresholds are hit.
-    // 3. A large document to make sure we get to just below the 16MB
-    //    limit.
-    // 4. Another document to make sure the internal batching overflows
-    //    to a second batch.
-
-    await using context = await openContext();
-    await context.updateSyncRules(`bucket_definitions:
-  global:
-    data:
-      # Sync 4x so we get more bucket_data documents
-      - SELECT * FROM test_data
-      - SELECT * FROM test_data
-      - SELECT * FROM test_data
-      - SELECT * FROM test_data
-`);
-    const { pool } = context;
-
-    await pool.query(`CREATE TABLE test_data(id serial primary key, description text)`);
-
-    const numDocs = 499;
-    let description = '';
-    while (description.length < 2650) {
-      description += '.';
-    }
-
-    await pool.query({
-      statement: `INSERT INTO test_data(description) SELECT $2 FROM generate_series(1, $1) i`,
-      params: [
-        { type: 'int4', value: numDocs },
-        { type: 'varchar', value: description }
-      ]
-    });
-
-    let largeDescription = '';
-
-    while (largeDescription.length < 2_768_000) {
-      largeDescription += '.';
-    }
-    await pool.query({
-      statement: 'INSERT INTO test_data(description) VALUES($1)',
-      params: [{ type: 'varchar', value: largeDescription }]
-    });
-    await pool.query({
-      statement: 'INSERT INTO test_data(description) VALUES($1)',
-      params: [{ type: 'varchar', value: 'testingthis' }]
-    });
-    await context.replicateSnapshot();
-
-    const checksum = await context.getChecksums(['global[]'], { timeout: 50_000 });
-    expect(checksum.get('global[]')!.count).toEqual((numDocs + 2) * 4);
-  });
-
-  function printMemoryUsage() {
-    const memoryUsage = process.memoryUsage();
-
-    const rss = Math.round(memoryUsage.rss / 1024 / 1024);
-    const heap = Math.round(memoryUsage.heapUsed / 1024 / 1024);
-    const external = Math.round(memoryUsage.external / 1024 / 1024);
-    const arrayBuffers = Math.round(memoryUsage.arrayBuffers / 1024 / 1024);
-    console.log(`${rss}MB RSS, ${heap}MB heap, ${external}MB external, ${arrayBuffers}MB ArrayBuffers`);
-  }
-}