@powersync/service-module-postgres 0.0.0-dev-20240918092408

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. package/CHANGELOG.md +18 -0
  2. package/LICENSE +67 -0
  3. package/README.md +3 -0
  4. package/dist/api/PostgresRouteAPIAdapter.d.ts +22 -0
  5. package/dist/api/PostgresRouteAPIAdapter.js +273 -0
  6. package/dist/api/PostgresRouteAPIAdapter.js.map +1 -0
  7. package/dist/auth/SupabaseKeyCollector.d.ts +22 -0
  8. package/dist/auth/SupabaseKeyCollector.js +64 -0
  9. package/dist/auth/SupabaseKeyCollector.js.map +1 -0
  10. package/dist/index.d.ts +3 -0
  11. package/dist/index.js +4 -0
  12. package/dist/index.js.map +1 -0
  13. package/dist/module/PostgresModule.d.ts +14 -0
  14. package/dist/module/PostgresModule.js +108 -0
  15. package/dist/module/PostgresModule.js.map +1 -0
  16. package/dist/replication/ConnectionManagerFactory.d.ts +10 -0
  17. package/dist/replication/ConnectionManagerFactory.js +21 -0
  18. package/dist/replication/ConnectionManagerFactory.js.map +1 -0
  19. package/dist/replication/PgManager.d.ts +25 -0
  20. package/dist/replication/PgManager.js +60 -0
  21. package/dist/replication/PgManager.js.map +1 -0
  22. package/dist/replication/PgRelation.d.ts +6 -0
  23. package/dist/replication/PgRelation.js +27 -0
  24. package/dist/replication/PgRelation.js.map +1 -0
  25. package/dist/replication/PostgresErrorRateLimiter.d.ts +11 -0
  26. package/dist/replication/PostgresErrorRateLimiter.js +43 -0
  27. package/dist/replication/PostgresErrorRateLimiter.js.map +1 -0
  28. package/dist/replication/WalStream.d.ts +53 -0
  29. package/dist/replication/WalStream.js +536 -0
  30. package/dist/replication/WalStream.js.map +1 -0
  31. package/dist/replication/WalStreamReplicationJob.d.ts +27 -0
  32. package/dist/replication/WalStreamReplicationJob.js +131 -0
  33. package/dist/replication/WalStreamReplicationJob.js.map +1 -0
  34. package/dist/replication/WalStreamReplicator.d.ts +13 -0
  35. package/dist/replication/WalStreamReplicator.js +36 -0
  36. package/dist/replication/WalStreamReplicator.js.map +1 -0
  37. package/dist/replication/replication-index.d.ts +5 -0
  38. package/dist/replication/replication-index.js +6 -0
  39. package/dist/replication/replication-index.js.map +1 -0
  40. package/dist/replication/replication-utils.d.ts +32 -0
  41. package/dist/replication/replication-utils.js +272 -0
  42. package/dist/replication/replication-utils.js.map +1 -0
  43. package/dist/types/types.d.ts +76 -0
  44. package/dist/types/types.js +110 -0
  45. package/dist/types/types.js.map +1 -0
  46. package/dist/utils/migration_lib.d.ts +11 -0
  47. package/dist/utils/migration_lib.js +64 -0
  48. package/dist/utils/migration_lib.js.map +1 -0
  49. package/dist/utils/pgwire_utils.d.ts +16 -0
  50. package/dist/utils/pgwire_utils.js +70 -0
  51. package/dist/utils/pgwire_utils.js.map +1 -0
  52. package/dist/utils/populate_test_data.d.ts +8 -0
  53. package/dist/utils/populate_test_data.js +65 -0
  54. package/dist/utils/populate_test_data.js.map +1 -0
  55. package/package.json +49 -0
  56. package/src/api/PostgresRouteAPIAdapter.ts +307 -0
  57. package/src/auth/SupabaseKeyCollector.ts +70 -0
  58. package/src/index.ts +5 -0
  59. package/src/module/PostgresModule.ts +122 -0
  60. package/src/replication/ConnectionManagerFactory.ts +28 -0
  61. package/src/replication/PgManager.ts +70 -0
  62. package/src/replication/PgRelation.ts +31 -0
  63. package/src/replication/PostgresErrorRateLimiter.ts +44 -0
  64. package/src/replication/WalStream.ts +639 -0
  65. package/src/replication/WalStreamReplicationJob.ts +142 -0
  66. package/src/replication/WalStreamReplicator.ts +45 -0
  67. package/src/replication/replication-index.ts +5 -0
  68. package/src/replication/replication-utils.ts +329 -0
  69. package/src/types/types.ts +159 -0
  70. package/src/utils/migration_lib.ts +79 -0
  71. package/src/utils/pgwire_utils.ts +73 -0
  72. package/src/utils/populate_test_data.ts +77 -0
  73. package/test/src/__snapshots__/pg_test.test.ts.snap +256 -0
  74. package/test/src/env.ts +7 -0
  75. package/test/src/large_batch.test.ts +195 -0
  76. package/test/src/pg_test.test.ts +450 -0
  77. package/test/src/schema_changes.test.ts +543 -0
  78. package/test/src/setup.ts +7 -0
  79. package/test/src/slow_tests.test.ts +335 -0
  80. package/test/src/util.ts +105 -0
  81. package/test/src/validation.test.ts +64 -0
  82. package/test/src/wal_stream.test.ts +319 -0
  83. package/test/src/wal_stream_utils.ts +121 -0
  84. package/test/tsconfig.json +28 -0
  85. package/tsconfig.json +31 -0
  86. package/tsconfig.tsbuildinfo +1 -0
  87. package/vitest.config.ts +9 -0
package/src/utils/migration_lib.ts
@@ -0,0 +1,79 @@
+ import * as pgwire from '@powersync/service-jpgwire';
+
+ export type MigrationFunction = (db: pgwire.PgConnection) => Promise<void>;
+
+ interface Migration {
+   id: number;
+   name: string;
+   up: MigrationFunction;
+ }
+
+ // Very loosely based on https://github.com/porsager/postgres-shift/
+ export class Migrations {
+   private migrations: Migration[] = [];
+
+   add(id: number, name: string, up: MigrationFunction) {
+     if (this.migrations.length > 0 && this.migrations[this.migrations.length - 1].id >= id) {
+       throw new Error('Migration ids must be strictly incrementing');
+     }
+     this.migrations.push({ id, up, name });
+   }
+
+   async up(db: pgwire.PgConnection) {
+     await db.query('BEGIN');
+     try {
+       await this.ensureMigrationsTable(db);
+       const current = await this.getCurrentMigration(db);
+       let currentId = current ? current.id : 0;
+
+       for (let migration of this.migrations) {
+         if (migration.id <= currentId) {
+           continue;
+         }
+         await migration.up(db);
+
+         await db.query({
+           statement: `
+             insert into migrations (
+               migration_id,
+               name
+             ) values (
+               $1,
+               $2
+             )
+           `,
+           params: [
+             { type: 'int4', value: migration.id },
+             { type: 'varchar', value: migration.name }
+           ]
+         });
+       }
+
+       await db.query('COMMIT');
+     } catch (e) {
+       await db.query('ROLLBACK');
+       throw e;
+     }
+   }
+
+   getCurrentMigration(db: pgwire.PgConnection) {
+     return db
+       .query(
+         `
+           select migration_id as id from migrations
+           order by migration_id desc
+           limit 1
+         `
+       )
+       .then((results) => ({ id: results.rows[0][0] as number }));
+   }
+
+   async ensureMigrationsTable(db: pgwire.PgConnection) {
+     await db.query(`create table if not exists migrations (
+       migration_id serial primary key,
+       created_at timestamp with time zone not null default now(),
+       name text
+     )
+     `);
+   }
+ }
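For context, a minimal usage sketch of the Migrations class above. The deep import path is an assumption (migration_lib is not visibly re-exported from the package index), the migration name and table are hypothetical, and note that getCurrentMigration reads results.rows[0][0] without an empty-result guard, so this sketch assumes the migrations table already contains at least one recorded migration.

import * as pgwire from '@powersync/service-jpgwire';
// Assumed deep import path; adjust to wherever Migrations is exposed in your build.
import { Migrations } from '@powersync/service-module-postgres/dist/utils/migration_lib.js';

declare const connectionConfig: pgwire.NormalizedConnectionConfig;

const migrations = new Migrations();
// Hypothetical migration; ids must strictly increase across add() calls.
migrations.add(1, 'create_example', async (db) => {
  await db.query(`create table if not exists example (id serial primary key)`);
});

const db = await pgwire.connectPgWire(connectionConfig, { type: 'standard' });
try {
  // up() runs BEGIN, applies each migration with id greater than the last
  // recorded one, records it in the migrations table, then COMMIT
  // (ROLLBACK and rethrow on error).
  await migrations.up(db);
} finally {
  await db.end();
}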
package/src/utils/pgwire_utils.ts
@@ -0,0 +1,73 @@
+ // Adapted from https://github.com/kagis/pgwire/blob/0dc927f9f8990a903f238737326e53ba1c8d094f/mod.js#L2218
+
+ import * as pgwire from '@powersync/service-jpgwire';
+ import { SqliteJsonValue, SqliteRow, toSyncRulesRow } from '@powersync/service-sync-rules';
+
+ import { logger } from '@powersync/lib-services-framework';
+
+ /**
+  * pgwire message -> SQLite row.
+  * @param message
+  */
+ export function constructAfterRecord(message: pgwire.PgoutputInsert | pgwire.PgoutputUpdate): SqliteRow {
+   const rawData = (message as any).afterRaw;
+
+   const record = pgwire.decodeTuple(message.relation, rawData);
+   return toSyncRulesRow(record);
+ }
+
+ /**
+  * pgwire message -> SQLite row.
+  * @param message
+  */
+ export function constructBeforeRecord(message: pgwire.PgoutputDelete | pgwire.PgoutputUpdate): SqliteRow | undefined {
+   const rawData = (message as any).beforeRaw;
+   if (rawData == null) {
+     return undefined;
+   }
+   const record = pgwire.decodeTuple(message.relation, rawData);
+   return toSyncRulesRow(record);
+ }
+
+ export function escapeIdentifier(identifier: string) {
+   return `"${identifier.replace(/"/g, '""').replace(/\./g, '"."')}"`;
+ }
+
+ export function autoParameter(arg: SqliteJsonValue | boolean): pgwire.StatementParam {
+   if (arg == null) {
+     return { type: 'varchar', value: null };
+   } else if (typeof arg == 'string') {
+     return { type: 'varchar', value: arg };
+   } else if (typeof arg == 'number') {
+     if (Number.isInteger(arg)) {
+       return { type: 'int8', value: arg };
+     } else {
+       return { type: 'float8', value: arg };
+     }
+   } else if (typeof arg == 'boolean') {
+     return { type: 'bool', value: arg };
+   } else if (typeof arg == 'bigint') {
+     return { type: 'int8', value: arg };
+   } else {
+     throw new Error(`Unsupported query parameter: ${typeof arg}`);
+   }
+ }
+
+ export async function retriedQuery(db: pgwire.PgClient, ...statements: pgwire.Statement[]): Promise<pgwire.PgResult>;
+ export async function retriedQuery(db: pgwire.PgClient, query: string): Promise<pgwire.PgResult>;
+
+ /**
+  * Retry a simple query - up to 2 attempts total.
+  */
+ export async function retriedQuery(db: pgwire.PgClient, ...args: any[]) {
+   for (let tries = 2; ; tries--) {
+     try {
+       return await db.query(...args);
+     } catch (e) {
+       if (tries == 1) {
+         throw e;
+       }
+       logger.warn('Query error, retrying', e);
+     }
+   }
+ }
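A short sketch of how these helpers compose, assuming a connected pgwire client; the deep import path is an assumption, everything else follows the signatures shown above.

import * as pgwire from '@powersync/service-jpgwire';
// Assumed deep import path for the helpers in this file.
import {
  escapeIdentifier,
  autoParameter,
  retriedQuery
} from '@powersync/service-module-postgres/dist/utils/pgwire_utils.js';

declare const db: pgwire.PgClient;

// escapeIdentifier quotes each dot-separated part: -> "public"."test_data"
const table = escapeIdentifier('public.test_data');

// autoParameter maps JS values to pgwire parameter types:
// string -> varchar, integer -> int8, float -> float8,
// boolean -> bool, bigint -> int8, null -> varchar null.
const result = await retriedQuery(db, {
  statement: `SELECT count(*) FROM ${table} WHERE description = $1`,
  params: [autoParameter('foo')]
});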
package/src/utils/populate_test_data.ts
@@ -0,0 +1,77 @@
+ import * as crypto from 'crypto';
+ import { Worker, isMainThread, parentPort, workerData } from 'node:worker_threads';
+
+ import * as pgwire from '@powersync/service-jpgwire';
+
+ // This util is actually for tests only, but we need it compiled to JS for the service to work, so it's placed in the service.
+
+ export interface PopulateDataOptions {
+   connection: pgwire.NormalizedConnectionConfig;
+   num_transactions: number;
+   per_transaction: number;
+   size: number;
+ }
+
+ if (isMainThread || parentPort == null) {
+   // Not a worker - ignore
+ } else {
+   try {
+     const options = workerData as PopulateDataOptions;
+
+     const result = await populateDataInner(options);
+     parentPort.postMessage(result);
+     process.exit(0);
+   } catch (e) {
+     // This is a bug, not a connection issue
+     console.error(e);
+     // Only closes the Worker thread
+     process.exit(2);
+   }
+ }
+
+ async function populateDataInner(options: PopulateDataOptions) {
+   // Dedicated connection so we can release the memory easily
+   const initialDb = await pgwire.connectPgWire(options.connection, { type: 'standard' });
+   const largeDescription = crypto.randomBytes(options.size / 2).toString('hex');
+   let operation_count = 0;
+   for (let i = 0; i < options.num_transactions; i++) {
+     const prefix = `test${i}K`;
+
+     await initialDb.query({
+       statement: `INSERT INTO test_data(id, description, other) SELECT $1 || i, $2, 'foo' FROM generate_series(1, $3) i`,
+       params: [
+         { type: 'varchar', value: prefix },
+         { type: 'varchar', value: largeDescription },
+         { type: 'int4', value: options.per_transaction }
+       ]
+     });
+     operation_count += options.per_transaction;
+   }
+   await initialDb.end();
+   return operation_count;
+ }
+
+ export async function populateData(options: PopulateDataOptions) {
+   const WORKER_TIMEOUT = 30_000;
+
+   const worker = new Worker(new URL('./populate_test_data.js', import.meta.url), {
+     workerData: options
+   });
+   const timeout = setTimeout(() => {
+     // Exits with code 1 below
+     worker.terminate();
+   }, WORKER_TIMEOUT);
+   try {
+     return await new Promise<number>((resolve, reject) => {
+       worker.on('message', resolve);
+       worker.on('error', reject);
+       worker.on('exit', (code) => {
+         if (code !== 0) {
+           reject(new Error(`Populating data failed with exit code ${code}`));
+         }
+       });
+     });
+   } finally {
+     clearTimeout(timeout);
+   }
+ }
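Usage is a single call from the main thread: populateData spawns the worker above and resolves with the number of inserted rows. A minimal sketch, assuming a test_data(id text primary key, description text, other text) table already exists; note the tests import it from dist, since the Worker entry point resolves ./populate_test_data.js relative to the compiled file.

import * as pgwire from '@powersync/service-jpgwire';
import { populateData } from '@powersync/service-module-postgres/dist/utils/populate_test_data.js';

declare const connection: pgwire.NormalizedConnectionConfig;

const operationCount = await populateData({
  connection,
  num_transactions: 5,  // number of INSERT batches
  per_transaction: 100, // rows per batch
  size: 1_000_000       // description length in hex chars (size / 2 random bytes)
});
console.log(`inserted ${operationCount} rows`);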
package/test/src/__snapshots__/pg_test.test.ts.snap
@@ -0,0 +1,256 @@
+ // Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
+
+ exports[`pg data types > schema 1`] = `
+ [
+   {
+     "name": "public",
+     "tables": [
+       {
+         "columns": [
+           {
+             "name": "id",
+             "pg_type": "int4",
+             "type": "integer",
+           },
+           {
+             "name": "text",
+             "pg_type": "text",
+             "type": "text",
+           },
+           {
+             "name": "uuid",
+             "pg_type": "uuid",
+             "type": "uuid",
+           },
+           {
+             "name": "varchar",
+             "pg_type": "varchar",
+             "type": "character varying(255)",
+           },
+           {
+             "name": "bool",
+             "pg_type": "bool",
+             "type": "boolean",
+           },
+           {
+             "name": "bytea",
+             "pg_type": "bytea",
+             "type": "bytea",
+           },
+           {
+             "name": "int2",
+             "pg_type": "int2",
+             "type": "smallint",
+           },
+           {
+             "name": "int4",
+             "pg_type": "int4",
+             "type": "integer",
+           },
+           {
+             "name": "int8",
+             "pg_type": "int8",
+             "type": "bigint",
+           },
+           {
+             "name": "float4",
+             "pg_type": "float4",
+             "type": "real",
+           },
+           {
+             "name": "float8",
+             "pg_type": "float8",
+             "type": "double precision",
+           },
+           {
+             "name": "numeric",
+             "pg_type": "numeric",
+             "type": "numeric",
+           },
+           {
+             "name": "json",
+             "pg_type": "json",
+             "type": "json",
+           },
+           {
+             "name": "jsonb",
+             "pg_type": "jsonb",
+             "type": "jsonb",
+           },
+           {
+             "name": "pg_lsn",
+             "pg_type": "pg_lsn",
+             "type": "pg_lsn",
+           },
+           {
+             "name": "date",
+             "pg_type": "date",
+             "type": "date",
+           },
+           {
+             "name": "time",
+             "pg_type": "time",
+             "type": "time without time zone",
+           },
+           {
+             "name": "timestamp",
+             "pg_type": "timestamp",
+             "type": "timestamp without time zone",
+           },
+           {
+             "name": "timestamptz",
+             "pg_type": "timestamptz",
+             "type": "timestamp with time zone",
+           },
+           {
+             "name": "interval",
+             "pg_type": "interval",
+             "type": "interval",
+           },
+           {
+             "name": "macaddr",
+             "pg_type": "macaddr",
+             "type": "macaddr",
+           },
+           {
+             "name": "inet",
+             "pg_type": "inet",
+             "type": "inet",
+           },
+           {
+             "name": "oid",
+             "pg_type": "oid",
+             "type": "oid",
+           },
+         ],
+         "name": "test_data",
+       },
+       {
+         "columns": [
+           {
+             "name": "id",
+             "pg_type": "int4",
+             "type": "integer",
+           },
+           {
+             "name": "text",
+             "pg_type": "text[]",
+             "type": "text[]",
+           },
+           {
+             "name": "uuid",
+             "pg_type": "uuid[]",
+             "type": "uuid[]",
+           },
+           {
+             "name": "varchar",
+             "pg_type": "varchar[]",
+             "type": "character varying(255)[]",
+           },
+           {
+             "name": "bool",
+             "pg_type": "bool[]",
+             "type": "boolean[]",
+           },
+           {
+             "name": "bytea",
+             "pg_type": "bytea[]",
+             "type": "bytea[]",
+           },
+           {
+             "name": "int2",
+             "pg_type": "int2[]",
+             "type": "smallint[]",
+           },
+           {
+             "name": "int4",
+             "pg_type": "int4[]",
+             "type": "integer[]",
+           },
+           {
+             "name": "int8",
+             "pg_type": "int8[]",
+             "type": "bigint[]",
+           },
+           {
+             "name": "float4",
+             "pg_type": "float4[]",
+             "type": "real[]",
+           },
+           {
+             "name": "float8",
+             "pg_type": "float8[]",
+             "type": "double precision[]",
+           },
+           {
+             "name": "numeric",
+             "pg_type": "numeric[]",
+             "type": "numeric[]",
+           },
+           {
+             "name": "json",
+             "pg_type": "json[]",
+             "type": "json[]",
+           },
+           {
+             "name": "jsonb",
+             "pg_type": "jsonb[]",
+             "type": "jsonb[]",
+           },
+           {
+             "name": "pg_lsn",
+             "pg_type": "pg_lsn[]",
+             "type": "pg_lsn[]",
+           },
+           {
+             "name": "date",
+             "pg_type": "date[]",
+             "type": "date[]",
+           },
+           {
+             "name": "time",
+             "pg_type": "time[]",
+             "type": "time without time zone[]",
+           },
+           {
+             "name": "timestamp",
+             "pg_type": "timestamp[]",
+             "type": "timestamp without time zone[]",
+           },
+           {
+             "name": "timestamptz",
+             "pg_type": "timestamptz[]",
+             "type": "timestamp with time zone[]",
+           },
+           {
+             "name": "interval",
+             "pg_type": "interval[]",
+             "type": "interval[]",
+           },
+           {
+             "name": "macaddr",
+             "pg_type": "macaddr[]",
+             "type": "macaddr[]",
+           },
+           {
+             "name": "inet",
+             "pg_type": "inet[]",
+             "type": "inet[]",
+           },
+           {
+             "name": "oid",
+             "pg_type": "oid[]",
+             "type": "oid[]",
+           },
+           {
+             "name": "multidimensional",
+             "pg_type": "text[]",
+             "type": "text[]",
+           },
+         ],
+         "name": "test_data_arrays",
+       },
+     ],
+   },
+ ]
+ `;
package/test/src/env.ts
@@ -0,0 +1,7 @@
+ import { utils } from '@powersync/lib-services-framework';
+
+ export const env = utils.collectEnvironmentVariables({
+   PG_TEST_URL: utils.type.string.default('postgres://postgres:postgres@localhost:5432/powersync_test'),
+   CI: utils.type.boolean.default('false'),
+   SLOW_TESTS: utils.type.boolean.default('false')
+ });
package/test/src/large_batch.test.ts
@@ -0,0 +1,195 @@
+ import { MONGO_STORAGE_FACTORY, StorageFactory } from '@core-tests/util.js';
+ import { describe, expect, test } from 'vitest';
+ import { env } from './env.js';
+ import { TEST_CONNECTION_OPTIONS } from './util.js';
+ import { walStreamTest } from './wal_stream_utils.js';
+ import { populateData } from '../../dist/utils/populate_test_data.js';
+
+ describe('batch replication tests - mongodb', function () {
+   // These are slow but consistent tests.
+   // Not run on every test run, but we do run on CI, or when manually debugging issues.
+   if (env.CI || env.SLOW_TESTS) {
+     defineBatchTests(MONGO_STORAGE_FACTORY);
+   } else {
+     // Need something in this file.
+     test('no-op', () => {});
+   }
+ });
+
+ const BASIC_SYNC_RULES = `bucket_definitions:
+   global:
+     data:
+       - SELECT id, description, other FROM "test_data"`;
+
+ function defineBatchTests(factory: StorageFactory) {
+   test(
+     'update large record',
+     walStreamTest(factory, async (context) => {
+       // This test generates a large transaction in MongoDB, despite the replicated data
+       // not being that large.
+       // If we don't limit transaction size, we could run into this error:
+       // > -31800: transaction is too large and will not fit in the storage engine cache
+       await context.updateSyncRules(BASIC_SYNC_RULES);
+       const { pool } = context;
+
+       await pool.query(`CREATE TABLE test_data(id text primary key, description text, other text)`);
+
+       await context.replicateSnapshot();
+
+       let operation_count = await populateData({
+         num_transactions: 1,
+         per_transaction: 80,
+         size: 4_000_000,
+         connection: TEST_CONNECTION_OPTIONS
+       });
+
+       const start = Date.now();
+
+       context.startStreaming();
+
+       const checkpoint = await context.getCheckpoint({ timeout: 100_000 });
+       const duration = Date.now() - start;
+       const used = Math.round(process.memoryUsage().heapUsed / 1024 / 1024);
+       const checksum = await context.storage!.getChecksums(checkpoint, ['global[]']);
+       expect(checksum.get('global[]')!.count).toEqual(operation_count);
+       const perSecond = Math.round((operation_count / duration) * 1000);
+       console.log(`${operation_count} ops in ${duration}ms ${perSecond} ops/s. ${used}MB heap`);
+     }),
+     { timeout: 120_000 }
+   );
+
+   test(
+     'initial replication performance',
+     walStreamTest(factory, async (context) => {
+       // Manual test to check initial replication performance and memory usage
+       await context.updateSyncRules(BASIC_SYNC_RULES);
+       const { pool } = context;
+
+       await pool.query(`CREATE TABLE test_data(id text primary key, description text, other text)`);
+
+       // Some stats (varies a lot):
+       // Old 'postgres' driver, using cursor(2)
+       // 15 ops in 19559ms 1 ops/s. 354MB RSS, 115MB heap, 137MB external
+       // 25 ops in 42984ms 1 ops/s. 377MB RSS, 129MB heap, 137MB external
+       // 35 ops in 41337ms 1 ops/s. 365MB RSS, 115MB heap, 137MB external
+
+       // streaming with pgwire
+       // 15 ops in 26423ms 1 ops/s. 379MB RSS, 128MB heap, 182MB external, 165MB ArrayBuffers
+       // 35 ops in 78897ms 0 ops/s. 539MB RSS, 52MB heap, 87MB external, 83MB ArrayBuffers
+
+       let operation_count = await populateData({
+         num_transactions: 1,
+         per_transaction: 35,
+         size: 14_000_000,
+         connection: TEST_CONNECTION_OPTIONS
+       });
+
+       global.gc?.();
+
+       // Note that we could already have high memory usage at this point
+       printMemoryUsage();
+
+       let interval = setInterval(() => {
+         printMemoryUsage();
+       }, 2000);
+       try {
+         const start = Date.now();
+
+         await context.replicateSnapshot();
+         await context.storage!.autoActivate();
+         context.startStreaming();
+
+         const checkpoint = await context.getCheckpoint({ timeout: 100_000 });
+         const duration = Date.now() - start;
+         const checksum = await context.storage!.getChecksums(checkpoint, ['global[]']);
+         expect(checksum.get('global[]')!.count).toEqual(operation_count);
+         const perSecond = Math.round((operation_count / duration) * 1000);
+         console.log(`${operation_count} ops in ${duration}ms ${perSecond} ops/s.`);
+         printMemoryUsage();
+       } finally {
+         clearInterval(interval);
+       }
+     }),
+     { timeout: 120_000 }
+   );
+
+   test(
+     'large number of operations',
+     walStreamTest(factory, async (context) => {
+       // This just tests performance of a large number of operations inside a transaction.
+       await context.updateSyncRules(BASIC_SYNC_RULES);
+       const { pool } = context;
+
+       await pool.query(`CREATE TABLE test_data(id text primary key, description text, other text)`);
+
+       await context.replicateSnapshot();
+
+       const numTransactions = 20;
+       const perTransaction = 1500;
+       let operationCount = 0;
+
+       const description = 'description';
+
+       for (let i = 0; i < numTransactions; i++) {
+         const prefix = `test${i}K`;
+
+         await pool.query(
+           {
+             statement: `INSERT INTO test_data(id, description, other) SELECT $1 || i, $2 || i, 'foo' FROM generate_series(1, $3) i`,
+             params: [
+               { type: 'varchar', value: prefix },
+               { type: 'varchar', value: description },
+               { type: 'int4', value: perTransaction }
+             ]
+           },
+           {
+             statement: `UPDATE test_data SET other = other || '#' WHERE id LIKE $1 || '%'`,
+             params: [{ type: 'varchar', value: prefix }]
+           }
+         );
+         operationCount += perTransaction * 2;
+       }
+
+       const start = Date.now();
+
+       context.startStreaming();
+
+       const checkpoint = await context.getCheckpoint({ timeout: 50_000 });
+       const duration = Date.now() - start;
+       const used = Math.round(process.memoryUsage().heapUsed / 1024 / 1024);
+       const checksum = await context.storage!.getChecksums(checkpoint, ['global[]']);
+       expect(checksum.get('global[]')!.count).toEqual(operationCount);
+       const perSecond = Math.round((operationCount / duration) * 1000);
+       // This number depends on the test machine, so we keep the test significantly
+       // lower than expected numbers.
+       expect(perSecond).toBeGreaterThan(1000);
+       console.log(`${operationCount} ops in ${duration}ms ${perSecond} ops/s. ${used}MB heap`);
+
+       // Truncating is fast (~10k ops/second).
+       // We'd need a really large set of data to actually run into limits when truncating,
+       // but we just test with the data we have here.
+       const truncateStart = Date.now();
+       await pool.query(`TRUNCATE test_data`);
+
+       const checkpoint2 = await context.getCheckpoint({ timeout: 20_000 });
+       const truncateDuration = Date.now() - truncateStart;
+
+       const checksum2 = await context.storage!.getChecksums(checkpoint2, ['global[]']);
+       const truncateCount = checksum2.get('global[]')!.count - checksum.get('global[]')!.count;
+       expect(truncateCount).toEqual(numTransactions * perTransaction);
+       const truncatePerSecond = Math.round((truncateCount / truncateDuration) * 1000);
+       console.log(`Truncated ${truncateCount} ops in ${truncateDuration}ms ${truncatePerSecond} ops/s. ${used}MB heap`);
+     }),
+     { timeout: 90_000 }
+   );
+
+   function printMemoryUsage() {
+     const memoryUsage = process.memoryUsage();
+
+     const rss = Math.round(memoryUsage.rss / 1024 / 1024);
+     const heap = Math.round(memoryUsage.heapUsed / 1024 / 1024);
+     const external = Math.round(memoryUsage.external / 1024 / 1024);
+     const arrayBuffers = Math.round(memoryUsage.arrayBuffers / 1024 / 1024);
+     console.log(`${rss}MB RSS, ${heap}MB heap, ${external}MB external, ${arrayBuffers}MB ArrayBuffers`);
+   }
+ }