@powersync/service-module-postgres 0.3.0 → 0.4.0

This diff shows the changes between two publicly available package versions as released to a supported registry, and is provided for informational purposes only.
@@ -1,9 +1,7 @@
 // Adapted from https://github.com/kagis/pgwire/blob/0dc927f9f8990a903f238737326e53ba1c8d094f/mod.js#L2218
 
 import * as pgwire from '@powersync/service-jpgwire';
-import { SqliteJsonValue, SqliteRow, toSyncRulesRow } from '@powersync/service-sync-rules';
-
-import { logger } from '@powersync/lib-services-framework';
+import { SqliteRow, toSyncRulesRow } from '@powersync/service-sync-rules';
 
 /**
  * pgwire message -> SQLite row.
@@ -28,46 +26,3 @@ export function constructBeforeRecord(message: pgwire.PgoutputDelete | pgwire.Pg
   const record = pgwire.decodeTuple(message.relation, rawData);
   return toSyncRulesRow(record);
 }
-
-export function escapeIdentifier(identifier: string) {
-  return `"${identifier.replace(/"/g, '""').replace(/\./g, '"."')}"`;
-}
-
-export function autoParameter(arg: SqliteJsonValue | boolean): pgwire.StatementParam {
-  if (arg == null) {
-    return { type: 'varchar', value: null };
-  } else if (typeof arg == 'string') {
-    return { type: 'varchar', value: arg };
-  } else if (typeof arg == 'number') {
-    if (Number.isInteger(arg)) {
-      return { type: 'int8', value: arg };
-    } else {
-      return { type: 'float8', value: arg };
-    }
-  } else if (typeof arg == 'boolean') {
-    return { type: 'bool', value: arg };
-  } else if (typeof arg == 'bigint') {
-    return { type: 'int8', value: arg };
-  } else {
-    throw new Error(`Unsupported query parameter: ${typeof arg}`);
-  }
-}
-
-export async function retriedQuery(db: pgwire.PgClient, ...statements: pgwire.Statement[]): Promise<pgwire.PgResult>;
-export async function retriedQuery(db: pgwire.PgClient, query: string): Promise<pgwire.PgResult>;
-
-/**
- * Retry a simple query - up to 2 attempts total.
- */
-export async function retriedQuery(db: pgwire.PgClient, ...args: any[]) {
-  for (let tries = 2; ; tries--) {
-    try {
-      return await db.query(...args);
-    } catch (e) {
-      if (tries == 1) {
-        throw e;
-      }
-      logger.warn('Query error, retrying', e);
-    }
-  }
-}
@@ -0,0 +1,5 @@
+// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
+
+exports[`schema changes - mongodb > add to publication (not in sync rules) 1`] = `0`;
+
+exports[`schema changes - postgres > add to publication (not in sync rules) 1`] = `16384`;
package/test/src/env.ts CHANGED
@@ -2,7 +2,10 @@ import { utils } from '@powersync/lib-services-framework';
 
 export const env = utils.collectEnvironmentVariables({
   PG_TEST_URL: utils.type.string.default('postgres://postgres:postgres@localhost:5432/powersync_test'),
+  PG_STORAGE_TEST_URL: utils.type.string.default('postgres://postgres:postgres@localhost:5431/powersync_storage_test'),
   MONGO_TEST_URL: utils.type.string.default('mongodb://localhost:27017/powersync_test'),
   CI: utils.type.boolean.default('false'),
-  SLOW_TESTS: utils.type.boolean.default('false')
+  SLOW_TESTS: utils.type.boolean.default('false'),
+  TEST_MONGO_STORAGE: utils.type.boolean.default('true'),
+  TEST_POSTGRES_STORAGE: utils.type.boolean.default('true')
 });
@@ -3,10 +3,14 @@ import * as timers from 'timers/promises';
 import { describe, expect, test } from 'vitest';
 import { populateData } from '../../dist/utils/populate_test_data.js';
 import { env } from './env.js';
-import { INITIALIZED_MONGO_STORAGE_FACTORY, TEST_CONNECTION_OPTIONS } from './util.js';
+import {
+  INITIALIZED_MONGO_STORAGE_FACTORY,
+  INITIALIZED_POSTGRES_STORAGE_FACTORY,
+  TEST_CONNECTION_OPTIONS
+} from './util.js';
 import { WalStreamTestContext } from './wal_stream_utils.js';
 
-describe('batch replication tests - mongodb', { timeout: 120_000 }, function () {
+describe.skipIf(!env.TEST_MONGO_STORAGE)('batch replication tests - mongodb', { timeout: 120_000 }, function () {
   // These are slow but consistent tests.
   // Not run on every test run, but we do run on CI, or when manually debugging issues.
   if (env.CI || env.SLOW_TESTS) {
@@ -17,6 +21,17 @@ describe('batch replication tests - mongodb', { timeout: 120_000 }, function ()
   }
 });
 
+describe.skipIf(!env.TEST_POSTGRES_STORAGE)('batch replication tests - postgres', { timeout: 240_000 }, function () {
+  // These are slow but consistent tests.
+  // Not run on every test run, but we do run on CI, or when manually debugging issues.
+  if (env.CI || env.SLOW_TESTS) {
+    defineBatchTests(INITIALIZED_POSTGRES_STORAGE_FACTORY);
+  } else {
+    // Need something in this file.
+    test('no-op', () => {});
+  }
+});
+
 const BASIC_SYNC_RULES = `bucket_definitions:
   global:
     data:
@@ -3,13 +3,18 @@ import * as timers from 'timers/promises';
 import { describe, expect, test } from 'vitest';
 
 import { storage } from '@powersync/service-core';
-import { INITIALIZED_MONGO_STORAGE_FACTORY } from './util.js';
+import { env } from './env.js';
+import { INITIALIZED_MONGO_STORAGE_FACTORY, INITIALIZED_POSTGRES_STORAGE_FACTORY } from './util.js';
 import { WalStreamTestContext } from './wal_stream_utils.js';
 
-describe('schema changes', { timeout: 20_000 }, function () {
+describe.skipIf(!env.TEST_MONGO_STORAGE)('schema changes - mongodb', { timeout: 20_000 }, function () {
   defineTests(INITIALIZED_MONGO_STORAGE_FACTORY);
 });
 
+describe.skipIf(!env.TEST_POSTGRES_STORAGE)('schema changes - postgres', { timeout: 20_000 }, function () {
+  defineTests(INITIALIZED_POSTGRES_STORAGE_FACTORY);
+});
+
 const BASIC_SYNC_RULES = `
 bucket_definitions:
   global:
@@ -432,7 +437,7 @@ function defineTests(factory: storage.TestStorageFactory) {
     expect(data).toMatchObject([]);
 
     const metrics = await storage.factory.getStorageMetrics();
-    expect(metrics.replication_size_bytes).toEqual(0);
+    expect(metrics.replication_size_bytes).toMatchSnapshot();
   });
 
   test('replica identity nothing', async () => {
package/test/src/setup.ts CHANGED
@@ -1,9 +1,13 @@
 import { container } from '@powersync/lib-services-framework';
 import { test_utils } from '@powersync/service-core-tests';
-import { beforeAll } from 'vitest';
+import { beforeAll, beforeEach } from 'vitest';
 
 beforeAll(async () => {
   // Executes for every test file
   container.registerDefaults();
   await test_utils.initMetrics();
 });
+
+beforeEach(async () => {
+  await test_utils.resetMetrics();
+});
@@ -7,6 +7,7 @@ import {
   connectPgPool,
   getClientCheckpoint,
   INITIALIZED_MONGO_STORAGE_FACTORY,
+  INITIALIZED_POSTGRES_STORAGE_FACTORY,
   TEST_CONNECTION_OPTIONS
 } from './util.js';
 
@@ -17,9 +18,10 @@ import { PgManager } from '@module/replication/PgManager.js';
 import { storage } from '@powersync/service-core';
 import { test_utils } from '@powersync/service-core-tests';
 import * as mongo_storage from '@powersync/service-module-mongodb-storage';
+import * as postgres_storage from '@powersync/service-module-postgres-storage';
 import * as timers from 'node:timers/promises';
 
-describe('slow tests - mongodb', function () {
+describe.skipIf(!env.TEST_MONGO_STORAGE)('slow tests - mongodb', function () {
   // These are slow, inconsistent tests.
   // Not run on every test run, but we do run on CI, or when manually debugging issues.
   if (env.CI || env.SLOW_TESTS) {
@@ -30,6 +32,17 @@ describe('slow tests - mongodb', function () {
   }
 });
 
+describe.skipIf(!env.TEST_POSTGRES_STORAGE)('slow tests - postgres', function () {
+  // These are slow, inconsistent tests.
+  // Not run on every test run, but we do run on CI, or when manually debugging issues.
+  if (env.CI || env.SLOW_TESTS) {
+    defineSlowTests(INITIALIZED_POSTGRES_STORAGE_FACTORY);
+  } else {
+    // Need something in this file.
+    test('no-op', () => {});
+  }
+});
+
 function defineSlowTests(factory: storage.TestStorageFactory) {
   let walStream: WalStream | undefined;
   let connections: PgManager | undefined;
@@ -79,7 +92,7 @@ function defineSlowTests(factory: storage.TestStorageFactory) {
    const replicationConnection = await connections.replicationConnection();
    const pool = connections.pool;
    await clearTestDb(pool);
-    const f = (await factory()) as mongo_storage.storage.MongoBucketStorage;
+    await using f = await factory();
 
    const syncRuleContent = `
 bucket_definitions:
@@ -174,15 +187,50 @@ bucket_definitions:
      }
 
      const checkpoint = BigInt((await storage.getCheckpoint()).checkpoint);
-      const opsBefore = (await f.db.bucket_data.find().sort({ _id: 1 }).toArray())
-        .filter((row) => row._id.o <= checkpoint)
-        .map(mongo_storage.storage.mapOpEntry);
-      await storage.compact({ maxOpId: checkpoint });
-      const opsAfter = (await f.db.bucket_data.find().sort({ _id: 1 }).toArray())
-        .filter((row) => row._id.o <= checkpoint)
-        .map(mongo_storage.storage.mapOpEntry);
-
-      test_utils.validateCompactedBucket(opsBefore, opsAfter);
+      if (f instanceof mongo_storage.storage.MongoBucketStorage) {
+        const opsBefore = (await f.db.bucket_data.find().sort({ _id: 1 }).toArray())
+          .filter((row) => row._id.o <= checkpoint)
+          .map(mongo_storage.storage.mapOpEntry);
+        await storage.compact({ maxOpId: checkpoint });
+        const opsAfter = (await f.db.bucket_data.find().sort({ _id: 1 }).toArray())
+          .filter((row) => row._id.o <= checkpoint)
+          .map(mongo_storage.storage.mapOpEntry);
+
+        test_utils.validateCompactedBucket(opsBefore, opsAfter);
+      } else if (f instanceof postgres_storage.PostgresBucketStorageFactory) {
+        const { db } = f;
+        const opsBefore = (
+          await db.sql`
+            SELECT
+              *
+            FROM
+              bucket_data
+            WHERE
+              op_id <= ${{ type: 'int8', value: checkpoint }}
+            ORDER BY
+              op_id ASC
+          `
+            .decoded(postgres_storage.models.BucketData)
+            .rows()
+        ).map(postgres_storage.utils.mapOpEntry);
+        await storage.compact({ maxOpId: checkpoint });
+        const opsAfter = (
+          await db.sql`
+            SELECT
+              *
+            FROM
+              bucket_data
+            WHERE
+              op_id <= ${{ type: 'int8', value: checkpoint }}
+            ORDER BY
+              op_id ASC
+          `
+            .decoded(postgres_storage.models.BucketData)
+            .rows()
+        ).map(postgres_storage.utils.mapOpEntry);
+
+        test_utils.validateCompactedBucket(opsBefore, opsAfter);
+      }
    }
  };
 
@@ -196,26 +244,66 @@ bucket_definitions:
      // Wait for replication to finish
      let checkpoint = await getClientCheckpoint(pool, storage.factory, { timeout: TIMEOUT_MARGIN_MS });
 
-      // Check that all inserts have been deleted again
-      const docs = await f.db.current_data.find().toArray();
-      const transformed = docs.map((doc) => {
-        return bson.deserialize(doc.data.buffer) as SqliteRow;
-      });
-      expect(transformed).toEqual([]);
-
-      // Check that each PUT has a REMOVE
-      const ops = await f.db.bucket_data.find().sort({ _id: 1 }).toArray();
-
-      // All a single bucket in this test
-      const bucket = ops.map((op) => mongo_storage.storage.mapOpEntry(op));
-      const reduced = test_utils.reduceBucket(bucket);
-      expect(reduced).toMatchObject([
-        {
-          op_id: '0',
-          op: 'CLEAR'
-        }
-        // Should contain no additional data
-      ]);
+      if (f instanceof mongo_storage.storage.MongoBucketStorage) {
+        // Check that all inserts have been deleted again
+        const docs = await f.db.current_data.find().toArray();
+        const transformed = docs.map((doc) => {
+          return bson.deserialize(doc.data.buffer) as SqliteRow;
+        });
+        expect(transformed).toEqual([]);
+
+        // Check that each PUT has a REMOVE
+        const ops = await f.db.bucket_data.find().sort({ _id: 1 }).toArray();
+
+        // All a single bucket in this test
+        const bucket = ops.map((op) => mongo_storage.storage.mapOpEntry(op));
+        const reduced = test_utils.reduceBucket(bucket);
+        expect(reduced).toMatchObject([
+          {
+            op_id: '0',
+            op: 'CLEAR'
+          }
+          // Should contain no additional data
+        ]);
+      } else if (f instanceof postgres_storage.storage.PostgresBucketStorageFactory) {
+        const { db } = f;
+        // Check that all inserts have been deleted again
+        const docs = await db.sql`
+          SELECT
+            *
+          FROM
+            current_data
+        `
+          .decoded(postgres_storage.models.CurrentData)
+          .rows();
+        const transformed = docs.map((doc) => {
+          return bson.deserialize(doc.data) as SqliteRow;
+        });
+        expect(transformed).toEqual([]);
+
+        // Check that each PUT has a REMOVE
+        const ops = await db.sql`
+          SELECT
+            *
+          FROM
+            bucket_data
+          ORDER BY
+            op_id ASC
+        `
+          .decoded(postgres_storage.models.BucketData)
+          .rows();
+
+        // All a single bucket in this test
+        const bucket = ops.map((op) => postgres_storage.utils.mapOpEntry(op));
+        const reduced = test_utils.reduceBucket(bucket);
+        expect(reduced).toMatchObject([
+          {
+            op_id: '0',
+            op: 'CLEAR'
+          }
+          // Should contain no additional data
+        ]);
+      }
    }
 
    abortController.abort();
@@ -231,7 +319,7 @@
    async () => {
      const pool = await connectPgPool();
      await clearTestDb(pool);
-      const f = await factory();
+      await using f = await factory();
 
      const syncRuleContent = `
 bucket_definitions:
package/test/src/util.ts CHANGED
@@ -1,10 +1,11 @@
 import { PostgresRouteAPIAdapter } from '@module/api/PostgresRouteAPIAdapter.js';
 import * as types from '@module/types/types.js';
-import * as pg_utils from '@module/utils/pgwire_utils.js';
+import * as lib_postgres from '@powersync/lib-service-postgres';
 import { logger } from '@powersync/lib-services-framework';
 import { BucketStorageFactory, OpId } from '@powersync/service-core';
 import * as pgwire from '@powersync/service-jpgwire';
 import * as mongo_storage from '@powersync/service-module-mongodb-storage';
+import * as postgres_storage from '@powersync/service-module-postgres-storage';
 import { env } from './env.js';
 
 export const TEST_URI = env.PG_TEST_URL;
@@ -14,6 +15,10 @@ export const INITIALIZED_MONGO_STORAGE_FACTORY = mongo_storage.MongoTestStorageF
   isCI: env.CI
 });
 
+export const INITIALIZED_POSTGRES_STORAGE_FACTORY = postgres_storage.PostgresTestStorageFactoryGenerator({
+  url: env.PG_STORAGE_TEST_URL
+});
+
 export const TEST_CONNECTION_OPTIONS = types.normalizeConnectionConfig({
   type: 'postgresql',
   uri: TEST_URI,
@@ -40,7 +45,7 @@ export async function clearTestDb(db: pgwire.PgClient) {
   for (let row of tableRows) {
     const name = row.table_name;
     if (name.startsWith('test_')) {
-      await db.query(`DROP TABLE public.${pg_utils.escapeIdentifier(name)}`);
+      await db.query(`DROP TABLE public.${lib_postgres.escapeIdentifier(name)}`);
    }
  }
 }
@@ -4,7 +4,8 @@ import { putOp, removeOp } from '@powersync/service-core-tests';
 import { pgwireRows } from '@powersync/service-jpgwire';
 import * as crypto from 'crypto';
 import { describe, expect, test } from 'vitest';
-import { INITIALIZED_MONGO_STORAGE_FACTORY } from './util.js';
+import { env } from './env.js';
+import { INITIALIZED_MONGO_STORAGE_FACTORY, INITIALIZED_POSTGRES_STORAGE_FACTORY } from './util.js';
 import { WalStreamTestContext } from './wal_stream_utils.js';
 
 const BASIC_SYNC_RULES = `
@@ -14,10 +15,14 @@ bucket_definitions:
      - SELECT id, description FROM "test_data"
 `;
 
-describe('wal stream - mongodb', { timeout: 20_000 }, function () {
+describe.skipIf(!env.TEST_MONGO_STORAGE)('wal stream - mongodb', { timeout: 20_000 }, function () {
   defineWalStreamTests(INITIALIZED_MONGO_STORAGE_FACTORY);
 });
 
+describe.skipIf(!env.TEST_POSTGRES_STORAGE)('wal stream - postgres', { timeout: 20_000 }, function () {
+  defineWalStreamTests(INITIALIZED_POSTGRES_STORAGE_FACTORY);
+});
+
 function defineWalStreamTests(factory: storage.TestStorageFactory) {
   test('replicating basic values', async () => {
     await using context = await WalStreamTestContext.open(factory);
@@ -46,6 +46,7 @@ export class WalStreamTestContext implements AsyncDisposable {
    await this.streamPromise;
    await this.connectionManager.destroy();
    this.storage?.[Symbol.dispose]();
+    await this.factory?.[Symbol.asyncDispose]();
  }
 
  get pool() {
package/tsconfig.json CHANGED
@@ -26,6 +26,9 @@
    },
    {
      "path": "../../libs/lib-services"
+    },
+    {
+      "path": "../../libs/lib-postgres"
    }
  ]
 }