@powersync/service-module-postgres 0.0.0-dev-20241209071534 → 0.0.0-dev-20241219091224

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -192,6 +192,15 @@ export class WalStream {
     if (slotExists) {
       // This checks that the slot is still valid
       const r = await this.checkReplicationSlot();
+      if (snapshotDone && r.needsNewSlot) {
+        // We keep the current snapshot, and create a new replication slot
+        throw new MissingReplicationSlotError(`Replication slot ${slotName} is not valid anymore`);
+      }
+      // We can have:
+      // needsInitialSync: true, needsNewSlot: true -> initial sync from scratch
+      // needsInitialSync: true, needsNewSlot: false -> resume initial sync
+      // needsInitialSync: false, needsNewSlot: true -> handled above
+      // needsInitialSync: false, needsNewSlot: false -> resume streaming replication
       return {
         needsInitialSync: !snapshotDone,
         needsNewSlot: r.needsNewSlot
@@ -204,7 +213,7 @@ export class WalStream {
   /**
    * If a replication slot exists, check that it is healthy.
    */
-  private async checkReplicationSlot(): Promise<InitResult> {
+  private async checkReplicationSlot(): Promise<{ needsNewSlot: boolean }> {
     let last_error = null;
     const slotName = this.slot_name;
 
@@ -244,7 +253,7 @@ export class WalStream {
 
       // Success
       logger.info(`Slot ${slotName} appears healthy`);
-      return { needsInitialSync: false, needsNewSlot: false };
+      return { needsNewSlot: false };
     } catch (e) {
       last_error = e;
       logger.warn(`${slotName} Replication slot error`, e);
@@ -274,9 +283,9 @@ export class WalStream {
         // Sample: publication "powersync" does not exist
         // Happens when publication deleted or never created.
         // Slot must be re-created in this case.
-        logger.info(`${slotName} does not exist anymore, will create new slot`);
+        logger.info(`${slotName} is not valid anymore`);
 
-        return { needsInitialSync: true, needsNewSlot: true };
+        return { needsNewSlot: true };
       }
       // Try again after a pause
       await new Promise((resolve) => setTimeout(resolve, 1000));
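
Note: the MissingReplicationSlotError thrown in the first hunk is defined and exported in WalStream.ts itself (a test file later in this diff imports it from '@module/replication/WalStream.js'), but its definition is not included here. A minimal sketch of what such an error class could look like, as an assumption rather than the package's actual code:

export class MissingReplicationSlotError extends Error {
  constructor(message: string) {
    super(message);
    // Name the error so higher-level handlers can identify it.
    this.name = 'MissingReplicationSlotError';
  }
}

The distinction matters for recovery: when the snapshot is already done but the slot is gone, the error propagates upward and a higher level recreates the slot while keeping the snapshot (see the 'reporting slot issues' test further down), whereas the needsInitialSync/needsNewSlot combinations in the return value cover the cases that can be resumed in place.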
@@ -349,18 +358,10 @@ WHERE oid = $1::regclass`,
         logger.info(`${this.slot_name} Skipping ${table.qualifiedName} - snapshot already done`);
         continue;
       }
-      let tableLsnNotBefore: string;
-      await db.query('BEGIN');
-      try {
-        await this.snapshotTable(batch, db, table);
-
-        const rs = await db.query(`select pg_current_wal_lsn() as lsn`);
-        tableLsnNotBefore = rs.rows[0][0];
-      } finally {
-        // Read-only transaction, commit does not actually do anything.
-        await db.query('COMMIT');
-      }
+      await this.snapshotTable(batch, db, table);
 
+      const rs = await db.query(`select pg_current_wal_lsn() as lsn`);
+      const tableLsnNotBefore = rs.rows[0][0];
       await batch.markSnapshotDone([table], tableLsnNotBefore);
       await touch();
     }
@@ -386,70 +387,51 @@ WHERE oid = $1::regclass`,
     const estimatedCount = await this.estimatedCount(db, table);
     let at = 0;
     let lastLogIndex = 0;
-
-    // We do streaming on two levels:
-    // 1. Coarse level: DELCARE CURSOR, FETCH 10000 at a time.
-    // 2. Fine level: Stream chunks from each fetch call.
-    await db.query(`DECLARE powersync_cursor CURSOR FOR SELECT * FROM ${table.escapedIdentifier}`);
-
+    const cursor = db.stream({ statement: `SELECT * FROM ${table.escapedIdentifier}` });
     let columns: { i: number; name: string }[] = [];
-    let hasRemainingData = true;
-    while (hasRemainingData) {
-      // Fetch 10k at a time.
-      // The balance here is between latency overhead per FETCH call,
-      // and not spending too much time on each FETCH call.
-      // We aim for a couple of seconds on each FETCH call.
-      const cursor = db.stream({
-        statement: `FETCH 10000 FROM powersync_cursor`
-      });
-      hasRemainingData = false;
-      // pgwire streams rows in chunks.
-      // These chunks can be quite small (as little as 16KB), so we don't flush chunks automatically.
-      // There are typically 100-200 rows per chunk.
-      for await (let chunk of cursor) {
-        if (chunk.tag == 'RowDescription') {
-          // We get a RowDescription for each FETCH call, but they should
-          // all be the same.
-          let i = 0;
-          columns = chunk.payload.map((c) => {
-            return { i: i++, name: c.name };
-          });
-          continue;
-        }
-
-        const rows = chunk.rows.map((row) => {
-          let q: DatabaseInputRow = {};
-          for (let c of columns) {
-            q[c.name] = row[c.i];
-          }
-          return q;
+    // pgwire streams rows in chunks.
+    // These chunks can be quite small (as little as 16KB), so we don't flush chunks automatically.
+
+    for await (let chunk of cursor) {
+      if (chunk.tag == 'RowDescription') {
+        let i = 0;
+        columns = chunk.payload.map((c) => {
+          return { i: i++, name: c.name };
         });
-        if (rows.length > 0 && at - lastLogIndex >= 5000) {
-          logger.info(`${this.slot_name} Replicating ${table.qualifiedName} ${at}/${estimatedCount}`);
-          lastLogIndex = at;
-          hasRemainingData = true;
-        }
-        if (this.abort_signal.aborted) {
-          throw new Error(`Aborted initial replication of ${this.slot_name}`);
-        }
+        continue;
+      }
 
-        for (const record of WalStream.getQueryData(rows)) {
-          // This auto-flushes when the batch reaches its size limit
-          await batch.save({
-            tag: storage.SaveOperationTag.INSERT,
-            sourceTable: table,
-            before: undefined,
-            beforeReplicaId: undefined,
-            after: record,
-            afterReplicaId: getUuidReplicaIdentityBson(record, table.replicaIdColumns)
-          });
+      const rows = chunk.rows.map((row) => {
+        let q: DatabaseInputRow = {};
+        for (let c of columns) {
+          q[c.name] = row[c.i];
         }
+        return q;
+      });
+      if (rows.length > 0 && at - lastLogIndex >= 5000) {
+        logger.info(`${this.slot_name} Replicating ${table.qualifiedName} ${at}/${estimatedCount}`);
+        lastLogIndex = at;
+      }
+      if (this.abort_signal.aborted) {
+        throw new Error(`Aborted initial replication of ${this.slot_name}`);
+      }
 
-        at += rows.length;
-        Metrics.getInstance().rows_replicated_total.add(rows.length);
-
-        await touch();
+      for (const record of WalStream.getQueryData(rows)) {
+        // This auto-flushes when the batch reaches its size limit
+        await batch.save({
+          tag: storage.SaveOperationTag.INSERT,
+          sourceTable: table,
+          before: undefined,
+          beforeReplicaId: undefined,
+          after: record,
+          afterReplicaId: getUuidReplicaIdentityBson(record, table.replicaIdColumns)
+        });
      }
+
+      at += rows.length;
+      Metrics.getInstance().rows_replicated_total.add(rows.length);
+
+      await touch();
    }
 
     await batch.flush();
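
Note: the rewritten loop relies on the pgwire streaming behaviour visible above: db.stream() yields a RowDescription chunk carrying column metadata, followed by data chunks whose rows are positional arrays. A minimal, self-contained sketch of that chunk-to-object conversion (it buffers everything in memory, unlike the code above which saves each chunk through batch.save; the loose `any` typing is an assumption for brevity):

async function collectRows(cursor: AsyncIterable<any>): Promise<Record<string, any>[]> {
  let columns: { i: number; name: string }[] = [];
  const out: Record<string, any>[] = [];
  for await (const chunk of cursor) {
    if (chunk.tag == 'RowDescription') {
      // Column metadata arrives before the data chunks.
      columns = chunk.payload.map((c: { name: string }, i: number) => ({ i, name: c.name }));
      continue;
    }
    for (const row of chunk.rows) {
      const keyed: Record<string, any> = {};
      for (const c of columns) {
        keyed[c.name] = row[c.i]; // map positional values onto column names
      }
      out.push(keyed);
    }
  }
  return out;
}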
@@ -133,6 +133,12 @@ export function normalizeConnectionConfig(options: PostgresConnectionConfig): No
   };
 }
 
+export function isPostgresConfig(
+  config: service_types.configFile.DataSourceConfig
+): config is PostgresConnectionConfig {
+  return config.type == POSTGRES_CONNECTION_TYPE;
+}
+
 /**
  * Check whether the port is in a "safe" range.
  *
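
Note: isPostgresConfig is a TypeScript type guard, so a successful check narrows a generic DataSourceConfig to PostgresConnectionConfig. A small usage sketch (the dataSources array is assumed for illustration; only isPostgresConfig and normalizeConnectionConfig come from this file):

for (const dataSource of dataSources) {
  if (isPostgresConfig(dataSource)) {
    // dataSource is narrowed to PostgresConnectionConfig here
    const normalized = normalizeConnectionConfig(dataSource);
    // ... connect using the normalized config
  }
}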
package/test/src/env.ts CHANGED
@@ -2,6 +2,7 @@ import { utils } from '@powersync/lib-services-framework';
 
 export const env = utils.collectEnvironmentVariables({
   PG_TEST_URL: utils.type.string.default('postgres://postgres:postgres@localhost:5432/powersync_test'),
+  MONGO_TEST_URL: utils.type.string.default('mongodb://localhost:27017/powersync_test'),
   CI: utils.type.boolean.default('false'),
   SLOW_TESTS: utils.type.boolean.default('false')
 });
@@ -1,17 +1,17 @@
-import { MONGO_STORAGE_FACTORY, StorageFactory, StorageOptions } from '@core-tests/util.js';
+import { Metrics } from '@powersync/service-core';
+import { test_utils } from '@powersync/service-core-tests';
+import * as timers from 'timers/promises';
 import { describe, expect, test } from 'vitest';
 import { populateData } from '../../dist/utils/populate_test_data.js';
 import { env } from './env.js';
-import { TEST_CONNECTION_OPTIONS } from './util.js';
+import { INITIALIZED_MONGO_STORAGE_FACTORY, TEST_CONNECTION_OPTIONS } from './util.js';
 import { WalStreamTestContext } from './wal_stream_utils.js';
-import * as timers from 'timers/promises';
-import { Metrics } from '@powersync/service-core';
 
 describe('batch replication tests - mongodb', { timeout: 120_000 }, function () {
   // These are slow but consistent tests.
   // Not run on every test run, but we do run on CI, or when manually debugging issues.
   if (env.CI || env.SLOW_TESTS) {
-    defineBatchTests(MONGO_STORAGE_FACTORY);
+    defineBatchTests(INITIALIZED_MONGO_STORAGE_FACTORY);
   } else {
     // Need something in this file.
     test('no-op', () => {});
@@ -23,7 +23,7 @@ const BASIC_SYNC_RULES = `bucket_definitions:
     data:
       - SELECT id, description, other FROM "test_data"`;
 
-function defineBatchTests(factory: StorageFactory) {
+function defineBatchTests(factory: test_utils.StorageFactory) {
   test('update large record', async () => {
     await using context = await WalStreamTestContext.open(factory);
     // This test generates a large transaction in MongoDB, despite the replicated data
@@ -176,6 +176,71 @@ function defineBatchTests(factory: StorageFactory) {
     console.log(`Truncated ${truncateCount} ops in ${truncateDuration}ms ${truncatePerSecond} ops/s. ${used}MB heap`);
   });
 
+  test('large number of bucket_data docs', async () => {
+    // This tests that we don't run into this error:
+    // MongoBulkWriteError: BSONObj size: 16814023 (0x1008FC7) is invalid. Size must be between 0 and 16793600(16MB) First element: insert: "bucket_data"
+    // The test is quite sensitive to internals, since we need to
+    // generate an internal batch that is just below 16MB.
+    //
+    // For the test to work, we need a:
+    // 1. Large number of documents in the batch.
+    // 2. More bucket_data documents than current_data documents,
+    //    otherwise other batch limiting thresholds are hit.
+    // 3. A large document to make sure we get to just below the 16MB
+    //    limit.
+    // 4. Another document to make sure the internal batching overflows
+    //    to a second batch.
+
+    await using context = await WalStreamTestContext.open(factory);
+    await context.updateSyncRules(`bucket_definitions:
+      global:
+        data:
+          # Sync 4x so we get more bucket_data documents
+          - SELECT * FROM test_data
+          - SELECT * FROM test_data
+          - SELECT * FROM test_data
+          - SELECT * FROM test_data
+    `);
+    const { pool } = context;
+
+    await pool.query(`CREATE TABLE test_data(id serial primary key, description text)`);
+
+    const numDocs = 499;
+    let description = '';
+    while (description.length < 2650) {
+      description += '.';
+    }
+
+    await pool.query({
+      statement: `INSERT INTO test_data(description) SELECT $2 FROM generate_series(1, $1) i`,
+      params: [
+        { type: 'int4', value: numDocs },
+        { type: 'varchar', value: description }
+      ]
+    });
+
+    let largeDescription = '';
+
+    while (largeDescription.length < 2_768_000) {
+      largeDescription += '.';
+    }
+    await pool.query({
+      statement: 'INSERT INTO test_data(description) VALUES($1)',
+      params: [{ type: 'varchar', value: largeDescription }]
+    });
+    await pool.query({
+      statement: 'INSERT INTO test_data(description) VALUES($1)',
+      params: [{ type: 'varchar', value: 'testingthis' }]
+    });
+    await context.replicateSnapshot();
+
+    context.startStreaming();
+
+    const checkpoint = await context.getCheckpoint({ timeout: 50_000 });
+    const checksum = await context.storage!.getChecksums(checkpoint, ['global[]']);
+    expect(checksum.get('global[]')!.count).toEqual((numDocs + 2) * 4);
+  });
+
   test('resuming initial replication (1)', async () => {
     // Stop early - likely to not include deleted row in first replication attempt.
     await testResumingReplication(2000);
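
Note on the sizing in the new test above, as rough arithmetic: 499 rows of ~2,650 bytes each is about 1.3 MB of row data, synced into 4 buckets for roughly 5.3 MB of bucket_data; the single ~2.77 MB row adds about 11 MB more across the 4 buckets, so the raw payload alone sits near 16.4 MB, just under the 16,793,600-byte BSON cap. Per-document overhead is what pushes a single naive bulk insert over the limit (the 16,814,023-byte figure in the quoted error), so the internal batching has to split just below 16 MB, and the final small row verifies that the overflow lands cleanly in a second batch. These figures are approximate and depend on internal batching thresholds.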
@@ -1,10 +1,8 @@
-import { compareIds, putOp, removeOp } from '@core-tests/stream_utils.js';
-import { reduceBucket } from '@powersync/service-core';
-import { setTimeout } from 'node:timers/promises';
+import { compareIds, putOp, reduceBucket, removeOp, test_utils } from '@powersync/service-core-tests';
+import * as timers from 'timers/promises';
 import { describe, expect, test } from 'vitest';
 import { INITIALIZED_MONGO_STORAGE_FACTORY, StorageFactory } from './util.js';
 import { WalStreamTestContext } from './wal_stream_utils.js';
-
 describe('schema changes', { timeout: 20_000 }, function () {
   defineTests(INITIALIZED_MONGO_STORAGE_FACTORY);
 });
@@ -16,12 +14,12 @@ bucket_definitions:
       - SELECT id, * FROM "test_data"
 `;
 
-const PUT_T1 = putOp('test_data', { id: 't1', description: 'test1' });
-const PUT_T2 = putOp('test_data', { id: 't2', description: 'test2' });
-const PUT_T3 = putOp('test_data', { id: 't3', description: 'test3' });
+const PUT_T1 = test_utils.putOp('test_data', { id: 't1', description: 'test1' });
+const PUT_T2 = test_utils.putOp('test_data', { id: 't2', description: 'test2' });
+const PUT_T3 = test_utils.putOp('test_data', { id: 't3', description: 'test3' });
 
-const REMOVE_T1 = removeOp('test_data', 't1');
-const REMOVE_T2 = removeOp('test_data', 't2');
+const REMOVE_T1 = test_utils.removeOp('test_data', 't1');
+const REMOVE_T2 = test_utils.removeOp('test_data', 't2');
 
 function defineTests(factory: StorageFactory) {
   test('re-create table', async () => {
@@ -544,7 +542,7 @@ function defineTests(factory: StorageFactory) {
     );
 
     // Need some delay for the snapshot to be triggered
-    await setTimeout(5);
+    await timers.setTimeout(5);
 
     let stop = false;
 
@@ -2,15 +2,14 @@ import * as bson from 'bson';
 import { afterEach, describe, expect, test } from 'vitest';
 import { WalStream, WalStreamOptions } from '../../src/replication/WalStream.js';
 import { env } from './env.js';
-import { clearTestDb, connectPgPool, getClientCheckpoint, TEST_CONNECTION_OPTIONS } from './util.js';
+import { clearTestDb, connectPgPool, getClientCheckpoint, INITIALIZED_MONGO_STORAGE_FACTORY, TEST_CONNECTION_OPTIONS } from './util.js';
 
 import * as pgwire from '@powersync/service-jpgwire';
 import { SqliteRow } from '@powersync/service-sync-rules';
 
-import { mapOpEntry, MongoBucketStorage } from '@/storage/storage-index.js';
-import { validateCompactedBucket } from '@core-tests/bucket_validation.js';
-import { MONGO_STORAGE_FACTORY, StorageFactory } from '@core-tests/util.js';
 import { PgManager } from '@module/replication/PgManager.js';
+import { test_utils } from '@powersync/service-core-tests';
+import * as mongo_module from '@powersync/service-module-mongodb';
 import * as timers from 'node:timers/promises';
 import { reduceBucket } from '@powersync/service-core';
 
@@ -18,14 +17,14 @@ describe('slow tests - mongodb', function () {
   // These are slow, inconsistent tests.
   // Not run on every test run, but we do run on CI, or when manually debugging issues.
   if (env.CI || env.SLOW_TESTS) {
-    defineSlowTests(MONGO_STORAGE_FACTORY);
+    defineSlowTests(INITIALIZED_MONGO_STORAGE_FACTORY);
   } else {
     // Need something in this file.
     test('no-op', () => {});
   }
 });
 
-function defineSlowTests(factory: StorageFactory) {
+function defineSlowTests(factory: test_utils.StorageFactory) {
   let walStream: WalStream | undefined;
   let connections: PgManager | undefined;
   let abortController: AbortController | undefined;
@@ -74,7 +73,7 @@ function defineSlowTests(factory: StorageFactory) {
     const replicationConnection = await connections.replicationConnection();
     const pool = connections.pool;
     await clearTestDb(pool);
-    const f = (await factory()) as MongoBucketStorage;
+    const f = (await factory()) as mongo_module.storage.MongoBucketStorage;
 
     const syncRuleContent = `
 bucket_definitions:
@@ -171,13 +170,13 @@ bucket_definitions:
       const checkpoint = BigInt((await storage.getCheckpoint()).checkpoint);
       const opsBefore = (await f.db.bucket_data.find().sort({ _id: 1 }).toArray())
         .filter((row) => row._id.o <= checkpoint)
-        .map(mapOpEntry);
+        .map(mongo_module.storage.mapOpEntry);
       await storage.compact({ maxOpId: checkpoint });
       const opsAfter = (await f.db.bucket_data.find().sort({ _id: 1 }).toArray())
         .filter((row) => row._id.o <= checkpoint)
-        .map(mapOpEntry);
+        .map(mongo_module.storage.mapOpEntry);
 
-      validateCompactedBucket(opsBefore, opsAfter);
+      test_utils.validateCompactedBucket(opsBefore, opsAfter);
     }
   };
 
@@ -202,8 +201,8 @@ bucket_definitions:
     const ops = await f.db.bucket_data.find().sort({ _id: 1 }).toArray();
 
     // All a single bucket in this test
-    const bucket = ops.map((op) => mapOpEntry(op));
-    const reduced = reduceBucket(bucket);
+    const bucket = ops.map((op) => mongo_module.storage.mapOpEntry(op));
+    const reduced = test_utils.reduceBucket(bucket);
     expect(reduced).toMatchObject([
       {
         op_id: '0',
package/test/src/util.ts CHANGED
@@ -1,12 +1,12 @@
-import { connectMongo } from '@core-tests/util.js';
+import { PostgresRouteAPIAdapter } from '@module/api/PostgresRouteAPIAdapter.js';
 import * as types from '@module/types/types.js';
 import * as pg_utils from '@module/utils/pgwire_utils.js';
 import { logger } from '@powersync/lib-services-framework';
-import { BucketStorageFactory, Metrics, MongoBucketStorage, OpId } from '@powersync/service-core';
+import { BucketStorageFactory, Metrics, OpId } from '@powersync/service-core';
+import { test_utils } from '@powersync/service-core-tests';
 import * as pgwire from '@powersync/service-jpgwire';
-import { pgwireRows } from '@powersync/service-jpgwire';
+import * as mongo_module from '@powersync/service-module-mongodb';
 import { env } from './env.js';
-import { PostgresRouteAPIAdapter } from '@module/api/PostgresRouteAPIAdapter.js';
 
 // The metrics need to be initialized before they can be used
 await Metrics.initialise({
@@ -26,7 +26,7 @@ export const TEST_CONNECTION_OPTIONS = types.normalizeConnectionConfig({
 
 export type StorageFactory = () => Promise<BucketStorageFactory>;
 
-export const INITIALIZED_MONGO_STORAGE_FACTORY: StorageFactory = async () => {
+export const INITIALIZED_MONGO_STORAGE_FACTORY: StorageFactory = async (options?: test_utils.StorageOptions) => {
   const db = await connectMongo();
 
   // None of the PG tests insert data into this collection, so it was never created
@@ -34,13 +34,26 @@ export const INITIALIZED_MONGO_STORAGE_FACTORY: StorageFactory = async () => {
     await db.db.createCollection('bucket_parameters');
   }
 
-  await db.clear();
+  if (!options?.doNotClear) {
+    await db.clear();
+  }
 
-  return new MongoBucketStorage(db, {
+  return new mongo_module.storage.MongoBucketStorage(db, {
     slot_name_prefix: 'test_'
   });
 };
 
+export async function connectMongo() {
+  // Short timeout for tests, to fail fast when the server is not available.
+  // Slightly longer timeouts for CI, to avoid arbitrary test failures
+  const client = mongo_module.storage.createMongoClient(env.MONGO_TEST_URL, {
+    connectTimeoutMS: env.CI ? 15_000 : 5_000,
+    socketTimeoutMS: env.CI ? 15_000 : 5_000,
+    serverSelectionTimeoutMS: env.CI ? 15_000 : 2_500
+  });
+  return new mongo_module.storage.PowerSyncMongo(client);
+}
+
 export async function clearTestDb(db: pgwire.PgClient) {
   await db.query(
     "select pg_drop_replication_slot(slot_name) from pg_replication_slots where active = false and slot_name like 'test_%'"
@@ -1,10 +1,12 @@
-import { MONGO_STORAGE_FACTORY } from '@core-tests/util.js';
 import { getDebugTablesInfo } from '@module/replication/replication-utils.js';
 import { expect, test } from 'vitest';
+
+// Not quite a walStreamTest, but it helps to manage the connection
+import { INITIALIZED_MONGO_STORAGE_FACTORY } from './util.js';
 import { WalStreamTestContext } from './wal_stream_utils.js';
 
 test('validate tables', async () => {
-  await using context = await WalStreamTestContext.open(MONGO_STORAGE_FACTORY);
+  await using context = await WalStreamTestContext.open(INITIALIZED_MONGO_STORAGE_FACTORY);
   const { pool } = context;
 
   await pool.query(`CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`);
@@ -1,10 +1,11 @@
-import { putOp, removeOp } from '@core-tests/stream_utils.js';
-import { MONGO_STORAGE_FACTORY } from '@core-tests/util.js';
 import { BucketStorageFactory, Metrics } from '@powersync/service-core';
+import { putOp, removeOp } from '@powersync/service-core-tests';
 import { pgwireRows } from '@powersync/service-jpgwire';
 import * as crypto from 'crypto';
 import { describe, expect, test } from 'vitest';
+import { INITIALIZED_MONGO_STORAGE_FACTORY } from './util.js';
 import { WalStreamTestContext } from './wal_stream_utils.js';
+import { MissingReplicationSlotError } from '@module/replication/WalStream.js';
 
 type StorageFactory = () => Promise<BucketStorageFactory>;
 
@@ -16,7 +17,7 @@ bucket_definitions:
 `;
 
 describe('wal stream - mongodb', { timeout: 20_000 }, function () {
-  defineWalStreamTests(MONGO_STORAGE_FACTORY);
+  defineWalStreamTests(INITIALIZED_MONGO_STORAGE_FACTORY);
 });
 
 function defineWalStreamTests(factory: StorageFactory) {
@@ -291,4 +292,52 @@ bucket_definitions:
     expect(endRowCount - startRowCount).toEqual(0);
     expect(endTxCount - startTxCount).toEqual(1);
   });
+
+  test('reporting slot issues', async () => {
+    {
+      await using context = await WalStreamTestContext.open(factory);
+      const { pool } = context;
+      await context.updateSyncRules(`
+bucket_definitions:
+  global:
+    data:
+      - SELECT id, description FROM "test_data"`);
+
+      await pool.query(
+        `CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text, num int8)`
+      );
+      await pool.query(
+        `INSERT INTO test_data(id, description) VALUES('8133cd37-903b-4937-a022-7c8294015a3a', 'test1') returning id as test_id`
+      );
+      await context.replicateSnapshot();
+      await context.startStreaming();
+
+      const data = await context.getBucketData('global[]');
+
+      expect(data).toMatchObject([
+        putOp('test_data', {
+          id: '8133cd37-903b-4937-a022-7c8294015a3a',
+          description: 'test1'
+        })
+      ]);
+
+      expect(await context.storage!.getStatus()).toMatchObject({ active: true, snapshot_done: true });
+    }
+
+    {
+      await using context = await WalStreamTestContext.open(factory, { doNotClear: true });
+      const { pool } = context;
+      await pool.query('DROP PUBLICATION powersync');
+      await pool.query(`UPDATE test_data SET description = 'updated'`);
+      await pool.query('CREATE PUBLICATION powersync FOR ALL TABLES');
+
+      await context.loadActiveSyncRules();
+      await expect(async () => {
+        await context.replicateSnapshot();
+      }).rejects.toThrowError(MissingReplicationSlotError);
+
+      // The error is handled on a higher level, which triggers
+      // creating a new replication slot.
+    }
+  });
 }
@@ -1,10 +1,9 @@
-import { fromAsync } from '@core-tests/stream_utils.js';
 import { PgManager } from '@module/replication/PgManager.js';
 import { PUBLICATION_NAME, WalStream, WalStreamOptions } from '@module/replication/WalStream.js';
 import { BucketStorageFactory, OplogEntry, SyncRulesBucketStorage } from '@powersync/service-core';
+import { StorageOptions, test_utils } from '@powersync/service-core-tests';
 import * as pgwire from '@powersync/service-jpgwire';
 import { clearTestDb, getClientCheckpoint, TEST_CONNECTION_OPTIONS } from './util.js';
-import { StorageOptions } from '@core-tests/util.js';
 
 export class WalStreamTestContext implements AsyncDisposable {
   private _walStream?: WalStream;
@@ -77,6 +76,16 @@ export class WalStreamTestContext implements AsyncDisposable {
     return this.storage!;
   }
 
+  async loadActiveSyncRules() {
+    const syncRules = await this.factory.getActiveSyncRulesContent();
+    if (syncRules == null) {
+      throw new Error(`Active sync rules not available`);
+    }
+
+    this.storage = this.factory.getInstance(syncRules);
+    return this.storage!;
+  }
+
   get walStream() {
     if (this.storage == null) {
       throw new Error('updateSyncRules() first');
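
Note: loadActiveSyncRules() complements updateSyncRules(): instead of persisting new sync rules, it attaches the context to sync rules that an earlier run already activated, and throws if none exist. A short usage sketch mirroring the second half of the 'reporting slot issues' test earlier in this diff:

await using context = await WalStreamTestContext.open(factory, { doNotClear: true });
await context.loadActiveSyncRules(); // re-use rules persisted by a previous context
await expect(context.replicateSnapshot()).rejects.toThrowError(MissingReplicationSlotError);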
@@ -122,7 +131,7 @@ export class WalStreamTestContext implements AsyncDisposable {
   async getBucketsDataBatch(buckets: Record<string, string>, options?: { timeout?: number }) {
     let checkpoint = await this.getCheckpoint(options);
     const map = new Map<string, string>(Object.entries(buckets));
-    return fromAsync(this.storage!.getBucketDataBatch(checkpoint, map));
+    return test_utils.fromAsync(this.storage!.getBucketDataBatch(checkpoint, map));
   }
 
   /**
@@ -136,7 +145,7 @@ export class WalStreamTestContext implements AsyncDisposable {
     while (true) {
       const batch = this.storage!.getBucketDataBatch(checkpoint, map);
 
-      const batches = await fromAsync(batch);
+      const batches = await test_utils.fromAsync(batch);
       data = data.concat(batches[0]?.batch.data ?? []);
       if (batches.length == 0 || !batches[0]!.batch.has_more) {
         break;
@@ -154,7 +163,7 @@ export class WalStreamTestContext implements AsyncDisposable {
     const { checkpoint } = await this.storage!.getCheckpoint();
     const map = new Map<string, string>([[bucket, start]]);
     const batch = this.storage!.getBucketDataBatch(checkpoint, map);
-    const batches = await fromAsync(batch);
+    const batches = await test_utils.fromAsync(batch);
     return batches[0]?.batch.data ?? [];
   }
 }