@powersync/service-module-mongodb 0.0.0-dev-20250214100224 → 0.0.0-dev-20250303114151

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -2,7 +2,7 @@
   "name": "@powersync/service-module-mongodb",
   "repository": "https://github.com/powersync-ja/powersync-service",
   "types": "dist/index.d.ts",
-  "version": "0.0.0-dev-20250214100224",
+  "version": "0.0.0-dev-20250303114151",
   "main": "dist/index.js",
   "license": "FSL-1.1-Apache-2.0",
   "type": "module",
@@ -22,21 +22,21 @@
     }
   },
   "dependencies": {
-    "bson": "^6.8.0",
+    "bson": "^6.10.3",
     "ts-codec": "^1.3.0",
     "uuid": "^9.0.1",
-    "@powersync/lib-services-framework": "0.5.1",
-    "@powersync/service-core": "0.0.0-dev-20250214100224",
+    "@powersync/lib-services-framework": "0.5.3",
+    "@powersync/service-core": "0.0.0-dev-20250303114151",
     "@powersync/service-jsonbig": "0.17.10",
-    "@powersync/service-sync-rules": "0.23.4",
+    "@powersync/service-sync-rules": "0.24.0",
     "@powersync/service-types": "0.8.0",
-    "@powersync/lib-service-mongodb": "0.4.1"
+    "@powersync/lib-service-mongodb": "0.4.3"
   },
   "devDependencies": {
     "@types/uuid": "^9.0.4",
-    "@powersync/service-core-tests": "0.0.0-dev-20250214100224",
-    "@powersync/service-module-mongodb-storage": "0.0.0-dev-20250214100224",
-    "@powersync/service-module-postgres-storage": "0.0.0-dev-20250214100224"
+    "@powersync/service-core-tests": "0.0.0-dev-20250303114151",
+    "@powersync/service-module-mongodb-storage": "0.0.0-dev-20250303114151",
+    "@powersync/service-module-postgres-storage": "0.0.0-dev-20250303114151"
   },
   "scripts": {
     "build": "tsc -b",

package/src/replication/ChangeStream.ts CHANGED
@@ -8,13 +8,20 @@ import {
   ReplicationAssertionError,
   ServiceError
 } from '@powersync/lib-services-framework';
-import { Metrics, SaveOperationTag, SourceEntityDescriptor, SourceTable, storage } from '@powersync/service-core';
+import {
+  BSON_DESERIALIZE_DATA_OPTIONS,
+  Metrics,
+  SaveOperationTag,
+  SourceEntityDescriptor,
+  SourceTable,
+  storage
+} from '@powersync/service-core';
 import { DatabaseInputRow, SqliteRow, SqlSyncRules, TablePattern } from '@powersync/service-sync-rules';
 import { MongoLSN } from '../common/MongoLSN.js';
 import { PostImagesOption } from '../types/types.js';
 import { escapeRegExp } from '../utils.js';
 import { MongoManager } from './MongoManager.js';
-import { constructAfterRecord, createCheckpoint, getMongoRelation } from './MongoRelation.js';
+import { constructAfterRecord, createCheckpoint, getCacheIdentifier, getMongoRelation } from './MongoRelation.js';
 import { CHECKPOINTS_COLLECTION } from './replication-utils.js';
 
 export interface ChangeStreamOptions {
@@ -89,6 +96,10 @@ export class ChangeStream {
     return this.connections.options.postImages == PostImagesOption.AUTO_CONFIGURE;
   }
 
+  private get logPrefix() {
+    return `[powersync_${this.group_id}]`;
+  }
+
   /**
    * This resolves a pattern, persists the related metadata, and returns
   * the resulting SourceTables.
@@ -124,18 +135,13 @@ export class ChangeStream {
       .toArray();
 
     if (!tablePattern.isWildcard && collections.length == 0) {
-      logger.warn(`Collection ${schema}.${tablePattern.name} not found`);
+      logger.warn(`${this.logPrefix} Collection ${schema}.${tablePattern.name} not found`);
     }
 
     for (let collection of collections) {
       const table = await this.handleRelation(
         batch,
-        {
-          name: collection.name,
-          schema,
-          objectId: collection.name,
-          replicationColumns: [{ name: '_id' }]
-        } as SourceEntityDescriptor,
+        getMongoRelation({ db: schema, coll: collection.name }),
         // This is done as part of the initial setup - snapshot is handled elsewhere
         { snapshot: false, collectionInfo: collection }
       );
@@ -149,7 +155,7 @@ export class ChangeStream {
   async initSlot(): Promise<InitResult> {
     const status = await this.storage.getStatus();
     if (status.snapshot_done && status.checkpoint_lsn) {
-      logger.info(`Initial replication already done`);
+      logger.info(`${this.logPrefix} Initial replication already done`);
       return { needsInitialSync: false };
     }
 
@@ -194,39 +200,31 @@ export class ChangeStream {
       // Not known where this would happen apart from the above cases
       throw new ReplicationAssertionError('MongoDB lastWrite timestamp not found.');
     }
-    // We previously used {snapshot: true} for the snapshot session.
-    // While it gives nice consistency guarantees, it fails when the
-    // snapshot takes longer than 5 minutes, due to minSnapshotHistoryWindowInSeconds
-    // expiring the snapshot.
-    const session = await this.client.startSession();
-    try {
-      await this.storage.startBatch(
-        { zeroLSN: MongoLSN.ZERO.comparable, defaultSchema: this.defaultDb.databaseName, storeCurrentData: false },
-        async (batch) => {
-          // Start by resolving all tables.
-          // This checks postImage configuration, and that should fail as
-          // early as possible.
-          let allSourceTables: SourceTable[] = [];
-          for (let tablePattern of sourceTables) {
-            const tables = await this.resolveQualifiedTableNames(batch, tablePattern);
-            allSourceTables.push(...tables);
-          }
-
-          for (let table of allSourceTables) {
-            await this.snapshotTable(batch, table, session);
-            await batch.markSnapshotDone([table], MongoLSN.ZERO.comparable);
-
-            await touch();
-          }
-
-          const { comparable: lsn } = new MongoLSN({ timestamp: snapshotTime });
-          logger.info(`Snapshot commit at ${snapshotTime.inspect()} / ${lsn}`);
-          await batch.commit(lsn);
-        }
-      );
-    } finally {
-      session.endSession();
-    }
+
+    await this.storage.startBatch(
+      { zeroLSN: MongoLSN.ZERO.comparable, defaultSchema: this.defaultDb.databaseName, storeCurrentData: false },
+      async (batch) => {
+        // Start by resolving all tables.
+        // This checks postImage configuration, and that should fail as
+        // early as possible.
+        let allSourceTables: SourceTable[] = [];
+        for (let tablePattern of sourceTables) {
+          const tables = await this.resolveQualifiedTableNames(batch, tablePattern);
+          allSourceTables.push(...tables);
+        }
+
+        for (let table of allSourceTables) {
+          await this.snapshotTable(batch, table);
+          await batch.markSnapshotDone([table], MongoLSN.ZERO.comparable);
+
+          await touch();
+        }
+
+        const { comparable: lsn } = new MongoLSN({ timestamp: snapshotTime });
+        logger.info(`${this.logPrefix} Snapshot commit at ${snapshotTime.inspect()} / ${lsn}`);
+        await batch.commit(lsn);
+      }
+    );
   }
 
   private async setupCheckpointsCollection() {
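
The deleted comment explains the motivation for this hunk: a `{snapshot: true}` session gives point-in-time reads, but MongoDB discards snapshot history after `minSnapshotHistoryWindowInSeconds` (300 seconds by default), so any snapshot taking longer than 5 minutes would fail. The replacement relies on ordering instead of a snapshot session: capture a cluster timestamp first, scan collections without a session, then commit at that earlier timestamp so that resuming the change stream from it replays any writes that raced the scan. A minimal sketch of that ordering, assuming `snapshotTime` comes from the `hello` command's `lastWrite.majorityOpTime` (an assumption; the service's actual source for `snapshotTime` is outside this hunk):

```ts
import { MongoClient, Timestamp } from 'mongodb';

// Sketch only - not the service's API. Shows why a session-less snapshot is
// still safe: the commit LSN is derived from a timestamp captured *before*
// the scan, so change-stream replay covers anything the scan raced with.
async function initialSnapshot(client: MongoClient, dbName: string, collNames: string[]) {
  const db = client.db(dbName);

  // 1. Capture the majority-committed cluster time before reading any data.
  //    (Assumption: the real snapshotTime is derived from a source like this.)
  const hello = await db.command({ hello: 1 });
  const snapshotTime: Timestamp | undefined = hello.lastWrite?.majorityOpTime?.ts;
  if (snapshotTime == null) {
    throw new Error('MongoDB lastWrite timestamp not found.');
  }

  // 2. Scan without a session; individual reads may observe writes newer
  //    than snapshotTime.
  for (const name of collNames) {
    for await (const document of db.collection(name).find({}, { readConcern: 'majority' })) {
      // ...persist document via the storage batch in the real implementation...
    }
  }

  // 3. Commit at snapshotTime. The change stream then resumes from this
  //    timestamp and re-applies any concurrent writes, converging on a
  //    consistent state.
  return snapshotTime;
}
```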
@@ -284,58 +282,64 @@ export class ChangeStream {
     }
   }
 
-  private async snapshotTable(
-    batch: storage.BucketStorageBatch,
-    table: storage.SourceTable,
-    session?: mongo.ClientSession
-  ) {
-    logger.info(`Replicating ${table.qualifiedName}`);
+  private async snapshotTable(batch: storage.BucketStorageBatch, table: storage.SourceTable) {
+    logger.info(`${this.logPrefix} Replicating ${table.qualifiedName}`);
     const estimatedCount = await this.estimatedCount(table);
     let at = 0;
-    let lastLogIndex = 0;
-
     const db = this.client.db(table.schema);
     const collection = db.collection(table.table);
-    const query = collection.find({}, { session, readConcern: { level: 'majority' } });
-
-    const cursor = query.stream();
-
-    for await (let document of cursor) {
-      if (this.abort_signal.aborted) {
-        throw new ReplicationAbortedError(`Aborted initial replication`);
-      }
-
-      const record = constructAfterRecord(document);
-
-      // This auto-flushes when the batch reaches its size limit
-      await batch.save({
-        tag: SaveOperationTag.INSERT,
-        sourceTable: table,
-        before: undefined,
-        beforeReplicaId: undefined,
-        after: record,
-        afterReplicaId: document._id
-      });
-
-      at += 1;
-      if (at - lastLogIndex >= 5000) {
-        logger.info(`[${this.group_id}] Replicating ${table.qualifiedName} ${at}/${estimatedCount}`);
-        lastLogIndex = at;
-      }
-      Metrics.getInstance().rows_replicated_total.add(1);
+    const cursor = collection.find({}, { batchSize: 6_000, readConcern: 'majority' });
+
+    let lastBatch = performance.now();
+    // hasNext() is the call that triggers fetching of the next batch,
+    // then we read it with readBufferedDocuments(). This gives us semi-explicit
+    // control over the fetching of each batch, and avoids a separate promise per document.
+    let hasNextPromise = cursor.hasNext();
+    while (await hasNextPromise) {
+      const docBatch = cursor.readBufferedDocuments();
+      // Pre-fetch the next batch, so that we can read and write concurrently
+      hasNextPromise = cursor.hasNext();
+      for (let document of docBatch) {
+        if (this.abort_signal.aborted) {
+          throw new ReplicationAbortedError(`Aborted initial replication`);
+        }
+
+        const record = constructAfterRecord(document);
+
+        // This auto-flushes when the batch reaches its size limit
+        await batch.save({
+          tag: SaveOperationTag.INSERT,
+          sourceTable: table,
+          before: undefined,
+          beforeReplicaId: undefined,
+          after: record,
+          afterReplicaId: document._id
+        });
+      }
 
+      at += docBatch.length;
+      Metrics.getInstance().rows_replicated_total.add(docBatch.length);
+      const duration = performance.now() - lastBatch;
+      lastBatch = performance.now();
+      logger.info(
+        `${this.logPrefix} Replicating ${table.qualifiedName} ${at}/${estimatedCount} in ${duration.toFixed(0)}ms`
+      );
       await touch();
     }
+    // In case the loop was interrupted, make sure we await the last promise.
+    await hasNextPromise;
 
     await batch.flush();
-    logger.info(`Replicated ${at} documents for ${table.qualifiedName}`);
+    logger.info(`${this.logPrefix} Replicated ${at} documents for ${table.qualifiedName}`);
   }
 
   private async getRelation(
     batch: storage.BucketStorageBatch,
-    descriptor: SourceEntityDescriptor
+    descriptor: SourceEntityDescriptor,
+    options: { snapshot: boolean }
   ): Promise<SourceTable> {
-    const existing = this.relation_cache.get(descriptor.objectId);
+    const cacheId = getCacheIdentifier(descriptor);
+    const existing = this.relation_cache.get(cacheId);
     if (existing != null) {
       return existing;
     }
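
For reference, the batching pattern introduced above can be isolated: `hasNext()` is the call that performs the network round trip for the next batch, `readBufferedDocuments()` drains that batch synchronously, and re-invoking `hasNext()` before processing lets the next fetch overlap with local writes. A standalone sketch against a plain driver cursor (the helper and callback names are illustrative):

```ts
import { Collection, Document } from 'mongodb';

// Batched scan: one promise per batch instead of one per document, with the
// next network fetch overlapping processing of the current batch.
async function scanInBatches(collection: Collection, handle: (doc: Document) => Promise<void>) {
  const cursor = collection.find({}, { batchSize: 6_000, readConcern: 'majority' });

  let hasNextPromise = cursor.hasNext(); // triggers the fetch of the first batch
  while (await hasNextPromise) {
    const docBatch = cursor.readBufferedDocuments(); // drain the fetched batch synchronously
    hasNextPromise = cursor.hasNext(); // pre-fetch the next batch concurrently
    for (const document of docBatch) {
      await handle(document);
    }
  }
  // Settle the final hasNext() promise (already resolved when the loop ends normally).
  await hasNextPromise;
}
```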
@@ -344,7 +348,7 @@ export class ChangeStream {
     // missing values.
     const collection = await this.getCollectionInfo(descriptor.schema, descriptor.name);
 
-    return this.handleRelation(batch, descriptor, { snapshot: false, collectionInfo: collection });
+    return this.handleRelation(batch, descriptor, { snapshot: options.snapshot, collectionInfo: collection });
   }
 
   private async getCollectionInfo(db: string, name: string): Promise<mongo.CollectionInfo | undefined> {
@@ -375,7 +379,7 @@ export class ChangeStream {
         collMod: collectionInfo.name,
         changeStreamPreAndPostImages: { enabled: true }
       });
-      logger.info(`Enabled postImages on ${db}.${collectionInfo.name}`);
+      logger.info(`${this.logPrefix} Enabled postImages on ${db}.${collectionInfo.name}`);
     } else if (!enabled) {
       throw new ServiceError(ErrorCode.PSYNC_S1343, `postImages not enabled on ${db}.${collectionInfo.name}`);
     }
@@ -394,9 +398,6 @@ export class ChangeStream {
     }
 
     const snapshot = options.snapshot;
-    if (!descriptor.objectId && typeof descriptor.objectId != 'string') {
-      throw new ReplicationAssertionError('MongoDB replication - objectId expected');
-    }
     const result = await this.storage.resolveTable({
       group_id: this.group_id,
       connection_id: this.connection_id,
@@ -404,10 +405,16 @@ export class ChangeStream {
       entity_descriptor: descriptor,
       sync_rules: this.sync_rules
     });
-    this.relation_cache.set(descriptor.objectId, result.table);
+    this.relation_cache.set(getCacheIdentifier(descriptor), result.table);
 
-    // Drop conflicting tables. This includes for example renamed tables.
-    await batch.drop(result.dropTables);
+    // Drop conflicting collections.
+    // This is generally not expected for MongoDB source dbs, so we log an error.
+    if (result.dropTables.length > 0) {
+      logger.error(
+        `Conflicting collections found for ${JSON.stringify(descriptor)}. Dropping: ${result.dropTables.map((t) => t.id).join(', ')}`
+      );
+      await batch.drop(result.dropTables);
+    }
 
     // Snapshot if:
     // 1. Snapshot is requested (false for initial snapshot, since that process handles it elsewhere)
@@ -415,6 +422,7 @@ export class ChangeStream {
     // 3. The table is used in sync rules.
     const shouldSnapshot = snapshot && !result.table.snapshotComplete && result.table.syncAny;
     if (shouldSnapshot) {
+      logger.info(`${this.logPrefix} New collection: ${descriptor.schema}.${descriptor.name}`);
       // Truncate this table, in case a previous snapshot was interrupted.
      await batch.truncate([result.table]);
@@ -434,7 +442,7 @@ export class ChangeStream {
     change: mongo.ChangeStreamDocument
   ): Promise<storage.FlushedResult | null> {
     if (!table.syncAny) {
-      logger.debug(`Collection ${table.qualifiedName} not used in sync rules - skipping`);
+      logger.debug(`${this.logPrefix} Collection ${table.qualifiedName} not used in sync rules - skipping`);
       return null;
     }
 
@@ -528,7 +536,7 @@ export class ChangeStream {
     const startAfter = lastLsn?.timestamp;
     const resumeAfter = lastLsn?.resumeToken;
 
-    logger.info(`Resume streaming at ${startAfter?.inspect()} / ${lastLsn}`);
+    logger.info(`${this.logPrefix} Resume streaming at ${startAfter?.inspect()} / ${lastLsn}`);
 
     const filters = this.getSourceNamespaceFilters();
 
@@ -552,7 +560,6 @@ export class ChangeStream {
 
     const streamOptions: mongo.ChangeStreamOptions = {
       showExpandedEvents: true,
-      useBigInt64: true,
       maxAwaitTimeMS: 200,
       fullDocument: fullDocument
     };
@@ -591,13 +598,14 @@ export class ChangeStream {
 
     let splitDocument: mongo.ChangeStreamDocument | null = null;
 
+    let flexDbNameWorkaroundLogged = false;
+
     while (true) {
       if (this.abort_signal.aborted) {
         break;
       }
 
       const originalChangeDocument = await stream.tryNext();
-
       // The stream was closed, we will only ever receive `null` from it
       if (!originalChangeDocument && stream.closed) {
         break;
@@ -637,6 +645,29 @@ export class ChangeStream {
         throw new ReplicationAssertionError(`Incomplete splitEvent: ${JSON.stringify(splitDocument.splitEvent)}`);
       }
 
+      if (
+        !filters.multipleDatabases &&
+        'ns' in changeDocument &&
+        changeDocument.ns.db != this.defaultDb.databaseName &&
+        changeDocument.ns.db.endsWith(`_${this.defaultDb.databaseName}`)
+      ) {
+        // When all of the following conditions are met:
+        // 1. We're replicating from an Atlas Flex instance.
+        // 2. There were changestream events recorded while the PowerSync service was paused.
+        // 3. We're only replicating from a single database.
+        // Then we've observed an ns with for example {db: '67b83e86cd20730f1e766dde_ps'},
+        // instead of the expected {db: 'ps'}.
+        // Log the original name once, then correct it.
+        if (!flexDbNameWorkaroundLogged) {
+          flexDbNameWorkaroundLogged = true;
+          logger.warn(
+            `${this.logPrefix} Incorrect DB name in change stream: ${changeDocument.ns.db}. Changed to ${this.defaultDb.databaseName}.`
+          );
+        }
+        changeDocument.ns.db = this.defaultDb.databaseName;
+      }
+
       if (
         (changeDocument.operationType == 'insert' ||
           changeDocument.operationType == 'update' ||
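
Reduced to its essentials, the guard above is a pure predicate on the event's namespace, applied only when a single database is being replicated. An illustrative form, using the example value from the comment:

```ts
// Detects the Atlas Flex quirk described above: a change event reporting
// e.g. {db: '67b83e86cd20730f1e766dde_ps'} where the configured db is 'ps'.
function isFlexPrefixedDbName(eventDb: string, configuredDb: string): boolean {
  return eventDb !== configuredDb && eventDb.endsWith(`_${configuredDb}`);
}

// isFlexPrefixedDbName('67b83e86cd20730f1e766dde_ps', 'ps') // true
// isFlexPrefixedDbName('ps', 'ps')                          // false
```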
@@ -683,28 +714,44 @@ export class ChangeStream {
           waitForCheckpointLsn = await createCheckpoint(this.client, this.defaultDb);
         }
         const rel = getMongoRelation(changeDocument.ns);
-        const table = await this.getRelation(batch, rel);
+        const table = await this.getRelation(batch, rel, {
+          // In most cases, we should not need to snapshot this. But if this is the first time we see the collection
+          // for whatever reason, then we do need to snapshot it.
+          // This may result in some duplicate operations when a collection is created for the first time after
+          // sync rules were deployed.
+          snapshot: true
+        });
         if (table.syncAny) {
           await this.writeChange(batch, table, changeDocument);
         }
       } else if (changeDocument.operationType == 'drop') {
         const rel = getMongoRelation(changeDocument.ns);
-        const table = await this.getRelation(batch, rel);
+        const table = await this.getRelation(batch, rel, {
+          // We're "dropping" this collection, so never snapshot it.
+          snapshot: false
+        });
         if (table.syncAny) {
           await batch.drop([table]);
-          this.relation_cache.delete(table.objectId);
+          this.relation_cache.delete(getCacheIdentifier(rel));
         }
       } else if (changeDocument.operationType == 'rename') {
         const relFrom = getMongoRelation(changeDocument.ns);
         const relTo = getMongoRelation(changeDocument.to);
-        const tableFrom = await this.getRelation(batch, relFrom);
+        const tableFrom = await this.getRelation(batch, relFrom, {
+          // We're "dropping" this collection, so never snapshot it.
+          snapshot: false
+        });
         if (tableFrom.syncAny) {
           await batch.drop([tableFrom]);
-          this.relation_cache.delete(tableFrom.objectId);
+          this.relation_cache.delete(getCacheIdentifier(relFrom));
         }
         // Here we do need to snapshot the new table
         const collection = await this.getCollectionInfo(relTo.schema, relTo.name);
-        await this.handleRelation(batch, relTo, { snapshot: true, collectionInfo: collection });
+        await this.handleRelation(batch, relTo, {
+          // This is a new (renamed) collection, so always snapshot it.
+          snapshot: true,
+          collectionInfo: collection
+        });
       }
     }
   }

package/src/replication/ChangeStreamReplicationJob.ts CHANGED
@@ -40,8 +40,8 @@ export class ChangeStreamReplicationJob extends replication.AbstractReplicationJob {
       this.logger.error(`Replication failed`, e);
 
       if (e instanceof ChangeStreamInvalidatedError) {
-        // This stops replication on this slot, and creates a new slot
-        await this.options.storage.factory.slotRemoved(this.slotName);
+        // This stops replication and restarts with a new instance
+        await this.options.storage.factory.restartReplication(this.storage.group_id);
       }
     } finally {
       this.abortController.abort();

package/src/replication/MongoManager.ts CHANGED
@@ -1,11 +1,9 @@
 import { mongo } from '@powersync/lib-service-mongodb';
 
 import { NormalizedMongoConnectionConfig } from '../types/types.js';
+import { BSON_DESERIALIZE_DATA_OPTIONS } from '@powersync/service-core';
 
 export class MongoManager {
-  /**
-   * Do not use this for any transactions.
-   */
   public readonly client: mongo.MongoClient;
   public readonly db: mongo.Db;
 
@@ -35,6 +33,9 @@ export class MongoManager {
 
       maxConnecting: 3,
       maxIdleTimeMS: 60_000,
+
+      ...BSON_DESERIALIZE_DATA_OPTIONS,
+
       ...overrides
     });
     this.db = this.client.db(options.database, {});
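
The spread order above is deliberate: `BSON_DESERIALIZE_DATA_OPTIONS` (exported by `@powersync/service-core`) now sets BSON decoding behavior once, at the client level, so snapshot cursors and change streams decode identically; this is what allows dropping the per-stream `useBigInt64: true` flag from `streamOptions` earlier in this diff. Caller-supplied `overrides` are spread last, so they still win. A sketch of the layering, where the option object's contents are an assumption inferred from that removed flag:

```ts
import { MongoClient, MongoClientOptions } from 'mongodb';

// Assumed contents, based on the useBigInt64 stream option this diff removes;
// the real constant lives in @powersync/service-core.
const BSON_DESERIALIZE_DATA_OPTIONS: MongoClientOptions = { useBigInt64: true };

function makeClient(uri: string, overrides: MongoClientOptions = {}) {
  return new MongoClient(uri, {
    maxConnecting: 3,
    maxIdleTimeMS: 60_000,
    // Client-wide BSON behavior: every cursor and change stream inherits it...
    ...BSON_DESERIALIZE_DATA_OPTIONS,
    // ...while explicit overrides, spread last, still take precedence.
    ...overrides
  });
}
```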

package/src/replication/MongoRelation.ts CHANGED
@@ -11,11 +11,19 @@ export function getMongoRelation(source: mongo.ChangeStreamNameSpace): storage.SourceEntityDescriptor {
   return {
     name: source.coll,
     schema: source.db,
-    objectId: source.coll,
+    // Not relevant for MongoDB - we use db + coll name as the identifier
+    objectId: undefined,
     replicationColumns: [{ name: '_id' }]
   } satisfies storage.SourceEntityDescriptor;
 }
 
+/**
+ * For in-memory cache only.
+ */
+export function getCacheIdentifier(source: storage.SourceEntityDescriptor): string {
+  return `${source.schema}.${source.name}`;
+}
+
 export function constructAfterRecord(document: mongo.Document): SqliteRow {
   let record: SqliteRow = {};
   for (let key of Object.keys(document)) {
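
A note on the new helper: change stream events identify a collection only by `ns.db` and `ns.coll`; there is no stable numeric identifier comparable to, say, a Postgres relation OID. Setting `objectId: undefined` makes that explicit, and `getCacheIdentifier` derives the `schema.name` key that `ChangeStream` now uses for its `relation_cache`. The "in-memory cache only" doc comment is the important caveat: this key identifies the relation within a single process, not in persistent storage.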

package/test/src/change_stream.test.ts CHANGED
@@ -239,6 +239,7 @@ bucket_definitions:
       - SELECT _id as id, description FROM "test_DATA"
   `);
 
+  await db.createCollection('test_DATA');
   await context.replicateSnapshot();
 
   context.startStreaming();
@@ -261,6 +262,7 @@ bucket_definitions:
     data:
       - SELECT _id as id, name, description FROM "test_data"
   `);
+  await db.createCollection('test_data');
 
   await context.replicateSnapshot();
   context.startStreaming();
@@ -371,6 +373,8 @@ bucket_definitions:
       - SELECT _id as id, name, other FROM "test_data"`);
   const { db } = context;
 
+  await db.createCollection('test_data');
+
   await context.replicateSnapshot();
 
   const collection = db.collection('test_data');
@@ -451,6 +455,8 @@ bucket_definitions:
 
   const data = await context.getBucketData('global[]');
   expect(data).toMatchObject([
+    // An extra op here, since this triggers a snapshot in addition to getting the event.
+    test_utils.putOp('test_data', { id: test_id!.toHexString(), description: 'test2' }),
     test_utils.putOp('test_data', { id: test_id!.toHexString(), description: 'test1' }),
     test_utils.putOp('test_data', { id: test_id!.toHexString(), description: 'test2' })
   ]);

package/test/src/change_stream_utils.ts CHANGED
@@ -1,5 +1,5 @@
 import { mongo } from '@powersync/lib-service-mongodb';
-import { ActiveCheckpoint, BucketStorageFactory, OpId, SyncRulesBucketStorage } from '@powersync/service-core';
+import { BucketStorageFactory, OpId, ReplicationCheckpoint, SyncRulesBucketStorage } from '@powersync/service-core';
 import { test_utils } from '@powersync/service-core-tests';
 
 import { ChangeStream, ChangeStreamOptions } from '@module/replication/ChangeStream.js';
@@ -138,7 +138,7 @@ export class ChangeStreamTestContext {
 export async function getClientCheckpoint(
   client: mongo.MongoClient,
   db: mongo.Db,
-  bucketStorage: BucketStorageFactory,
+  storageFactory: BucketStorageFactory,
   options?: { timeout?: number }
 ): Promise<OpId> {
   const start = Date.now();
@@ -147,14 +147,15 @@ export async function getClientCheckpoint(
   // Since we don't use LSNs anymore, the only way to get that is to wait.
 
   const timeout = options?.timeout ?? 50_000;
-  let lastCp: ActiveCheckpoint | null = null;
+  let lastCp: ReplicationCheckpoint | null = null;
 
   while (Date.now() - start < timeout) {
-    const cp = await bucketStorage.getActiveCheckpoint();
-    lastCp = cp;
-    if (!cp.hasSyncRules()) {
+    const storage = await storageFactory.getActiveStorage();
+    const cp = await storage?.getCheckpoint();
+    if (cp == null) {
       throw new Error('No sync rules available');
     }
+    lastCp = cp;
     if (cp.lsn && cp.lsn >= lsn) {
       return cp.checkpoint;
     }
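
The loop now resolves the active sync-rules storage on each attempt and reads its checkpoint, where it previously asked the factory for an `ActiveCheckpoint` and called `hasSyncRules()` on it. The hunk cuts off before the retry delay and timeout handling; a standalone sketch of the full polling shape, where the inter-attempt sleep is an assumption:

```ts
import { BucketStorageFactory, OpId, ReplicationCheckpoint } from '@powersync/service-core';

// Sketch of the updated polling shape. The 100ms sleep and the timeout error
// are assumptions - the hunk above ends before the bottom of the loop.
async function waitForLsn(factory: BucketStorageFactory, lsn: string, timeoutMs = 50_000): Promise<OpId> {
  const start = Date.now();
  let lastCp: ReplicationCheckpoint | null = null;

  while (Date.now() - start < timeoutMs) {
    const storage = await factory.getActiveStorage();
    const cp = await storage?.getCheckpoint();
    if (cp == null) {
      throw new Error('No sync rules available');
    }
    lastCp = cp;
    if (cp.lsn && cp.lsn >= lsn) {
      return cp.checkpoint;
    }
    await new Promise((resolve) => setTimeout(resolve, 100));
  }
  throw new Error(`Timeout waiting for checkpoint >= ${lsn}; last: ${JSON.stringify(lastCp)}`);
}
```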