@powersync/service-module-mongodb 0.0.0-dev-20250122110924 → 0.0.0-dev-20250227082606

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/CHANGELOG.md +96 -11
  2. package/dist/api/MongoRouteAPIAdapter.d.ts +2 -1
  3. package/dist/api/MongoRouteAPIAdapter.js +39 -0
  4. package/dist/api/MongoRouteAPIAdapter.js.map +1 -1
  5. package/dist/common/MongoLSN.d.ts +31 -0
  6. package/dist/common/MongoLSN.js +47 -0
  7. package/dist/common/MongoLSN.js.map +1 -0
  8. package/dist/module/MongoModule.js +2 -2
  9. package/dist/module/MongoModule.js.map +1 -1
  10. package/dist/replication/ChangeStream.d.ts +4 -3
  11. package/dist/replication/ChangeStream.js +130 -52
  12. package/dist/replication/ChangeStream.js.map +1 -1
  13. package/dist/replication/ChangeStreamReplicationJob.js +7 -6
  14. package/dist/replication/ChangeStreamReplicationJob.js.map +1 -1
  15. package/dist/replication/ChangeStreamReplicator.js +1 -0
  16. package/dist/replication/ChangeStreamReplicator.js.map +1 -1
  17. package/dist/replication/ConnectionManagerFactory.js +2 -0
  18. package/dist/replication/ConnectionManagerFactory.js.map +1 -1
  19. package/dist/replication/MongoErrorRateLimiter.js +5 -7
  20. package/dist/replication/MongoErrorRateLimiter.js.map +1 -1
  21. package/dist/replication/MongoManager.d.ts +0 -3
  22. package/dist/replication/MongoManager.js +9 -4
  23. package/dist/replication/MongoManager.js.map +1 -1
  24. package/dist/replication/MongoRelation.d.ts +4 -2
  25. package/dist/replication/MongoRelation.js +10 -15
  26. package/dist/replication/MongoRelation.js.map +1 -1
  27. package/package.json +10 -10
  28. package/src/api/MongoRouteAPIAdapter.ts +41 -1
  29. package/src/common/MongoLSN.ts +74 -0
  30. package/src/replication/ChangeStream.ts +138 -57
  31. package/src/replication/ChangeStreamReplicationJob.ts +6 -6
  32. package/src/replication/MongoManager.ts +4 -3
  33. package/src/replication/MongoRelation.ts +10 -16
  34. package/test/src/change_stream.test.ts +6 -0
  35. package/test/src/change_stream_utils.ts +9 -8
  36. package/test/src/mongo_test.test.ts +47 -12
  37. package/test/src/resume.test.ts +152 -0
  38. package/test/src/util.ts +2 -1
  39. package/tsconfig.tsbuildinfo +1 -1
package/src/replication/ChangeStream.ts

@@ -1,6 +1,7 @@
 import { mongo } from '@powersync/lib-service-mongodb';
 import {
   container,
+  DatabaseConnectionError,
   ErrorCode,
   logger,
   ReplicationAbortedError,
@@ -9,20 +10,13 @@ import {
 } from '@powersync/lib-services-framework';
 import { Metrics, SaveOperationTag, SourceEntityDescriptor, SourceTable, storage } from '@powersync/service-core';
 import { DatabaseInputRow, SqliteRow, SqlSyncRules, TablePattern } from '@powersync/service-sync-rules';
+import { MongoLSN } from '../common/MongoLSN.js';
 import { PostImagesOption } from '../types/types.js';
 import { escapeRegExp } from '../utils.js';
 import { MongoManager } from './MongoManager.js';
-import {
-  constructAfterRecord,
-  createCheckpoint,
-  getMongoLsn,
-  getMongoRelation,
-  mongoLsnToTimestamp
-} from './MongoRelation.js';
+import { constructAfterRecord, createCheckpoint, getCacheIdentifier, getMongoRelation } from './MongoRelation.js';
 import { CHECKPOINTS_COLLECTION } from './replication-utils.js';
 
-export const ZERO_LSN = '0000000000000000';
-
 export interface ChangeStreamOptions {
   connections: MongoManager;
   storage: storage.SyncRulesBucketStorage;
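The new import above points at `package/src/common/MongoLSN.ts` (+74 lines in the file summary; its hunks are not shown on this page). Its shape can be inferred from the call sites in this diff: `MongoLSN.ZERO.comparable`, `new MongoLSN({ timestamp, resume_token }).comparable`, and `MongoLSN.fromSerialized(lsn)` exposing `.timestamp` and `.resumeToken`. A minimal sketch under those assumptions follows; the delimiter and resume-token serialization are illustrative guesses, not the package's actual encoding:

```ts
import { mongo } from '@powersync/lib-service-mongodb';

/**
 * Hypothetical sketch of MongoLSN, inferred from its call sites in this diff.
 * The real implementation lives in package/src/common/MongoLSN.ts.
 */
export class MongoLSN {
  // Replaces the removed ZERO_LSN = '0000000000000000' constant.
  static ZERO = new MongoLSN({ timestamp: mongo.Timestamp.fromBits(0, 0) });

  static fromSerialized(comparable: string): MongoLSN {
    // Assumed layout: 16 hex characters of cluster timestamp, optionally
    // followed by a delimiter and a serialized change stream resume token.
    const [timestampPart, resumePart] = comparable.split('|');
    const high = parseInt(timestampPart.substring(0, 8), 16);
    const low = parseInt(timestampPart.substring(8, 16), 16);
    return new MongoLSN({
      timestamp: mongo.Timestamp.fromBits(low, high),
      resume_token: resumePart ? JSON.parse(resumePart) : undefined
    });
  }

  constructor(private options: { timestamp: mongo.Timestamp; resume_token?: mongo.ResumeToken }) {}

  get timestamp(): mongo.Timestamp {
    return this.options.timestamp;
  }

  get resumeToken(): mongo.ResumeToken | undefined {
    return this.options.resume_token;
  }

  /**
   * Sorts lexicographically in cluster-timestamp order, so plain string
   * comparisons like `lsn >= waitForCheckpointLsn` keep working.
   */
  get comparable(): string {
    const { timestamp } = this.options;
    const prefix = timestamp.high.toString(16).padStart(8, '0') + timestamp.low.toString(16).padStart(8, '0');
    return this.resumeToken ? `${prefix}|${JSON.stringify(this.resumeToken)}` : prefix;
  }

  toString() {
    return this.comparable;
  }
}
```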
@@ -41,9 +35,9 @@ interface InitResult {
  * * Some change stream documents do not have postImages.
  * * startAfter/resumeToken is not valid anymore.
  */
-export class ChangeStreamInvalidatedError extends Error {
-  constructor(message: string) {
-    super(message);
+export class ChangeStreamInvalidatedError extends DatabaseConnectionError {
+  constructor(message: string, cause: any) {
+    super(ErrorCode.PSYNC_S1344, message, cause);
   }
 }
 
@@ -95,6 +89,10 @@ export class ChangeStream {
     return this.connections.options.postImages == PostImagesOption.AUTO_CONFIGURE;
   }
 
+  private get logPrefix() {
+    return `[powersync_${this.group_id}]`;
+  }
+
   /**
    * This resolves a pattern, persists the related metadata, and returns
    * the resulting SourceTables.
@@ -130,18 +128,13 @@
       .toArray();
 
     if (!tablePattern.isWildcard && collections.length == 0) {
-      logger.warn(`Collection ${schema}.${tablePattern.name} not found`);
+      logger.warn(`${this.logPrefix} Collection ${schema}.${tablePattern.name} not found`);
     }
 
     for (let collection of collections) {
       const table = await this.handleRelation(
         batch,
-        {
-          name: collection.name,
-          schema,
-          objectId: collection.name,
-          replicationColumns: [{ name: '_id' }]
-        } as SourceEntityDescriptor,
+        getMongoRelation({ db: schema, coll: collection.name }),
         // This is done as part of the initial setup - snapshot is handled elsewhere
         { snapshot: false, collectionInfo: collection }
       );
@@ -155,7 +148,7 @@
   async initSlot(): Promise<InitResult> {
     const status = await this.storage.getStatus();
     if (status.snapshot_done && status.checkpoint_lsn) {
-      logger.info(`Initial replication already done`);
+      logger.info(`${this.logPrefix} Initial replication already done`);
       return { needsInitialSync: false };
     }
 
@@ -164,7 +157,7 @@
 
   async estimatedCount(table: storage.SourceTable): Promise<string> {
     const db = this.client.db(table.schema);
-    const count = db.collection(table.table).estimatedDocumentCount();
+    const count = await db.collection(table.table).estimatedDocumentCount();
     return `~${count}`;
   }
 
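Note the bug fixed in `estimatedCount` above: `estimatedDocumentCount()` returns a Promise, so without the `await` the template literal stringified a pending Promise and the method returned `~[object Promise]` instead of an actual count.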
@@ -207,7 +200,7 @@
     const session = await this.client.startSession();
     try {
       await this.storage.startBatch(
-        { zeroLSN: ZERO_LSN, defaultSchema: this.defaultDb.databaseName, storeCurrentData: false },
+        { zeroLSN: MongoLSN.ZERO.comparable, defaultSchema: this.defaultDb.databaseName, storeCurrentData: false },
         async (batch) => {
           // Start by resolving all tables.
           // This checks postImage configuration, and that should fail as
@@ -220,13 +213,13 @@
 
           for (let table of allSourceTables) {
             await this.snapshotTable(batch, table, session);
-            await batch.markSnapshotDone([table], ZERO_LSN);
+            await batch.markSnapshotDone([table], MongoLSN.ZERO.comparable);
 
             await touch();
           }
 
-          const lsn = getMongoLsn(snapshotTime);
-          logger.info(`Snapshot commit at ${snapshotTime.inspect()} / ${lsn}`);
+          const { comparable: lsn } = new MongoLSN({ timestamp: snapshotTime });
+          logger.info(`${this.logPrefix} Snapshot commit at ${snapshotTime.inspect()} / ${lsn}`);
           await batch.commit(lsn);
         }
       );
@@ -295,9 +288,10 @@
     table: storage.SourceTable,
     session?: mongo.ClientSession
   ) {
-    logger.info(`Replicating ${table.qualifiedName}`);
+    logger.info(`${this.logPrefix} Replicating ${table.qualifiedName}`);
     const estimatedCount = await this.estimatedCount(table);
     let at = 0;
+    let lastLogIndex = 0;
 
     const db = this.client.db(table.schema);
     const collection = db.collection(table.table);
@@ -310,8 +304,6 @@
         throw new ReplicationAbortedError(`Aborted initial replication`);
       }
 
-      at += 1;
-
       const record = constructAfterRecord(document);
 
       // This auto-flushes when the batch reaches its size limit
@@ -325,20 +317,26 @@
       });
 
       at += 1;
+      if (at - lastLogIndex >= 5000) {
+        logger.info(`${this.logPrefix} Replicating ${table.qualifiedName} ${at}/${estimatedCount}`);
+        lastLogIndex = at;
+      }
       Metrics.getInstance().rows_replicated_total.add(1);
 
       await touch();
     }
 
     await batch.flush();
-    logger.info(`Replicated ${at} documents for ${table.qualifiedName}`);
+    logger.info(`${this.logPrefix} Replicated ${at} documents for ${table.qualifiedName}`);
   }
 
   private async getRelation(
     batch: storage.BucketStorageBatch,
-    descriptor: SourceEntityDescriptor
+    descriptor: SourceEntityDescriptor,
+    options: { snapshot: boolean }
   ): Promise<SourceTable> {
-    const existing = this.relation_cache.get(descriptor.objectId);
+    const cacheId = getCacheIdentifier(descriptor);
+    const existing = this.relation_cache.get(cacheId);
     if (existing != null) {
       return existing;
     }
@@ -347,7 +345,7 @@
     // missing values.
     const collection = await this.getCollectionInfo(descriptor.schema, descriptor.name);
 
-    return this.handleRelation(batch, descriptor, { snapshot: false, collectionInfo: collection });
+    return this.handleRelation(batch, descriptor, { snapshot: options.snapshot, collectionInfo: collection });
   }
 
   private async getCollectionInfo(db: string, name: string): Promise<mongo.CollectionInfo | undefined> {
@@ -378,7 +376,7 @@
         collMod: collectionInfo.name,
         changeStreamPreAndPostImages: { enabled: true }
       });
-      logger.info(`Enabled postImages on ${db}.${collectionInfo.name}`);
+      logger.info(`${this.logPrefix} Enabled postImages on ${db}.${collectionInfo.name}`);
     } else if (!enabled) {
       throw new ServiceError(ErrorCode.PSYNC_S1343, `postImages not enabled on ${db}.${collectionInfo.name}`);
     }
@@ -397,9 +395,6 @@
     }
 
     const snapshot = options.snapshot;
-    if (!descriptor.objectId && typeof descriptor.objectId != 'string') {
-      throw new ReplicationAssertionError('MongoDB replication - objectId expected');
-    }
     const result = await this.storage.resolveTable({
       group_id: this.group_id,
       connection_id: this.connection_id,
@@ -407,10 +402,16 @@
       entity_descriptor: descriptor,
       sync_rules: this.sync_rules
     });
-    this.relation_cache.set(descriptor.objectId, result.table);
+    this.relation_cache.set(getCacheIdentifier(descriptor), result.table);
 
-    // Drop conflicting tables. This includes for example renamed tables.
-    await batch.drop(result.dropTables);
+    // Drop conflicting collections.
+    // This is generally not expected for MongoDB source dbs, so we log an error.
+    if (result.dropTables.length > 0) {
+      logger.error(
+        `Conflicting collections found for ${JSON.stringify(descriptor)}. Dropping: ${result.dropTables.map((t) => t.id).join(', ')}`
+      );
+      await batch.drop(result.dropTables);
+    }
 
     // Snapshot if:
     // 1. Snapshot is requested (false for initial snapshot, since that process handles it elsewhere)
@@ -418,6 +419,7 @@
     // 3. The table is used in sync rules.
     const shouldSnapshot = snapshot && !result.table.snapshotComplete && result.table.syncAny;
     if (shouldSnapshot) {
+      logger.info(`${this.logPrefix} New collection: ${descriptor.schema}.${descriptor.name}`);
       // Truncate this table, in case a previous snapshot was interrupted.
       await batch.truncate([result.table]);
@@ -437,7 +439,7 @@
     change: mongo.ChangeStreamDocument
   ): Promise<storage.FlushedResult | null> {
     if (!table.syncAny) {
-      logger.debug(`Collection ${table.qualifiedName} not used in sync rules - skipping`);
+      logger.debug(`${this.logPrefix} Collection ${table.qualifiedName} not used in sync rules - skipping`);
       return null;
     }
 
@@ -513,7 +515,7 @@
       e.codeName == 'NoMatchingDocument' &&
       e.errmsg?.includes('post-image was not found')
     ) {
-      throw new ChangeStreamInvalidatedError(e.errmsg);
+      throw new ChangeStreamInvalidatedError(e.errmsg, e);
     }
     throw e;
   }
@@ -524,11 +526,14 @@
     await this.storage.autoActivate();
 
     await this.storage.startBatch(
-      { zeroLSN: ZERO_LSN, defaultSchema: this.defaultDb.databaseName, storeCurrentData: false },
+      { zeroLSN: MongoLSN.ZERO.comparable, defaultSchema: this.defaultDb.databaseName, storeCurrentData: false },
       async (batch) => {
-        const lastLsn = batch.lastCheckpointLsn;
-        const startAfter = mongoLsnToTimestamp(lastLsn) ?? undefined;
-        logger.info(`Resume streaming at ${startAfter?.inspect()} / ${lastLsn}`);
+        const { lastCheckpointLsn } = batch;
+        const lastLsn = lastCheckpointLsn ? MongoLSN.fromSerialized(lastCheckpointLsn) : null;
+        const startAfter = lastLsn?.timestamp;
+        const resumeAfter = lastLsn?.resumeToken;
+
+        logger.info(`${this.logPrefix} Resume streaming at ${startAfter?.inspect()} / ${lastLsn}`);
 
         const filters = this.getSourceNamespaceFilters();
 
@@ -551,12 +556,20 @@
         }
 
         const streamOptions: mongo.ChangeStreamOptions = {
-          startAtOperationTime: startAfter,
           showExpandedEvents: true,
-          useBigInt64: true,
           maxAwaitTimeMS: 200,
           fullDocument: fullDocument
        };
+
+        /**
+         * Only one of these options can be supplied at a time.
+         */
+        if (resumeAfter) {
+          streamOptions.resumeAfter = resumeAfter;
+        } else {
+          streamOptions.startAtOperationTime = startAfter;
+        }
+
         let stream: mongo.ChangeStream<mongo.Document>;
         if (filters.multipleDatabases) {
           // Requires readAnyDatabase@admin on Atlas
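For context on the either/or logic above: the MongoDB driver rejects a change stream that specifies both `resumeAfter` (a resume token, i.e. the `_id` of a previously observed event) and `startAtOperationTime` (a cluster timestamp). A minimal standalone sketch; `savedResumeToken` and `savedClusterTime` are hypothetical placeholders for the values recovered from the stored LSN:

```ts
import { mongo } from '@powersync/lib-service-mongodb';

declare const client: mongo.MongoClient;
declare const savedResumeToken: mongo.ResumeToken | undefined;
declare const savedClusterTime: mongo.Timestamp;

// Prefer exact resumption via the resume token when we have one; fall back
// to a timestamp-based start otherwise. Supplying both is a driver error.
const stream = client.db('ps').collection('test_data').watch([], {
  ...(savedResumeToken ? { resumeAfter: savedResumeToken } : { startAtOperationTime: savedClusterTime })
});
```

Resuming with a token is also more precise than `startAtOperationTime`: it continues after one specific event instead of re-reading everything at the same cluster timestamp, which is presumably why the new `MongoLSN` carries the token along.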
@@ -576,18 +589,24 @@
         });
 
         // Always start with a checkpoint.
-        // This helps us to clear erorrs when restarting, even if there is
+        // This helps us to clear errors when restarting, even if there is
         // no data to replicate.
         let waitForCheckpointLsn: string | null = await createCheckpoint(this.client, this.defaultDb);
 
         let splitDocument: mongo.ChangeStreamDocument | null = null;
 
+        let flexDbNameWorkaroundLogged = false;
+
         while (true) {
           if (this.abort_signal.aborted) {
             break;
           }
 
           const originalChangeDocument = await stream.tryNext();
+          // The stream was closed, we will only ever receive `null` from it
+          if (!originalChangeDocument && stream.closed) {
+            break;
+          }
 
           if (originalChangeDocument == null || this.abort_signal.aborted) {
             continue;
@@ -623,15 +642,61 @@
             throw new ReplicationAssertionError(`Incomplete splitEvent: ${JSON.stringify(splitDocument.splitEvent)}`);
           }
 
-          // console.log('event', changeDocument);
+          if (
+            !filters.multipleDatabases &&
+            'ns' in changeDocument &&
+            changeDocument.ns.db != this.defaultDb.databaseName &&
+            changeDocument.ns.db.endsWith(`_${this.defaultDb.databaseName}`)
+          ) {
+            // When all of the following conditions are met:
+            // 1. We're replicating from an Atlas Flex instance.
+            // 2. There were changestream events recorded while the PowerSync service is paused.
+            // 3. We're only replicating from a single database.
+            // Then we've observed an ns with for example {db: '67b83e86cd20730f1e766dde_ps'},
+            // instead of the expected {db: 'ps'}.
+            // We correct this.
+            changeDocument.ns.db = this.defaultDb.databaseName;
+
+            if (!flexDbNameWorkaroundLogged) {
+              flexDbNameWorkaroundLogged = true;
+              logger.warn(
+                `${this.logPrefix} Incorrect DB name in change stream: ${changeDocument.ns.db}. Changed to ${this.defaultDb.databaseName}.`
+              );
+            }
+          }
 
           if (
             (changeDocument.operationType == 'insert' ||
               changeDocument.operationType == 'update' ||
-              changeDocument.operationType == 'replace') &&
+              changeDocument.operationType == 'replace' ||
+              changeDocument.operationType == 'drop') &&
             changeDocument.ns.coll == CHECKPOINTS_COLLECTION
           ) {
-            const lsn = getMongoLsn(changeDocument.clusterTime!);
+            /**
+             * Dropping the database does not provide an `invalidate` event.
+             * We typically would receive `drop` events for the collection which we
+             * would process below.
+             *
+             * However we don't commit the LSN after collections are dropped.
+             * This prevents the `startAfter` or `resumeToken` from advancing past the drop events.
+             * The stream also closes after the drop events.
+             * This causes an infinite loop of processing the collection drop events.
+             *
+             * This check here invalidates the change stream if our `_checkpoints` collection
+             * is dropped. This allows for detecting when the DB is dropped.
+             */
+            if (changeDocument.operationType == 'drop') {
+              throw new ChangeStreamInvalidatedError(
+                'Internal collections have been dropped',
+                new Error('_checkpoints collection was dropped')
+              );
+            }
+
+            const { comparable: lsn } = new MongoLSN({
+              timestamp: changeDocument.clusterTime!,
+              resume_token: changeDocument._id
+            });
+
             if (waitForCheckpointLsn != null && lsn >= waitForCheckpointLsn) {
               waitForCheckpointLsn = null;
             }
@@ -646,28 +711,44 @@
              waitForCheckpointLsn = await createCheckpoint(this.client, this.defaultDb);
            }
            const rel = getMongoRelation(changeDocument.ns);
-            const table = await this.getRelation(batch, rel);
+            const table = await this.getRelation(batch, rel, {
+              // In most cases, we should not need to snapshot this. But if this is the first time we see the collection
+              // for whatever reason, then we do need to snapshot it.
+              // This may result in some duplicate operations when a collection is created for the first time after
+              // sync rules was deployed.
+              snapshot: true
+            });
            if (table.syncAny) {
              await this.writeChange(batch, table, changeDocument);
            }
          } else if (changeDocument.operationType == 'drop') {
            const rel = getMongoRelation(changeDocument.ns);
-            const table = await this.getRelation(batch, rel);
+            const table = await this.getRelation(batch, rel, {
+              // We're "dropping" this collection, so never snapshot it.
+              snapshot: false
+            });
            if (table.syncAny) {
              await batch.drop([table]);
-              this.relation_cache.delete(table.objectId);
+              this.relation_cache.delete(getCacheIdentifier(rel));
            }
          } else if (changeDocument.operationType == 'rename') {
            const relFrom = getMongoRelation(changeDocument.ns);
            const relTo = getMongoRelation(changeDocument.to);
-            const tableFrom = await this.getRelation(batch, relFrom);
+            const tableFrom = await this.getRelation(batch, relFrom, {
+              // We're "dropping" this collection, so never snapshot it.
+              snapshot: false
+            });
            if (tableFrom.syncAny) {
              await batch.drop([tableFrom]);
-              this.relation_cache.delete(tableFrom.objectId);
+              this.relation_cache.delete(getCacheIdentifier(relFrom));
            }
            // Here we do need to snapshot the new table
            const collection = await this.getCollectionInfo(relTo.schema, relTo.name);
-            await this.handleRelation(batch, relTo, { snapshot: true, collectionInfo: collection });
+            await this.handleRelation(batch, relTo, {
+              // This is a new (renamed) collection, so always snapshot it.
+              snapshot: true,
+              collectionInfo: collection
+            });
          }
        }
      }
package/src/replication/ChangeStreamReplicationJob.ts

@@ -1,4 +1,4 @@
-import { mongo } from '@powersync/lib-service-mongodb';
+import { isMongoServerError } from '@powersync/lib-service-mongodb';
 import { container } from '@powersync/lib-services-framework';
 import { replication } from '@powersync/service-core';
 
@@ -40,8 +40,8 @@ export class ChangeStreamReplicationJob extends replication.AbstractReplicationJ
       this.logger.error(`Replication failed`, e);
 
       if (e instanceof ChangeStreamInvalidatedError) {
-        // This stops replication on this slot, and creates a new slot
-        await this.options.storage.factory.slotRemoved(this.slotName);
+        // This stops replication and restarts with a new instance
+        await this.options.storage.factory.restartReplication(this.storage.group_id);
       }
     } finally {
       this.abortController.abort();
@@ -78,15 +78,15 @@ export class ChangeStreamReplicationJob extends replication.AbstractReplicationJ
       if (this.abortController.signal.aborted) {
         return;
       }
-      this.logger.error(`Replication error`, e);
+      this.logger.error(`${this.slotName} Replication error`, e);
       if (e.cause != null) {
         // Without this additional log, the cause may not be visible in the logs.
         this.logger.error(`cause`, e.cause);
       }
       if (e instanceof ChangeStreamInvalidatedError) {
         throw e;
-      } else if (e instanceof mongo.MongoError && e.hasErrorLabel('NonResumableChangeStreamError')) {
-        throw new ChangeStreamInvalidatedError(e.message);
+      } else if (isMongoServerError(e) && e.hasErrorLabel('NonResumableChangeStreamError')) {
+        throw new ChangeStreamInvalidatedError(e.message, e);
       } else {
         // Report the error if relevant, before retrying
         container.reporter.captureException(e, {
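The switch from `e instanceof mongo.MongoError` to `isMongoServerError(e)` likely avoids `instanceof` checks failing when more than one copy of the mongodb driver exists in the dependency tree. The helper is exported from `@powersync/lib-service-mongodb`; its implementation is not part of this diff, but a duck-typed check along these lines would suffice (hypothetical sketch):

```ts
import { mongo } from '@powersync/lib-service-mongodb';

// Hypothetical implementation: match on the error's name rather than its
// prototype chain, so errors from a different driver copy still match.
export function isMongoServerError(error: unknown): error is mongo.MongoServerError {
  return error instanceof Error && error.name === 'MongoServerError';
}
```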
package/src/replication/MongoManager.ts

@@ -1,11 +1,9 @@
 import { mongo } from '@powersync/lib-service-mongodb';
 
 import { NormalizedMongoConnectionConfig } from '../types/types.js';
+import { BSON_DESERIALIZE_DATA_OPTIONS } from '@powersync/service-core';
 
 export class MongoManager {
-  /**
-   * Do not use this for any transactions.
-   */
   public readonly client: mongo.MongoClient;
   public readonly db: mongo.Db;
 
@@ -35,6 +33,9 @@
 
       maxConnecting: 3,
       maxIdleTimeMS: 60_000,
+
+      ...BSON_DESERIALIZE_DATA_OPTIONS,
+
       ...overrides
     });
     this.db = this.client.db(options.database, {});
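`BSON_DESERIALIZE_DATA_OPTIONS` is defined in `@powersync/service-core` and not shown in this diff. Note that `useBigInt64: true` was removed from the change stream options earlier; plausibly it now lives in this shared constant, applied at the client level so that every read (snapshot queries included) deserializes BSON consistently. A hypothetical shape:

```ts
import { mongo } from '@powersync/lib-service-mongodb';

// Hypothetical: the actual contents live in @powersync/service-core.
export const BSON_DESERIALIZE_DATA_OPTIONS: mongo.BSONSerializeOptions = {
  // Deserialize BSON int64 values as bigint instead of a lossy JS number.
  useBigInt64: true
};
```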
package/src/replication/MongoRelation.ts

@@ -3,31 +3,25 @@ import { storage } from '@powersync/service-core';
 import { JSONBig, JsonContainer } from '@powersync/service-jsonbig';
 import { SqliteRow, SqliteValue } from '@powersync/service-sync-rules';
 
-import { CHECKPOINTS_COLLECTION } from './replication-utils.js';
 import { ErrorCode, ServiceError } from '@powersync/lib-services-framework';
+import { MongoLSN } from '../common/MongoLSN.js';
+import { CHECKPOINTS_COLLECTION } from './replication-utils.js';
 
 export function getMongoRelation(source: mongo.ChangeStreamNameSpace): storage.SourceEntityDescriptor {
   return {
     name: source.coll,
     schema: source.db,
-    objectId: source.coll,
+    // Not relevant for MongoDB - we use db + coll name as the identifier
+    objectId: undefined,
     replicationColumns: [{ name: '_id' }]
   } satisfies storage.SourceEntityDescriptor;
 }
 
-export function getMongoLsn(timestamp: mongo.Timestamp) {
-  const a = timestamp.high.toString(16).padStart(8, '0');
-  const b = timestamp.low.toString(16).padStart(8, '0');
-  return a + b;
-}
-
-export function mongoLsnToTimestamp(lsn: string | null) {
-  if (lsn == null) {
-    return null;
-  }
-  const a = parseInt(lsn.substring(0, 8), 16);
-  const b = parseInt(lsn.substring(8, 16), 16);
-  return mongo.Timestamp.fromBits(b, a);
+/**
+ * For in-memory cache only.
+ */
+export function getCacheIdentifier(source: storage.SourceEntityDescriptor): string {
+  return `${source.schema}.${source.name}`;
 }
 
 export function constructAfterRecord(document: mongo.Document): SqliteRow {
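The removed helpers document the original LSN encoding, which `MongoLSN` now subsumes. A short worked example of that encoding, using the same `fromBits(low, high)` convention as the removed code:

```ts
import { mongo } from '@powersync/lib-service-mongodb';

// A BSON Timestamp is a 64-bit value: `high` is the seconds-since-epoch part,
// `low` the increment counter. fromBits takes (low, high).
const ts = mongo.Timestamp.fromBits(0x00000007, 0x5f3e1a2b);

const a = ts.high.toString(16).padStart(8, '0'); // '5f3e1a2b'
const b = ts.low.toString(16).padStart(8, '0'); // '00000007'
console.log(a + b); // '5f3e1a2b00000007'

// Zero-padding each 32-bit half to 8 hex digits is what makes plain string
// comparison of LSNs agree with timestamp ordering.
```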
@@ -174,7 +168,7 @@ export async function createCheckpoint(client: mongo.MongoClient, db: mongo.Db):
     );
     const time = session.operationTime!;
     // TODO: Use the above when we support custom write checkpoints
-    return getMongoLsn(time);
+    return new MongoLSN({ timestamp: time }).comparable;
   } finally {
     await session.endSession();
   }
package/test/src/change_stream.test.ts

@@ -239,6 +239,7 @@ bucket_definitions:
       - SELECT _id as id, description FROM "test_DATA"
 `);
 
+    await db.createCollection('test_DATA');
     await context.replicateSnapshot();
 
     context.startStreaming();
@@ -261,6 +262,7 @@
     data:
       - SELECT _id as id, name, description FROM "test_data"
 `);
+    await db.createCollection('test_data');
 
     await context.replicateSnapshot();
     context.startStreaming();
@@ -371,6 +373,8 @@
       - SELECT _id as id, name, other FROM "test_data"`);
     const { db } = context;
 
+    await db.createCollection('test_data');
+
     await context.replicateSnapshot();
 
     const collection = db.collection('test_data');
@@ -451,6 +455,8 @@
 
     const data = await context.getBucketData('global[]');
     expect(data).toMatchObject([
+      // An extra op here, since this triggers a snapshot in addition to getting the event.
+      test_utils.putOp('test_data', { id: test_id!.toHexString(), description: 'test2' }),
       test_utils.putOp('test_data', { id: test_id!.toHexString(), description: 'test1' }),
       test_utils.putOp('test_data', { id: test_id!.toHexString(), description: 'test2' })
     ]);
package/test/src/change_stream_utils.ts

@@ -1,5 +1,5 @@
 import { mongo } from '@powersync/lib-service-mongodb';
-import { ActiveCheckpoint, BucketStorageFactory, OpId, SyncRulesBucketStorage } from '@powersync/service-core';
+import { BucketStorageFactory, OpId, ReplicationCheckpoint, SyncRulesBucketStorage } from '@powersync/service-core';
 import { test_utils } from '@powersync/service-core-tests';
 
 import { ChangeStream, ChangeStreamOptions } from '@module/replication/ChangeStream.js';
@@ -58,7 +58,7 @@ export class ChangeStreamTestContext {
   }
 
   async updateSyncRules(content: string) {
-    const syncRules = await this.factory.updateSyncRules({ content: content });
+    const syncRules = await this.factory.updateSyncRules({ content: content, validate: true });
     this.storage = this.factory.getInstance(syncRules);
     return this.storage!;
   }
@@ -85,7 +85,7 @@ export class ChangeStreamTestContext {
   }
 
   startStreaming() {
-    this.streamPromise = this.walStream.streamChanges();
+    return (this.streamPromise = this.walStream.streamChanges());
   }
 
   async getCheckpoint(options?: { timeout?: number }) {
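Returning the promise from `startStreaming()` lets tests await streaming failures directly. The new `test/src/resume.test.ts` (+152 lines, not shown here) presumably relies on this; a hypothetical usage, assuming a vitest context:

```ts
// Hypothetical test usage: assert that dropping the source database
// invalidates the change stream rather than looping forever.
await expect(context.startStreaming()).rejects.toThrow(ChangeStreamInvalidatedError);
```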
@@ -138,7 +138,7 @@
 export async function getClientCheckpoint(
   client: mongo.MongoClient,
   db: mongo.Db,
-  bucketStorage: BucketStorageFactory,
+  storageFactory: BucketStorageFactory,
   options?: { timeout?: number }
 ): Promise<OpId> {
   const start = Date.now();
@@ -147,14 +147,15 @@ export async function getClientCheckpoint(
   // Since we don't use LSNs anymore, the only way to get that is to wait.
 
   const timeout = options?.timeout ?? 50_000;
-  let lastCp: ActiveCheckpoint | null = null;
+  let lastCp: ReplicationCheckpoint | null = null;
 
   while (Date.now() - start < timeout) {
-    const cp = await bucketStorage.getActiveCheckpoint();
-    lastCp = cp;
-    if (!cp.hasSyncRules()) {
+    const storage = await storageFactory.getActiveStorage();
+    const cp = await storage?.getCheckpoint();
+    if (cp == null) {
       throw new Error('No sync rules available');
     }
+    lastCp = cp;
     if (cp.lsn && cp.lsn >= lsn) {
       return cp.checkpoint;
     }