@powersync/service-module-mongodb-storage 0.9.4 → 0.10.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/CHANGELOG.md +34 -0
  2. package/dist/migrations/db/migrations/1749720702136-checkpoint-events.d.ts +3 -0
  3. package/dist/migrations/db/migrations/1749720702136-checkpoint-events.js +34 -0
  4. package/dist/migrations/db/migrations/1749720702136-checkpoint-events.js.map +1 -0
  5. package/dist/storage/MongoBucketStorage.js +5 -0
  6. package/dist/storage/MongoBucketStorage.js.map +1 -1
  7. package/dist/storage/implementation/MongoBucketBatch.d.ts +9 -3
  8. package/dist/storage/implementation/MongoBucketBatch.js +117 -37
  9. package/dist/storage/implementation/MongoBucketBatch.js.map +1 -1
  10. package/dist/storage/implementation/MongoPersistedSyncRulesContent.d.ts +1 -0
  11. package/dist/storage/implementation/MongoPersistedSyncRulesContent.js +2 -0
  12. package/dist/storage/implementation/MongoPersistedSyncRulesContent.js.map +1 -1
  13. package/dist/storage/implementation/MongoStorageProvider.js +23 -1
  14. package/dist/storage/implementation/MongoStorageProvider.js.map +1 -1
  15. package/dist/storage/implementation/MongoSyncBucketStorage.d.ts +14 -5
  16. package/dist/storage/implementation/MongoSyncBucketStorage.js +161 -159
  17. package/dist/storage/implementation/MongoSyncBucketStorage.js.map +1 -1
  18. package/dist/storage/implementation/MongoTestStorageFactoryGenerator.js +2 -0
  19. package/dist/storage/implementation/MongoTestStorageFactoryGenerator.js.map +1 -1
  20. package/dist/storage/implementation/MongoWriteCheckpointAPI.d.ts +9 -15
  21. package/dist/storage/implementation/MongoWriteCheckpointAPI.js +55 -191
  22. package/dist/storage/implementation/MongoWriteCheckpointAPI.js.map +1 -1
  23. package/dist/storage/implementation/PersistedBatch.d.ts +6 -2
  24. package/dist/storage/implementation/PersistedBatch.js +39 -7
  25. package/dist/storage/implementation/PersistedBatch.js.map +1 -1
  26. package/dist/storage/implementation/db.d.ts +12 -1
  27. package/dist/storage/implementation/db.js +39 -0
  28. package/dist/storage/implementation/db.js.map +1 -1
  29. package/dist/storage/implementation/models.d.ts +29 -1
  30. package/package.json +7 -7
  31. package/src/migrations/db/migrations/1749720702136-checkpoint-events.ts +50 -0
  32. package/src/storage/MongoBucketStorage.ts +5 -0
  33. package/src/storage/implementation/MongoBucketBatch.ts +160 -49
  34. package/src/storage/implementation/MongoPersistedSyncRulesContent.ts +2 -0
  35. package/src/storage/implementation/MongoStorageProvider.ts +27 -1
  36. package/src/storage/implementation/MongoSyncBucketStorage.ts +187 -200
  37. package/src/storage/implementation/MongoTestStorageFactoryGenerator.ts +3 -0
  38. package/src/storage/implementation/MongoWriteCheckpointAPI.ts +66 -255
  39. package/src/storage/implementation/PersistedBatch.ts +50 -11
  40. package/src/storage/implementation/db.ts +42 -0
  41. package/src/storage/implementation/models.ts +32 -1
  42. package/test/src/__snapshots__/storage_sync.test.ts.snap +147 -0
  43. package/tsconfig.tsbuildinfo +1 -1
package/src/storage/implementation/MongoBucketBatch.ts
@@ -7,11 +7,19 @@ import {
   container,
   ErrorCode,
   errors,
-  logger,
+  Logger,
+  logger as defaultLogger,
   ReplicationAssertionError,
   ServiceError
 } from '@powersync/lib-services-framework';
-import { deserializeBson, InternalOpId, SaveOperationTag, storage, utils } from '@powersync/service-core';
+import {
+  BucketStorageMarkRecordUnavailable,
+  deserializeBson,
+  InternalOpId,
+  SaveOperationTag,
+  storage,
+  utils
+} from '@powersync/service-core';
 import * as timers from 'node:timers/promises';
 import { PowerSyncMongo } from './db.js';
 import { CurrentBucket, CurrentDataDocument, SourceKey, SyncRuleDocument } from './models.js';
@@ -46,12 +54,18 @@ export interface MongoBucketBatchOptions {
    * Set to true for initial replication.
    */
   skipExistingRows: boolean;
+
+  markRecordUnavailable: BucketStorageMarkRecordUnavailable | undefined;
+
+  logger?: Logger;
 }
 
 export class MongoBucketBatch
   extends BaseObserver<storage.BucketBatchStorageListener>
   implements storage.BucketStorageBatch
 {
+  private logger: Logger;
+
   private readonly client: mongo.MongoClient;
   public readonly db: PowerSyncMongo;
   public readonly session: mongo.ClientSession;
@@ -65,6 +79,7 @@ export class MongoBucketBatch
 
   private batch: OperationBatch | null = null;
   private write_checkpoint_batch: storage.CustomWriteCheckpointOptions[] = [];
+  private markRecordUnavailable: BucketStorageMarkRecordUnavailable | undefined;
 
   /**
    * Last LSN received associated with a checkpoint.
@@ -86,6 +101,7 @@ export class MongoBucketBatch
 
   constructor(options: MongoBucketBatchOptions) {
     super();
+    this.logger = options.logger ?? defaultLogger;
     this.client = options.db.client;
     this.db = options.db;
     this.group_id = options.groupId;
@@ -96,6 +112,7 @@ export class MongoBucketBatch
     this.sync_rules = options.syncRules;
     this.storeCurrentData = options.storeCurrentData;
     this.skipExistingRows = options.skipExistingRows;
+    this.markRecordUnavailable = options.markRecordUnavailable;
     this.batch = new OperationBatch();
 
     this.persisted_op = options.keepaliveOp ?? null;
@@ -112,32 +129,34 @@ export class MongoBucketBatch
     return this.last_checkpoint_lsn;
   }
 
-  async flush(): Promise<storage.FlushedResult | null> {
+  async flush(options?: storage.BatchBucketFlushOptions): Promise<storage.FlushedResult | null> {
     let result: storage.FlushedResult | null = null;
     // One flush may be split over multiple transactions.
     // Each flushInner() is one transaction.
-    while (this.batch != null) {
-      let r = await this.flushInner();
+    while (this.batch != null || this.write_checkpoint_batch.length > 0) {
+      let r = await this.flushInner(options);
       if (r) {
         result = r;
       }
     }
-    await batchCreateCustomWriteCheckpoints(this.db, this.write_checkpoint_batch);
-    this.write_checkpoint_batch = [];
     return result;
   }
 
-  private async flushInner(): Promise<storage.FlushedResult | null> {
+  private async flushInner(options?: storage.BatchBucketFlushOptions): Promise<storage.FlushedResult | null> {
     const batch = this.batch;
-    if (batch == null) {
-      return null;
-    }
-
     let last_op: InternalOpId | null = null;
     let resumeBatch: OperationBatch | null = null;
 
-    await this.withReplicationTransaction(`Flushing ${batch.length} ops`, async (session, opSeq) => {
-      resumeBatch = await this.replicateBatch(session, batch, opSeq);
+    await this.withReplicationTransaction(`Flushing ${batch?.length ?? 0} ops`, async (session, opSeq) => {
+      if (batch != null) {
+        resumeBatch = await this.replicateBatch(session, batch, opSeq, options);
+      }
+
+      if (this.write_checkpoint_batch.length > 0) {
+        this.logger.info(`Writing ${this.write_checkpoint_batch.length} custom write checkpoints`);
+        await batchCreateCustomWriteCheckpoints(this.db, session, this.write_checkpoint_batch, opSeq.next());
+        this.write_checkpoint_batch = [];
+      }
 
       last_op = opSeq.last();
     });
@@ -157,7 +176,8 @@ export class MongoBucketBatch
   private async replicateBatch(
     session: mongo.ClientSession,
     batch: OperationBatch,
-    op_seq: MongoIdSequence
+    op_seq: MongoIdSequence,
+    options?: storage.BucketBatchCommitOptions
   ): Promise<OperationBatch | null> {
     let sizes: Map<string, number> | undefined = undefined;
     if (this.storeCurrentData && !this.skipExistingRows) {
@@ -231,7 +251,9 @@ export class MongoBucketBatch
       current_data_lookup.set(cacheKey(doc._id.t, doc._id.k), doc);
     }
 
-    let persistedBatch: PersistedBatch | null = new PersistedBatch(this.group_id, transactionSize);
+    let persistedBatch: PersistedBatch | null = new PersistedBatch(this.group_id, transactionSize, {
+      logger: this.logger
+    });
 
     for (let op of b) {
       if (resumeBatch) {
@@ -253,7 +275,7 @@ export class MongoBucketBatch
       if (persistedBatch!.shouldFlushTransaction()) {
         // Transaction is getting big.
         // Flush, and resume in a new transaction.
-        await persistedBatch!.flush(this.db, this.session);
+        await persistedBatch!.flush(this.db, this.session, options);
         persistedBatch = null;
         // Computing our current progress is a little tricky here, since
         // we're stopping in the middle of a batch.
@@ -264,7 +286,7 @@ export class MongoBucketBatch
 
     if (persistedBatch) {
       transactionSize = persistedBatch.currentSize;
-      await persistedBatch.flush(this.db, this.session);
+      await persistedBatch.flush(this.db, this.session, options);
     }
   }
 
@@ -310,11 +332,18 @@ export class MongoBucketBatch
       // Not an error if we re-apply a transaction
       existing_buckets = [];
       existing_lookups = [];
-      // Log to help with debugging if there was a consistency issue
       if (this.storeCurrentData) {
-        logger.warn(
-          `Cannot find previous record for update on ${record.sourceTable.qualifiedName}: ${beforeId} / ${record.before?.id}`
-        );
+        if (this.markRecordUnavailable != null) {
+          // This will trigger a "resnapshot" of the record.
+          // This is not relevant if storeCurrentData is false, since we'll get the full row
+          // directly in the replication stream.
+          this.markRecordUnavailable(record);
+        } else {
+          // Log to help with debugging if there was a consistency issue
+          this.logger.warn(
+            `Cannot find previous record for update on ${record.sourceTable.qualifiedName}: ${beforeId} / ${record.before?.id}`
+          );
+        }
       }
     } else {
       existing_buckets = result.buckets;
@@ -331,8 +360,8 @@ export class MongoBucketBatch
       existing_buckets = [];
       existing_lookups = [];
       // Log to help with debugging if there was a consistency issue
-      if (this.storeCurrentData) {
-        logger.warn(
+      if (this.storeCurrentData && this.markRecordUnavailable == null) {
+        this.logger.warn(
           `Cannot find previous record for delete on ${record.sourceTable.qualifiedName}: ${beforeId} / ${record.before?.id}`
         );
       }
@@ -429,7 +458,7 @@ export class MongoBucketBatch
         }
       }
     );
-    logger.error(
+    this.logger.error(
       `Failed to evaluate data query on ${record.sourceTable.qualifiedName}.${record.after?.id}: ${error.error}`
     );
   }
@@ -469,7 +498,7 @@ export class MongoBucketBatch
         }
       }
     );
-    logger.error(
+    this.logger.error(
      `Failed to evaluate parameter query on ${record.sourceTable.qualifiedName}.${after.id}: ${error.error}`
     );
   }
@@ -523,7 +552,7 @@ export class MongoBucketBatch
       if (e instanceof mongo.MongoError && e.hasErrorLabel('TransientTransactionError')) {
         // Likely write conflict caused by concurrent write stream replicating
       } else {
-        logger.warn('Transaction error', e as Error);
+        this.logger.warn('Transaction error', e as Error);
       }
       await timers.setTimeout(Math.random() * 50);
       throw e;
@@ -548,7 +577,7 @@ export class MongoBucketBatch
     await this.withTransaction(async () => {
       flushTry += 1;
       if (flushTry % 10 == 0) {
-        logger.info(`${this.slot_name} ${description} - try ${flushTry}`);
+        this.logger.info(`${description} - try ${flushTry}`);
       }
       if (flushTry > 20 && Date.now() > lastTry) {
         throw new ServiceError(ErrorCode.PSYNC_S1402, 'Max transaction tries exceeded');
@@ -600,6 +629,7 @@ export class MongoBucketBatch
         },
         { session }
       );
+      // We don't notify checkpoint here - we don't make any checkpoint updates directly
     });
   }
 
@@ -613,17 +643,18 @@ export class MongoBucketBatch
   async commit(lsn: string, options?: storage.BucketBatchCommitOptions): Promise<boolean> {
     const { createEmptyCheckpoints } = { ...storage.DEFAULT_BUCKET_BATCH_COMMIT_OPTIONS, ...options };
 
-    await this.flush();
+    await this.flush(options);
 
     if (this.last_checkpoint_lsn != null && lsn < this.last_checkpoint_lsn) {
       // When re-applying transactions, don't create a new checkpoint until
       // we are past the last transaction.
-      logger.info(`Re-applied transaction ${lsn} - skipping checkpoint`);
+      this.logger.info(`Re-applied transaction ${lsn} - skipping checkpoint`);
+      // Cannot create a checkpoint yet - return false
      return false;
     }
     if (lsn < this.no_checkpoint_before_lsn) {
       if (Date.now() - this.lastWaitingLogThottled > 5_000) {
-        logger.info(
+        this.logger.info(
           `Waiting until ${this.no_checkpoint_before_lsn} before creating checkpoint, currently at ${lsn}. Persisted op: ${this.persisted_op}`
         );
         this.lastWaitingLogThottled = Date.now();
@@ -646,12 +677,15 @@ export class MongoBucketBatch
         },
         { session: this.session }
       );
+      await this.db.notifyCheckpoint();
 
+      // Cannot create a checkpoint yet - return false
       return false;
     }
 
     if (!createEmptyCheckpoints && this.persisted_op == null) {
-      return false;
+      // Nothing to commit - also return true
+      return true;
     }
 
     const now = new Date();
@@ -668,15 +702,34 @@ export class MongoBucketBatch
       update.last_checkpoint = this.persisted_op;
     }
 
+    // Mark relevant write checkpoints as "processed".
+    // This makes it easier to identify write checkpoints that are "valid" in order.
+    await this.db.write_checkpoints.updateMany(
+      {
+        processed_at_lsn: null,
+        'lsns.1': { $lte: lsn }
+      },
+      {
+        $set: {
+          processed_at_lsn: lsn
+        }
+      },
+      {
+        session: this.session
+      }
+    );
+
     await this.db.sync_rules.updateOne(
       {
         _id: this.group_id
       },
       {
-        $set: update
+        $set: update,
+        $unset: { snapshot_lsn: 1 }
       },
       { session: this.session }
     );
+    await this.db.notifyCheckpoint();
     this.persisted_op = null;
     this.last_checkpoint_lsn = lsn;
     return true;
@@ -695,10 +748,25 @@ export class MongoBucketBatch
     if (this.persisted_op != null) {
       // The commit may have been skipped due to "no_checkpoint_before_lsn".
       // Apply it now if relevant
-      logger.info(`Commit due to keepalive at ${lsn} / ${this.persisted_op}`);
+      this.logger.info(`Commit due to keepalive at ${lsn} / ${this.persisted_op}`);
       return await this.commit(lsn);
     }
 
+    await this.db.write_checkpoints.updateMany(
+      {
+        processed_at_lsn: null,
+        'lsns.1': { $lte: lsn }
+      },
+      {
+        $set: {
+          processed_at_lsn: lsn
+        }
+      },
+      {
+        session: this.session
+      }
+    );
+
     await this.db.sync_rules.updateOne(
       {
         _id: this.group_id
@@ -709,15 +777,33 @@ export class MongoBucketBatch
           snapshot_done: true,
           last_fatal_error: null,
           last_keepalive_ts: new Date()
-        }
+        },
+        $unset: { snapshot_lsn: 1 }
       },
       { session: this.session }
     );
+    await this.db.notifyCheckpoint();
     this.last_checkpoint_lsn = lsn;
 
     return true;
   }
 
+  async setSnapshotLsn(lsn: string): Promise<void> {
+    const update: Partial<SyncRuleDocument> = {
+      snapshot_lsn: lsn
+    };
+
+    await this.db.sync_rules.updateOne(
+      {
+        _id: this.group_id
+      },
+      {
+        $set: update
+      },
+      { session: this.session }
+    );
+  }
+
   async save(record: storage.SaveOptions): Promise<storage.FlushedResult | null> {
     const { after, before, sourceTable, tag } = record;
     for (const event of this.getTableEvents(sourceTable)) {
@@ -742,7 +828,7 @@ export class MongoBucketBatch
       return null;
     }
 
-    logger.debug(`Saving ${record.tag}:${record.before?.id}/${record.after?.id}`);
+    this.logger.debug(`Saving ${record.tag}:${record.before?.id}/${record.after?.id}`);
 
     this.batch ??= new OperationBatch();
     this.batch.push(new RecordOperation(record));
@@ -813,7 +899,7 @@ export class MongoBucketBatch
       session: session
     });
     const batch = await cursor.toArray();
-    const persistedBatch = new PersistedBatch(this.group_id, 0);
+    const persistedBatch = new PersistedBatch(this.group_id, 0, { logger: this.logger });
 
     for (let value of batch) {
       persistedBatch.saveBucketData({
@@ -843,6 +929,37 @@ export class MongoBucketBatch
     return last_op!;
   }
 
+  async updateTableProgress(
+    table: storage.SourceTable,
+    progress: Partial<storage.TableSnapshotStatus>
+  ): Promise<storage.SourceTable> {
+    const copy = table.clone();
+    const snapshotStatus = {
+      totalEstimatedCount: progress.totalEstimatedCount ?? copy.snapshotStatus?.totalEstimatedCount ?? 0,
+      replicatedCount: progress.replicatedCount ?? copy.snapshotStatus?.replicatedCount ?? 0,
+      lastKey: progress.lastKey ?? copy.snapshotStatus?.lastKey ?? null
+    };
+    copy.snapshotStatus = snapshotStatus;
+
+    await this.withTransaction(async () => {
+      await this.db.source_tables.updateOne(
+        { _id: table.id },
+        {
+          $set: {
+            snapshot_status: {
+              last_key: snapshotStatus.lastKey == null ? null : new bson.Binary(snapshotStatus.lastKey),
+              total_estimated_count: snapshotStatus.totalEstimatedCount,
+              replicated_count: snapshotStatus.replicatedCount
+            }
+          }
+        },
+        { session: this.session }
+      );
+    });
+
+    return copy;
+  }
+
   async markSnapshotDone(tables: storage.SourceTable[], no_checkpoint_before_lsn: string) {
     const session = this.session;
     const ids = tables.map((table) => table.id);
@@ -853,6 +970,9 @@ export class MongoBucketBatch
       {
         $set: {
           snapshot_done: true
+        },
+        $unset: {
+          snapshot_status: 1
         }
       },
       { session }
@@ -876,17 +996,8 @@ export class MongoBucketBatch
       }
     });
     return tables.map((table) => {
-      const copy = new storage.SourceTable(
-        table.id,
-        table.connectionTag,
-        table.objectId,
-        table.schema,
-        table.table,
-        table.replicaIdColumns,
-        table.snapshotComplete
-      );
-      copy.syncData = table.syncData;
-      copy.syncParameters = table.syncParameters;
+      const copy = table.clone();
+      copy.snapshotComplete = true;
       return copy;
     });
   }
@@ -895,7 +1006,7 @@ export class MongoBucketBatch
    * Gets relevant {@link SqlEventDescriptor}s for the given {@link SourceTable}
    */
   protected getTableEvents(table: storage.SourceTable): SqlEventDescriptor[] {
-    return this.sync_rules.event_descriptors.filter((evt) =>
+    return this.sync_rules.eventDescriptors.filter((evt) =>
       [...evt.getSourceTables()].some((sourceTable) => sourceTable.matches(table))
     );
   }
package/src/storage/implementation/MongoPersistedSyncRulesContent.ts
@@ -15,6 +15,7 @@ export class MongoPersistedSyncRulesContent implements storage.PersistedSyncRule
   public readonly last_fatal_error: string | null;
   public readonly last_keepalive_ts: Date | null;
   public readonly last_checkpoint_ts: Date | null;
+  public readonly active: boolean;
 
   public current_lock: MongoSyncRulesLock | null = null;
 
@@ -30,6 +31,7 @@ export class MongoPersistedSyncRulesContent implements storage.PersistedSyncRule
     this.last_fatal_error = doc.last_fatal_error;
     this.last_checkpoint_ts = doc.last_checkpoint_ts;
     this.last_keepalive_ts = doc.last_keepalive_ts;
+    this.active = doc.state == 'ACTIVE';
   }
 
   parsed(options: storage.ParseSyncRulesOptions) {
package/src/storage/implementation/MongoStorageProvider.ts
@@ -1,5 +1,5 @@
 import * as lib_mongo from '@powersync/lib-service-mongodb';
-import { logger, ServiceAssertionError } from '@powersync/lib-services-framework';
+import { ErrorCode, logger, ServiceAssertionError, ServiceError } from '@powersync/lib-services-framework';
 import { storage } from '@powersync/service-core';
 import { MongoStorageConfig } from '../../types/types.js';
 import { MongoBucketStorage } from '../MongoBucketStorage.js';
@@ -26,6 +26,15 @@ export class MongoStorageProvider implements storage.BucketStorageProvider {
       maxPoolSize: resolvedConfig.storage.max_pool_size ?? 8
     });
 
+    let shuttingDown = false;
+
+    // Explicitly connect on startup.
+    // Connection errors during startup are typically not recoverable - we get topologyClosed.
+    // This helps to catch the error early, along with the cause, and before the process starts
+    // to serve API requests.
+    // Errors here will cause the process to exit.
+    await client.connect();
+
     const database = new PowerSyncMongo(client, { database: resolvedConfig.storage.database });
     const factory = new MongoBucketStorage(database, {
       // TODO currently need the entire resolved config due to this
@@ -34,12 +43,29 @@ export class MongoStorageProvider implements storage.BucketStorageProvider {
     return {
       storage: factory,
       shutDown: async () => {
+        shuttingDown = true;
         await factory[Symbol.asyncDispose]();
         await client.close();
       },
       tearDown: () => {
         logger.info(`Tearing down storage: ${database.db.namespace}...`);
         return database.db.dropDatabase();
+      },
+      onFatalError: (callback) => {
+        client.addListener('topologyClosed', () => {
+          // If we're shutting down, this is expected and we can ignore it.
+          if (!shuttingDown) {
+            // Unfortunately there is no simple way to catch the cause of this issue.
+            // It most commonly happens when the process fails to _ever_ connect - connection issues after
+            // the initial connection are usually recoverable.
+            callback(
+              new ServiceError({
+                code: ErrorCode.PSYNC_S2402,
+                description: 'MongoDB topology closed - failed to connect to MongoDB storage.'
+              })
+            );
+          }
+        });
       }
     } satisfies storage.ActiveStorage;
   }
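
The new onFatalError hook surfaces the otherwise-silent topologyClosed failure mode to the service runner. A sketch of how a consumer might use it; activeStorage stands in for the storage.ActiveStorage object returned above, and the exit handling is illustrative rather than prescribed by this module:

// Sketch only - `activeStorage` is assumed to be the storage.ActiveStorage returned by this provider.
activeStorage.onFatalError?.((error) => {
  logger.error('MongoDB storage failed fatally - shutting down', error);
  process.exit(1);
});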