@powersync/service-module-mongodb 0.0.0-dev-20250117095455 → 0.0.0-dev-20250214100224

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43):
  1. package/CHANGELOG.md +57 -9
  2. package/dist/api/MongoRouteAPIAdapter.d.ts +2 -1
  3. package/dist/api/MongoRouteAPIAdapter.js +39 -0
  4. package/dist/api/MongoRouteAPIAdapter.js.map +1 -1
  5. package/dist/common/MongoLSN.d.ts +31 -0
  6. package/dist/common/MongoLSN.js +47 -0
  7. package/dist/common/MongoLSN.js.map +1 -0
  8. package/dist/module/MongoModule.d.ts +3 -2
  9. package/dist/module/MongoModule.js +15 -6
  10. package/dist/module/MongoModule.js.map +1 -1
  11. package/dist/replication/ChangeStream.d.ts +3 -3
  12. package/dist/replication/ChangeStream.js +74 -30
  13. package/dist/replication/ChangeStream.js.map +1 -1
  14. package/dist/replication/ChangeStreamReplicationJob.js +5 -4
  15. package/dist/replication/ChangeStreamReplicationJob.js.map +1 -1
  16. package/dist/replication/ChangeStreamReplicator.d.ts +1 -0
  17. package/dist/replication/ChangeStreamReplicator.js +5 -0
  18. package/dist/replication/ChangeStreamReplicator.js.map +1 -1
  19. package/dist/replication/ConnectionManagerFactory.d.ts +1 -1
  20. package/dist/replication/ConnectionManagerFactory.js +2 -0
  21. package/dist/replication/ConnectionManagerFactory.js.map +1 -1
  22. package/dist/replication/MongoErrorRateLimiter.js +5 -7
  23. package/dist/replication/MongoErrorRateLimiter.js.map +1 -1
  24. package/dist/replication/MongoManager.js +10 -4
  25. package/dist/replication/MongoManager.js.map +1 -1
  26. package/dist/replication/MongoRelation.d.ts +0 -2
  27. package/dist/replication/MongoRelation.js +4 -15
  28. package/dist/replication/MongoRelation.js.map +1 -1
  29. package/dist/replication/replication-utils.js +49 -2
  30. package/dist/replication/replication-utils.js.map +1 -1
  31. package/package.json +9 -9
  32. package/src/api/MongoRouteAPIAdapter.ts +41 -1
  33. package/src/common/MongoLSN.ts +74 -0
  34. package/src/module/MongoModule.ts +24 -8
  35. package/src/replication/ChangeStream.ts +87 -37
  36. package/src/replication/ChangeStreamReplicationJob.ts +4 -4
  37. package/src/replication/ChangeStreamReplicator.ts +5 -0
  38. package/src/replication/ConnectionManagerFactory.ts +1 -1
  39. package/src/replication/MongoRelation.ts +4 -17
  40. package/src/replication/replication-utils.ts +77 -2
  41. package/test/src/change_stream_utils.ts +2 -2
  42. package/test/src/resume.test.ts +152 -0
  43. package/tsconfig.tsbuildinfo +1 -1
@@ -1,5 +1,12 @@
1
1
  import * as lib_mongo from '@powersync/lib-service-mongodb';
2
- import { api, ConfigurationFileSyncRulesProvider, replication, system, TearDownOptions } from '@powersync/service-core';
2
+ import {
3
+ api,
4
+ ConfigurationFileSyncRulesProvider,
5
+ ConnectionTestResult,
6
+ replication,
7
+ system,
8
+ TearDownOptions
9
+ } from '@powersync/service-core';
3
10
  import { MongoRouteAPIAdapter } from '../api/MongoRouteAPIAdapter.js';
4
11
  import { ChangeStreamReplicator } from '../replication/ChangeStreamReplicator.js';
5
12
  import { ConnectionManagerFactory } from '../replication/ConnectionManagerFactory.js';
@@ -46,22 +53,31 @@ export class MongoModule extends replication.ReplicationModule<types.MongoConnec
46
53
  }
47
54
 
48
55
  async teardown(options: TearDownOptions): Promise<void> {
49
- // TODO: Implement?
56
+ // No-op
50
57
  }
51
58
 
52
- async testConnection(config: types.MongoConnectionConfig): Promise<void> {
59
+ async testConnection(config: types.MongoConnectionConfig) {
53
60
  this.decodeConfig(config);
54
- const normalisedConfig = this.resolveConfig(this.decodedConfig!);
55
- const connectionManager = new MongoManager(normalisedConfig, {
61
+ const normalizedConfig = this.resolveConfig(this.decodedConfig!);
62
+ return await MongoModule.testConnection(normalizedConfig);
63
+ }
64
+
65
+ static async testConnection(normalizedConfig: types.NormalizedMongoConnectionConfig): Promise<ConnectionTestResult> {
66
+ const connectionManager = new MongoManager(normalizedConfig, {
56
67
  // Use short timeouts for testing connections.
57
68
  // Must be < 30s, to ensure we get a proper timeout error.
58
- socketTimeoutMS: 5_000,
59
- serverSelectionTimeoutMS: 5_000
69
+ socketTimeoutMS: 1_000,
70
+ serverSelectionTimeoutMS: 1_000
60
71
  });
61
72
  try {
62
- return await checkSourceConfiguration(connectionManager);
73
+ await checkSourceConfiguration(connectionManager);
74
+ } catch (e) {
75
+ throw lib_mongo.mapConnectionError(e);
63
76
  } finally {
64
77
  await connectionManager.end();
65
78
  }
79
+ return {
80
+ connectionDescription: normalizedConfig.uri
81
+ };
66
82
  }
67
83
  }
@@ -1,21 +1,22 @@
1
1
  import { mongo } from '@powersync/lib-service-mongodb';
2
- import { container, logger } from '@powersync/lib-services-framework';
2
+ import {
3
+ container,
4
+ DatabaseConnectionError,
5
+ ErrorCode,
6
+ logger,
7
+ ReplicationAbortedError,
8
+ ReplicationAssertionError,
9
+ ServiceError
10
+ } from '@powersync/lib-services-framework';
3
11
  import { Metrics, SaveOperationTag, SourceEntityDescriptor, SourceTable, storage } from '@powersync/service-core';
4
12
  import { DatabaseInputRow, SqliteRow, SqlSyncRules, TablePattern } from '@powersync/service-sync-rules';
13
+ import { MongoLSN } from '../common/MongoLSN.js';
5
14
  import { PostImagesOption } from '../types/types.js';
6
15
  import { escapeRegExp } from '../utils.js';
7
16
  import { MongoManager } from './MongoManager.js';
8
- import {
9
- constructAfterRecord,
10
- createCheckpoint,
11
- getMongoLsn,
12
- getMongoRelation,
13
- mongoLsnToTimestamp
14
- } from './MongoRelation.js';
17
+ import { constructAfterRecord, createCheckpoint, getMongoRelation } from './MongoRelation.js';
15
18
  import { CHECKPOINTS_COLLECTION } from './replication-utils.js';
16
19
 
17
- export const ZERO_LSN = '0000000000000000';
18
-
19
20
  export interface ChangeStreamOptions {
20
21
  connections: MongoManager;
21
22
  storage: storage.SyncRulesBucketStorage;
@@ -34,9 +35,9 @@ interface InitResult {
34
35
  * * Some change stream documents do not have postImages.
35
36
  * * startAfter/resumeToken is not valid anymore.
36
37
  */
37
- export class ChangeStreamInvalidatedError extends Error {
38
- constructor(message: string) {
39
- super(message);
38
+ export class ChangeStreamInvalidatedError extends DatabaseConnectionError {
39
+ constructor(message: string, cause: any) {
40
+ super(ErrorCode.PSYNC_S1344, message, cause);
40
41
  }
41
42
  }
42
43
 
@@ -157,7 +158,7 @@ export class ChangeStream {
157
158
 
158
159
  async estimatedCount(table: storage.SourceTable): Promise<string> {
159
160
  const db = this.client.db(table.schema);
160
- const count = db.collection(table.table).estimatedDocumentCount();
161
+ const count = await db.collection(table.table).estimatedDocumentCount();
161
162
  return `~${count}`;
162
163
  }
163
164
 
@@ -180,12 +181,18 @@ export class ChangeStream {
180
181
  const hello = await this.defaultDb.command({ hello: 1 });
181
182
  const snapshotTime = hello.lastWrite?.majorityOpTime?.ts as mongo.Timestamp;
182
183
  if (hello.msg == 'isdbgrid') {
183
- throw new Error('Sharded MongoDB Clusters are not supported yet (including MongoDB Serverless instances).');
184
+ throw new ServiceError(
185
+ ErrorCode.PSYNC_S1341,
186
+ 'Sharded MongoDB Clusters are not supported yet (including MongoDB Serverless instances).'
187
+ );
184
188
  } else if (hello.setName == null) {
185
- throw new Error('Standalone MongoDB instances are not supported - use a replicaset.');
189
+ throw new ServiceError(
190
+ ErrorCode.PSYNC_S1342,
191
+ 'Standalone MongoDB instances are not supported - use a replicaset.'
192
+ );
186
193
  } else if (snapshotTime == null) {
187
194
  // Not known where this would happen apart from the above cases
188
- throw new Error('MongoDB lastWrite timestamp not found.');
195
+ throw new ReplicationAssertionError('MongoDB lastWrite timestamp not found.');
189
196
  }
190
197
  // We previously used {snapshot: true} for the snapshot session.
191
198
  // While it gives nice consistency guarantees, it fails when the
@@ -194,7 +201,7 @@ export class ChangeStream {
194
201
  const session = await this.client.startSession();
195
202
  try {
196
203
  await this.storage.startBatch(
197
- { zeroLSN: ZERO_LSN, defaultSchema: this.defaultDb.databaseName, storeCurrentData: false },
204
+ { zeroLSN: MongoLSN.ZERO.comparable, defaultSchema: this.defaultDb.databaseName, storeCurrentData: false },
198
205
  async (batch) => {
199
206
  // Start by resolving all tables.
200
207
  // This checks postImage configuration, and that should fail as
@@ -207,12 +214,12 @@ export class ChangeStream {
207
214
 
208
215
  for (let table of allSourceTables) {
209
216
  await this.snapshotTable(batch, table, session);
210
- await batch.markSnapshotDone([table], ZERO_LSN);
217
+ await batch.markSnapshotDone([table], MongoLSN.ZERO.comparable);
211
218
 
212
219
  await touch();
213
220
  }
214
221
 
215
- const lsn = getMongoLsn(snapshotTime);
222
+ const { comparable: lsn } = new MongoLSN({ timestamp: snapshotTime });
216
223
  logger.info(`Snapshot commit at ${snapshotTime.inspect()} / ${lsn}`);
217
224
  await batch.commit(lsn);
218
225
  }
@@ -285,6 +292,7 @@ export class ChangeStream {
285
292
  logger.info(`Replicating ${table.qualifiedName}`);
286
293
  const estimatedCount = await this.estimatedCount(table);
287
294
  let at = 0;
295
+ let lastLogIndex = 0;
288
296
 
289
297
  const db = this.client.db(table.schema);
290
298
  const collection = db.collection(table.table);
@@ -294,11 +302,9 @@ export class ChangeStream {
294
302
 
295
303
  for await (let document of cursor) {
296
304
  if (this.abort_signal.aborted) {
297
- throw new Error(`Aborted initial replication`);
305
+ throw new ReplicationAbortedError(`Aborted initial replication`);
298
306
  }
299
307
 
300
- at += 1;
301
-
302
308
  const record = constructAfterRecord(document);
303
309
 
304
310
  // This auto-flushes when the batch reaches its size limit
@@ -312,6 +318,10 @@ export class ChangeStream {
312
318
  });
313
319
 
314
320
  at += 1;
321
+ if (at - lastLogIndex >= 5000) {
322
+ logger.info(`[${this.group_id}] Replicating ${table.qualifiedName} ${at}/${estimatedCount}`);
323
+ lastLogIndex = at;
324
+ }
315
325
  Metrics.getInstance().rows_replicated_total.add(1);
316
326
 
317
327
  await touch();
@@ -367,7 +377,7 @@ export class ChangeStream {
367
377
  });
368
378
  logger.info(`Enabled postImages on ${db}.${collectionInfo.name}`);
369
379
  } else if (!enabled) {
370
- throw new Error(`postImages not enabled on ${db}.${collectionInfo.name}`);
380
+ throw new ServiceError(ErrorCode.PSYNC_S1343, `postImages not enabled on ${db}.${collectionInfo.name}`);
371
381
  }
372
382
  }
373
383
 
@@ -385,7 +395,7 @@ export class ChangeStream {
385
395
 
386
396
  const snapshot = options.snapshot;
387
397
  if (!descriptor.objectId && typeof descriptor.objectId != 'string') {
388
- throw new Error('objectId expected');
398
+ throw new ReplicationAssertionError('MongoDB replication - objectId expected');
389
399
  }
390
400
  const result = await this.storage.resolveTable({
391
401
  group_id: this.group_id,
@@ -466,7 +476,7 @@ export class ChangeStream {
466
476
  beforeReplicaId: change.documentKey._id
467
477
  });
468
478
  } else {
469
- throw new Error(`Unsupported operation: ${change.operationType}`);
479
+ throw new ReplicationAssertionError(`Unsupported operation: ${change.operationType}`);
470
480
  }
471
481
  }
472
482
 
@@ -500,7 +510,7 @@ export class ChangeStream {
500
510
  e.codeName == 'NoMatchingDocument' &&
501
511
  e.errmsg?.includes('post-image was not found')
502
512
  ) {
503
- throw new ChangeStreamInvalidatedError(e.errmsg);
513
+ throw new ChangeStreamInvalidatedError(e.errmsg, e);
504
514
  }
505
515
  throw e;
506
516
  }
@@ -511,10 +521,13 @@ export class ChangeStream {
511
521
  await this.storage.autoActivate();
512
522
 
513
523
  await this.storage.startBatch(
514
- { zeroLSN: ZERO_LSN, defaultSchema: this.defaultDb.databaseName, storeCurrentData: false },
524
+ { zeroLSN: MongoLSN.ZERO.comparable, defaultSchema: this.defaultDb.databaseName, storeCurrentData: false },
515
525
  async (batch) => {
516
- const lastLsn = batch.lastCheckpointLsn;
517
- const startAfter = mongoLsnToTimestamp(lastLsn) ?? undefined;
526
+ const { lastCheckpointLsn } = batch;
527
+ const lastLsn = lastCheckpointLsn ? MongoLSN.fromSerialized(lastCheckpointLsn) : null;
528
+ const startAfter = lastLsn?.timestamp;
529
+ const resumeAfter = lastLsn?.resumeToken;
530
+
518
531
  logger.info(`Resume streaming at ${startAfter?.inspect()} / ${lastLsn}`);
519
532
 
520
533
  const filters = this.getSourceNamespaceFilters();
@@ -538,12 +551,21 @@ export class ChangeStream {
538
551
  }
539
552
 
540
553
  const streamOptions: mongo.ChangeStreamOptions = {
541
- startAtOperationTime: startAfter,
542
554
  showExpandedEvents: true,
543
555
  useBigInt64: true,
544
556
  maxAwaitTimeMS: 200,
545
557
  fullDocument: fullDocument
546
558
  };
559
+
560
+ /**
561
+ * Only one of these options can be supplied at a time.
562
+ */
563
+ if (resumeAfter) {
564
+ streamOptions.resumeAfter = resumeAfter;
565
+ } else {
566
+ streamOptions.startAtOperationTime = startAfter;
567
+ }
568
+
547
569
  let stream: mongo.ChangeStream<mongo.Document>;
548
570
  if (filters.multipleDatabases) {
549
571
  // Requires readAnyDatabase@admin on Atlas
@@ -563,7 +585,7 @@ export class ChangeStream {
563
585
  });
564
586
 
565
587
  // Always start with a checkpoint.
566
- // This helps us to clear erorrs when restarting, even if there is
588
+ // This helps us to clear errors when restarting, even if there is
567
589
  // no data to replicate.
568
590
  let waitForCheckpointLsn: string | null = await createCheckpoint(this.client, this.defaultDb);
569
591
 
@@ -576,6 +598,11 @@ export class ChangeStream {
576
598
 
577
599
  const originalChangeDocument = await stream.tryNext();
578
600
 
601
+ // The stream was closed, we will only ever receive `null` from it
602
+ if (!originalChangeDocument && stream.closed) {
603
+ break;
604
+ }
605
+
579
606
  if (originalChangeDocument == null || this.abort_signal.aborted) {
580
607
  continue;
581
608
  }
@@ -607,18 +634,41 @@ export class ChangeStream {
607
634
  }
608
635
  } else if (splitDocument != null) {
609
636
  // We were waiting for fragments, but got a different event
610
- throw new Error(`Incomplete splitEvent: ${JSON.stringify(splitDocument.splitEvent)}`);
637
+ throw new ReplicationAssertionError(`Incomplete splitEvent: ${JSON.stringify(splitDocument.splitEvent)}`);
611
638
  }
612
639
 
613
- // console.log('event', changeDocument);
614
-
615
640
  if (
616
641
  (changeDocument.operationType == 'insert' ||
617
642
  changeDocument.operationType == 'update' ||
618
- changeDocument.operationType == 'replace') &&
643
+ changeDocument.operationType == 'replace' ||
644
+ changeDocument.operationType == 'drop') &&
619
645
  changeDocument.ns.coll == CHECKPOINTS_COLLECTION
620
646
  ) {
621
- const lsn = getMongoLsn(changeDocument.clusterTime!);
647
+ /**
648
+ * Dropping the database does not provide an `invalidate` event.
649
+ * We typically would receive `drop` events for the collection which we
650
+ * would process below.
651
+ *
652
+ * However we don't commit the LSN after collections are dropped.
653
+ * The prevents the `startAfter` or `resumeToken` from advancing past the drop events.
654
+ * The stream also closes after the drop events.
655
+ * This causes an infinite loop of processing the collection drop events.
656
+ *
657
+ * This check here invalidates the change stream if our `_checkpoints` collection
658
+ * is dropped. This allows for detecting when the DB is dropped.
659
+ */
660
+ if (changeDocument.operationType == 'drop') {
661
+ throw new ChangeStreamInvalidatedError(
662
+ 'Internal collections have been dropped',
663
+ new Error('_checkpoints collection was dropped')
664
+ );
665
+ }
666
+
667
+ const { comparable: lsn } = new MongoLSN({
668
+ timestamp: changeDocument.clusterTime!,
669
+ resume_token: changeDocument._id
670
+ });
671
+
622
672
  if (waitForCheckpointLsn != null && lsn >= waitForCheckpointLsn) {
623
673
  waitForCheckpointLsn = null;
624
674
  }
@@ -1,4 +1,4 @@
1
- import { mongo } from '@powersync/lib-service-mongodb';
1
+ import { isMongoServerError } from '@powersync/lib-service-mongodb';
2
2
  import { container } from '@powersync/lib-services-framework';
3
3
  import { replication } from '@powersync/service-core';
4
4
 
@@ -78,15 +78,15 @@ export class ChangeStreamReplicationJob extends replication.AbstractReplicationJ
78
78
  if (this.abortController.signal.aborted) {
79
79
  return;
80
80
  }
81
- this.logger.error(`Replication error`, e);
81
+ this.logger.error(`${this.slotName} Replication error`, e);
82
82
  if (e.cause != null) {
83
83
  // Without this additional log, the cause may not be visible in the logs.
84
84
  this.logger.error(`cause`, e.cause);
85
85
  }
86
86
  if (e instanceof ChangeStreamInvalidatedError) {
87
87
  throw e;
88
- } else if (e instanceof mongo.MongoError && e.hasErrorLabel('NonResumableChangeStreamError')) {
89
- throw new ChangeStreamInvalidatedError(e.message);
88
+ } else if (isMongoServerError(e) && e.hasErrorLabel('NonResumableChangeStreamError')) {
89
+ throw new ChangeStreamInvalidatedError(e.message, e);
90
90
  } else {
91
91
  // Report the error if relevant, before retrying
92
92
  container.reporter.captureException(e, {
@@ -2,6 +2,7 @@ import { storage, replication } from '@powersync/service-core';
2
2
  import { ChangeStreamReplicationJob } from './ChangeStreamReplicationJob.js';
3
3
  import { ConnectionManagerFactory } from './ConnectionManagerFactory.js';
4
4
  import { MongoErrorRateLimiter } from './MongoErrorRateLimiter.js';
5
+ import { MongoModule } from '../module/MongoModule.js';
5
6
 
6
7
  export interface ChangeStreamReplicatorOptions extends replication.AbstractReplicatorOptions {
7
8
  connectionFactory: ConnectionManagerFactory;
@@ -33,4 +34,8 @@ export class ChangeStreamReplicator extends replication.AbstractReplicator<Chang
33
34
  await super.stop();
34
35
  await this.connectionFactory.shutdown();
35
36
  }
37
+
38
+ async testConnection() {
39
+ return await MongoModule.testConnection(this.connectionFactory.dbConnectionConfig);
40
+ }
36
41
  }
@@ -4,7 +4,7 @@ import { MongoManager } from './MongoManager.js';
4
4
 
5
5
  export class ConnectionManagerFactory {
6
6
  private readonly connectionManagers: MongoManager[];
7
- private readonly dbConnectionConfig: NormalizedMongoConnectionConfig;
7
+ public readonly dbConnectionConfig: NormalizedMongoConnectionConfig;
8
8
 
9
9
  constructor(dbConnectionConfig: NormalizedMongoConnectionConfig) {
10
10
  this.dbConnectionConfig = dbConnectionConfig;
@@ -3,6 +3,8 @@ import { storage } from '@powersync/service-core';
3
3
  import { JSONBig, JsonContainer } from '@powersync/service-jsonbig';
4
4
  import { SqliteRow, SqliteValue } from '@powersync/service-sync-rules';
5
5
 
6
+ import { ErrorCode, ServiceError } from '@powersync/lib-services-framework';
7
+ import { MongoLSN } from '../common/MongoLSN.js';
6
8
  import { CHECKPOINTS_COLLECTION } from './replication-utils.js';
7
9
 
8
10
  export function getMongoRelation(source: mongo.ChangeStreamNameSpace): storage.SourceEntityDescriptor {
@@ -14,21 +16,6 @@ export function getMongoRelation(source: mongo.ChangeStreamNameSpace): storage.S
14
16
  } satisfies storage.SourceEntityDescriptor;
15
17
  }
16
18
 
17
- export function getMongoLsn(timestamp: mongo.Timestamp) {
18
- const a = timestamp.high.toString(16).padStart(8, '0');
19
- const b = timestamp.low.toString(16).padStart(8, '0');
20
- return a + b;
21
- }
22
-
23
- export function mongoLsnToTimestamp(lsn: string | null) {
24
- if (lsn == null) {
25
- return null;
26
- }
27
- const a = parseInt(lsn.substring(0, 8), 16);
28
- const b = parseInt(lsn.substring(8, 16), 16);
29
- return mongo.Timestamp.fromBits(b, a);
30
- }
31
-
32
19
  export function constructAfterRecord(document: mongo.Document): SqliteRow {
33
20
  let record: SqliteRow = {};
34
21
  for (let key of Object.keys(document)) {
@@ -97,7 +84,7 @@ function filterJsonData(data: any, depth = 0): any {
97
84
  const autoBigNum = true;
98
85
  if (depth > DEPTH_LIMIT) {
99
86
  // This is primarily to prevent infinite recursion
100
- throw new Error(`json nested object depth exceeds the limit of ${DEPTH_LIMIT}`);
87
+ throw new ServiceError(ErrorCode.PSYNC_S1004, `json nested object depth exceeds the limit of ${DEPTH_LIMIT}`);
101
88
  }
102
89
  if (data === null) {
103
90
  return data;
@@ -173,7 +160,7 @@ export async function createCheckpoint(client: mongo.MongoClient, db: mongo.Db):
173
160
  );
174
161
  const time = session.operationTime!;
175
162
  // TODO: Use the above when we support custom write checkpoints
176
- return getMongoLsn(time);
163
+ return new MongoLSN({ timestamp: time }).comparable;
177
164
  } finally {
178
165
  await session.endSession();
179
166
  }
@@ -1,13 +1,88 @@
1
+ import { ErrorCode, ServiceError } from '@powersync/lib-services-framework';
1
2
  import { MongoManager } from './MongoManager.js';
3
+ import { PostImagesOption } from '../types/types.js';
2
4
 
3
5
  export const CHECKPOINTS_COLLECTION = '_powersync_checkpoints';
4
6
 
7
+ const REQUIRED_CHECKPOINT_PERMISSIONS = ['find', 'insert', 'update', 'remove', 'changeStream', 'createCollection'];
8
+
5
9
  export async function checkSourceConfiguration(connectionManager: MongoManager): Promise<void> {
6
10
  const db = connectionManager.db;
11
+
7
12
  const hello = await db.command({ hello: 1 });
8
13
  if (hello.msg == 'isdbgrid') {
9
- throw new Error('Sharded MongoDB Clusters are not supported yet (including MongoDB Serverless instances).');
14
+ throw new ServiceError(
15
+ ErrorCode.PSYNC_S1341,
16
+ 'Sharded MongoDB Clusters are not supported yet (including MongoDB Serverless instances).'
17
+ );
10
18
  } else if (hello.setName == null) {
11
- throw new Error('Standalone MongoDB instances are not supported - use a replicaset.');
19
+ throw new ServiceError(ErrorCode.PSYNC_S1342, 'Standalone MongoDB instances are not supported - use a replicaset.');
20
+ }
21
+
22
+ // https://www.mongodb.com/docs/manual/reference/command/connectionStatus/
23
+ const connectionStatus = await db.command({ connectionStatus: 1, showPrivileges: true });
24
+ const priviledges = connectionStatus.authInfo?.authenticatedUserPrivileges as {
25
+ resource: { db: string; collection: string };
26
+ actions: string[];
27
+ }[];
28
+ let checkpointsActions = new Set<string>();
29
+ let anyCollectionActions = new Set<string>();
30
+ if (priviledges?.length > 0) {
31
+ const onDefaultDb = priviledges.filter((p) => p.resource.db == db.databaseName || p.resource.db == '');
32
+ const onCheckpoints = onDefaultDb.filter(
33
+ (p) => p.resource.collection == CHECKPOINTS_COLLECTION || p.resource?.collection == ''
34
+ );
35
+
36
+ for (let p of onCheckpoints) {
37
+ for (let a of p.actions) {
38
+ checkpointsActions.add(a);
39
+ }
40
+ }
41
+ for (let p of onDefaultDb) {
42
+ for (let a of p.actions) {
43
+ anyCollectionActions.add(a);
44
+ }
45
+ }
46
+
47
+ const missingCheckpointActions = REQUIRED_CHECKPOINT_PERMISSIONS.filter(
48
+ (action) => !checkpointsActions.has(action)
49
+ );
50
+ if (missingCheckpointActions.length > 0) {
51
+ const fullName = `${db.databaseName}.${CHECKPOINTS_COLLECTION}`;
52
+ throw new ServiceError(
53
+ ErrorCode.PSYNC_S1307,
54
+ `MongoDB user does not have the required ${missingCheckpointActions.map((a) => `"${a}"`).join(', ')} priviledge(s) on "${fullName}".`
55
+ );
56
+ }
57
+
58
+ if (connectionManager.options.postImages == PostImagesOption.AUTO_CONFIGURE) {
59
+ // This checks that we have collMod on _any_ collection in the db.
60
+ // This is not a complete check, but does give a basic sanity-check for testing the connection.
61
+ if (!anyCollectionActions.has('collMod')) {
62
+ throw new ServiceError(
63
+ ErrorCode.PSYNC_S1307,
64
+ `MongoDB user does not have the required "collMod" priviledge on "${db.databaseName}", required for "post_images: auto_configure".`
65
+ );
66
+ }
67
+ }
68
+ if (!anyCollectionActions.has('listCollections')) {
69
+ throw new ServiceError(
70
+ ErrorCode.PSYNC_S1307,
71
+ `MongoDB user does not have the required "listCollections" priviledge on "${db.databaseName}".`
72
+ );
73
+ }
74
+ } else {
75
+ // Assume auth is disabled.
76
+ // On Atlas, at least one role/priviledge is required for each user, which will trigger the above.
77
+
78
+ // We do still do a basic check that we can list the collection (it may not actually exist yet).
79
+ await db
80
+ .listCollections(
81
+ {
82
+ name: CHECKPOINTS_COLLECTION
83
+ },
84
+ { nameOnly: false }
85
+ )
86
+ .toArray();
12
87
  }
13
88
  }
@@ -58,7 +58,7 @@ export class ChangeStreamTestContext {
58
58
  }
59
59
 
60
60
  async updateSyncRules(content: string) {
61
- const syncRules = await this.factory.updateSyncRules({ content: content });
61
+ const syncRules = await this.factory.updateSyncRules({ content: content, validate: true });
62
62
  this.storage = this.factory.getInstance(syncRules);
63
63
  return this.storage!;
64
64
  }
@@ -85,7 +85,7 @@ export class ChangeStreamTestContext {
85
85
  }
86
86
 
87
87
  startStreaming() {
88
- this.streamPromise = this.walStream.streamChanges();
88
+ return (this.streamPromise = this.walStream.streamChanges());
89
89
  }
90
90
 
91
91
  async getCheckpoint(options?: { timeout?: number }) {