@powersync/service-module-mongodb 0.0.0-dev-20241001150444

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. package/CHANGELOG.md +15 -0
  2. package/LICENSE +67 -0
  3. package/README.md +3 -0
  4. package/dist/api/MongoRouteAPIAdapter.d.ts +22 -0
  5. package/dist/api/MongoRouteAPIAdapter.js +64 -0
  6. package/dist/api/MongoRouteAPIAdapter.js.map +1 -0
  7. package/dist/index.d.ts +3 -0
  8. package/dist/index.js +4 -0
  9. package/dist/index.js.map +1 -0
  10. package/dist/module/MongoModule.d.ts +13 -0
  11. package/dist/module/MongoModule.js +46 -0
  12. package/dist/module/MongoModule.js.map +1 -0
  13. package/dist/replication/ChangeStream.d.ts +53 -0
  14. package/dist/replication/ChangeStream.js +389 -0
  15. package/dist/replication/ChangeStream.js.map +1 -0
  16. package/dist/replication/ChangeStreamReplicationJob.d.ts +16 -0
  17. package/dist/replication/ChangeStreamReplicationJob.js +90 -0
  18. package/dist/replication/ChangeStreamReplicationJob.js.map +1 -0
  19. package/dist/replication/ChangeStreamReplicator.d.ts +13 -0
  20. package/dist/replication/ChangeStreamReplicator.js +26 -0
  21. package/dist/replication/ChangeStreamReplicator.js.map +1 -0
  22. package/dist/replication/ConnectionManagerFactory.d.ts +9 -0
  23. package/dist/replication/ConnectionManagerFactory.js +21 -0
  24. package/dist/replication/ConnectionManagerFactory.js.map +1 -0
  25. package/dist/replication/MongoErrorRateLimiter.d.ts +11 -0
  26. package/dist/replication/MongoErrorRateLimiter.js +44 -0
  27. package/dist/replication/MongoErrorRateLimiter.js.map +1 -0
  28. package/dist/replication/MongoManager.d.ts +14 -0
  29. package/dist/replication/MongoManager.js +36 -0
  30. package/dist/replication/MongoManager.js.map +1 -0
  31. package/dist/replication/MongoRelation.d.ts +9 -0
  32. package/dist/replication/MongoRelation.js +174 -0
  33. package/dist/replication/MongoRelation.js.map +1 -0
  34. package/dist/replication/replication-index.d.ts +4 -0
  35. package/dist/replication/replication-index.js +5 -0
  36. package/dist/replication/replication-index.js.map +1 -0
  37. package/dist/types/types.d.ts +51 -0
  38. package/dist/types/types.js +37 -0
  39. package/dist/types/types.js.map +1 -0
  40. package/package.json +47 -0
  41. package/src/api/MongoRouteAPIAdapter.ts +86 -0
  42. package/src/index.ts +5 -0
  43. package/src/module/MongoModule.ts +52 -0
  44. package/src/replication/ChangeStream.ts +503 -0
  45. package/src/replication/ChangeStreamReplicationJob.ts +104 -0
  46. package/src/replication/ChangeStreamReplicator.ts +36 -0
  47. package/src/replication/ConnectionManagerFactory.ts +27 -0
  48. package/src/replication/MongoErrorRateLimiter.ts +45 -0
  49. package/src/replication/MongoManager.ts +47 -0
  50. package/src/replication/MongoRelation.ts +156 -0
  51. package/src/replication/replication-index.ts +4 -0
  52. package/src/types/types.ts +65 -0
  53. package/test/src/change_stream.test.ts +306 -0
  54. package/test/src/change_stream_utils.ts +148 -0
  55. package/test/src/env.ts +7 -0
  56. package/test/src/mongo_test.test.ts +219 -0
  57. package/test/src/setup.ts +7 -0
  58. package/test/src/util.ts +52 -0
  59. package/test/tsconfig.json +28 -0
  60. package/tsconfig.json +28 -0
  61. package/tsconfig.tsbuildinfo +1 -0
  62. package/vitest.config.ts +9 -0
@@ -0,0 +1,503 @@
1
+ import { container, logger } from '@powersync/lib-services-framework';
2
+ import { Metrics, SourceEntityDescriptor, SourceTable, storage } from '@powersync/service-core';
3
+ import { DatabaseInputRow, SqliteRow, SqlSyncRules, TablePattern, toSyncRulesRow } from '@powersync/service-sync-rules';
4
+ import * as mongo from 'mongodb';
5
+ import { MongoManager } from './MongoManager.js';
6
+ import {
7
+ constructAfterRecord,
8
+ createCheckpoint,
9
+ getMongoLsn,
10
+ getMongoRelation,
11
+ mongoLsnToTimestamp
12
+ } from './MongoRelation.js';
13
+
14
// Placeholder LSN used before any checkpoint exists. LSNs in this module are
// derived from MongoDB cluster timestamps (see getMongoLsn / mongoLsnToTimestamp).
export const ZERO_LSN = '0000000000000000';

/**
 * Options for constructing a {@link ChangeStream}.
 */
export interface ChangeStreamOptions {
  /** Source MongoDB connection manager (client + default database). */
  connections: MongoManager;
  /** Bucket storage for the sync rules instance being replicated. */
  storage: storage.SyncRulesBucketStorage;
  /** Signals that replication should stop. */
  abort_signal: AbortSignal;
}

interface InitResult {
  // True if the initial snapshot still needs to be performed.
  needsInitialSync: boolean;
}

/**
 * Thrown when the change stream can no longer be resumed from the stored
 * position, requiring replication to restart from scratch.
 *
 * NOTE(review): the "replication slot" terminology is a carry-over from the
 * Postgres module; for MongoDB this corresponds to a non-resumable change stream.
 */
export class MissingReplicationSlotError extends Error {
  constructor(message: string) {
    super(message);
  }
}
31
+
32
/**
 * Replicates a MongoDB deployment into bucket storage: performs the initial
 * snapshot, then tails a change stream and applies changes incrementally.
 */
export class ChangeStream {
  sync_rules: SqlSyncRules;
  group_id: number;

  // Fixed connection id; only a single source connection is supported here.
  connection_id = 1;

  private readonly storage: storage.SyncRulesBucketStorage;

  private connections: MongoManager;
  private readonly client: mongo.MongoClient;
  private readonly defaultDb: mongo.Db;

  private abort_signal: AbortSignal;

  // Cache of resolved source tables, keyed by the entity descriptor's objectId
  // (the collection name for MongoDB).
  private relation_cache = new Map<string | number, storage.SourceTable>();

  constructor(options: ChangeStreamOptions) {
    this.storage = options.storage;
    this.group_id = options.storage.group_id;
    this.connections = options.connections;
    this.client = this.connections.client;
    this.defaultDb = this.connections.db;
    // The default schema for unqualified table patterns is the connection's
    // default database name.
    this.sync_rules = options.storage.getParsedSyncRules({
      defaultSchema: this.defaultDb.databaseName
    });

    this.abort_signal = options.abort_signal;
    this.abort_signal.addEventListener(
      'abort',
      () => {
        // TODO: Fast abort?
      },
      { once: true }
    );
  }
67
+
68
  /** True once the abort signal has fired and replication should wind down. */
  get stopped() {
    return this.abort_signal.aborted;
  }
71
+
72
  /**
   * Resolve a sync-rules table pattern to concrete source tables by listing
   * matching collections on the source database.
   *
   * Patterns for a different connection tag resolve to an empty list.
   * Wildcard patterns are matched with a prefix regex; exact patterns match
   * the collection name directly.
   *
   * @param batch - Active storage batch, used when registering each relation.
   * @param tablePattern - Pattern from the parsed sync rules.
   * @returns Resolved source tables for all matching collections.
   */
  async getQualifiedTableNames(
    batch: storage.BucketStorageBatch,
    tablePattern: TablePattern
  ): Promise<storage.SourceTable[]> {
    // For MongoDB, the "schema" is the database name.
    const schema = tablePattern.schema;
    if (tablePattern.connectionTag != this.connections.connectionTag) {
      return [];
    }

    let nameFilter: RegExp | string;
    if (tablePattern.isWildcard) {
      nameFilter = new RegExp('^' + escapeRegExp(tablePattern.tablePrefix));
    } else {
      nameFilter = tablePattern.name;
    }
    let result: storage.SourceTable[] = [];

    // Check if the collection exists
    const collections = await this.client
      .db(schema)
      .listCollections(
        {
          name: nameFilter
        },
        { nameOnly: true }
      )
      .toArray();

    for (let collection of collections) {
      const table = await this.handleRelation(
        batch,
        {
          // MongoDB has no stable numeric relation id, so the collection name
          // doubles as the objectId. `_id` is always the replication key.
          name: collection.name,
          schema,
          objectId: collection.name,
          replicationColumns: [{ name: '_id' }]
        } as SourceEntityDescriptor,
        // This is done as part of the initial setup - snapshot is handled elsewhere
        { snapshot: false }
      );

      result.push(table);
    }

    return result;
  }
118
+
119
+ async initSlot(): Promise<InitResult> {
120
+ const status = await this.storage.getStatus();
121
+ if (status.snapshot_done && status.checkpoint_lsn) {
122
+ logger.info(`Initial replication already done`);
123
+ return { needsInitialSync: false };
124
+ }
125
+
126
+ return { needsInitialSync: true };
127
+ }
128
+
129
+ async estimatedCount(table: storage.SourceTable): Promise<string> {
130
+ const db = this.client.db(table.schema);
131
+ const count = db.collection(table.table).estimatedDocumentCount();
132
+ return `~${count}`;
133
+ }
134
+
135
+ /**
136
+ * Start initial replication.
137
+ *
138
+ * If (partial) replication was done before on this slot, this clears the state
139
+ * and starts again from scratch.
140
+ */
141
+ async startInitialReplication() {
142
+ await this.storage.clear();
143
+ await this.initialReplication();
144
+ }
145
+
146
  /**
   * Perform the initial snapshot of all source tables.
   *
   * Validates the deployment topology (replicaset required), then copies every
   * matching collection inside a single snapshot-read session, and finally
   * commits a checkpoint at the snapshot's cluster time.
   *
   * @throws Error for sharded clusters, standalone instances, or when no
   *   usable cluster timestamp is available.
   */
  async initialReplication() {
    const sourceTables = this.sync_rules.getSourceTables();
    await this.client.connect();

    // `hello` reports topology; lastWrite.majorityOpTime gives a fallback
    // timestamp for the snapshot commit below.
    const hello = await this.defaultDb.command({ hello: 1 });
    const startTime = hello.lastWrite?.majorityOpTime?.ts as mongo.Timestamp;
    if (hello.msg == 'isdbgrid') {
      throw new Error('Sharded MongoDB Clusters are not supported yet (including MongoDB Serverless instances).');
    } else if (hello.setName == null) {
      throw new Error('Standalone MongoDB instances are not supported - use a replicaset.');
    } else if (startTime == null) {
      // Not known where this would happen apart from the above cases
      throw new Error('MongoDB lastWrite timestamp not found.');
    }
    // Snapshot session: all reads below observe a single point-in-time view.
    const session = await this.client.startSession({
      snapshot: true
    });
    try {
      await this.storage.startBatch(
        { zeroLSN: ZERO_LSN, defaultSchema: this.defaultDb.databaseName },
        async (batch) => {
          for (let tablePattern of sourceTables) {
            const tables = await this.getQualifiedTableNames(batch, tablePattern);
            for (let table of tables) {
              await this.snapshotTable(batch, table, session);
              // Mark done at ZERO_LSN; the real checkpoint is committed once,
              // below, at the snapshot time.
              await batch.markSnapshotDone([table], ZERO_LSN);

              await touch();
            }
          }

          // Prefer the session's cluster time (the snapshot read time);
          // fall back to the pre-snapshot lastWrite timestamp.
          const snapshotTime = session.clusterTime?.clusterTime ?? startTime;

          if (snapshotTime != null) {
            const lsn = getMongoLsn(snapshotTime);
            logger.info(`Snapshot commit at ${snapshotTime.inspect()} / ${lsn}`);
            // keepalive() does an auto-commit if there is data
            await batch.flush();
            await batch.keepalive(lsn);
          } else {
            throw new Error(`No snapshot clusterTime available.`);
          }
        }
      );
    } finally {
      session.endSession();
    }
  }
194
+
195
+ private getSourceNamespaceFilters() {
196
+ const sourceTables = this.sync_rules.getSourceTables();
197
+
198
+ let $inFilters: any[] = [{ db: this.defaultDb.databaseName, coll: '_powersync_checkpoints' }];
199
+ let $refilters: any[] = [];
200
+ for (let tablePattern of sourceTables) {
201
+ if (tablePattern.connectionTag != this.connections.connectionTag) {
202
+ continue;
203
+ }
204
+
205
+ if (tablePattern.isWildcard) {
206
+ $refilters.push({ db: tablePattern.schema, coll: new RegExp('^' + escapeRegExp(tablePattern.tablePrefix)) });
207
+ } else {
208
+ $inFilters.push({
209
+ db: tablePattern.schema,
210
+ coll: tablePattern.name
211
+ });
212
+ }
213
+ }
214
+ if ($refilters.length > 0) {
215
+ return { $or: [{ ns: { $in: $inFilters } }, ...$refilters] };
216
+ }
217
+ return { ns: { $in: $inFilters } };
218
+ }
219
+
220
+ static *getQueryData(results: Iterable<DatabaseInputRow>): Generator<SqliteRow> {
221
+ for (let row of results) {
222
+ yield constructAfterRecord(row);
223
+ }
224
+ }
225
+
226
+ private async snapshotTable(
227
+ batch: storage.BucketStorageBatch,
228
+ table: storage.SourceTable,
229
+ session?: mongo.ClientSession
230
+ ) {
231
+ logger.info(`Replicating ${table.qualifiedName}`);
232
+ const estimatedCount = await this.estimatedCount(table);
233
+ let at = 0;
234
+
235
+ const db = this.client.db(table.schema);
236
+ const collection = db.collection(table.table);
237
+ const query = collection.find({}, { session });
238
+
239
+ const cursor = query.stream();
240
+
241
+ for await (let document of cursor) {
242
+ if (this.abort_signal.aborted) {
243
+ throw new Error(`Aborted initial replication`);
244
+ }
245
+
246
+ const record = constructAfterRecord(document);
247
+
248
+ // This auto-flushes when the batch reaches its size limit
249
+ await batch.save({
250
+ tag: 'insert',
251
+ sourceTable: table,
252
+ before: undefined,
253
+ beforeReplicaId: undefined,
254
+ after: record,
255
+ afterReplicaId: document._id
256
+ });
257
+
258
+ at += 1;
259
+ Metrics.getInstance().rows_replicated_total.add(1);
260
+
261
+ await touch();
262
+ }
263
+
264
+ await batch.flush();
265
+ }
266
+
267
+ private async getRelation(
268
+ batch: storage.BucketStorageBatch,
269
+ descriptor: SourceEntityDescriptor
270
+ ): Promise<SourceTable> {
271
+ const existing = this.relation_cache.get(descriptor.objectId);
272
+ if (existing != null) {
273
+ return existing;
274
+ }
275
+ return this.handleRelation(batch, descriptor, { snapshot: false });
276
+ }
277
+
278
+ async handleRelation(
279
+ batch: storage.BucketStorageBatch,
280
+ descriptor: SourceEntityDescriptor,
281
+ options: { snapshot: boolean }
282
+ ) {
283
+ const snapshot = options.snapshot;
284
+ if (!descriptor.objectId && typeof descriptor.objectId != 'string') {
285
+ throw new Error('objectId expected');
286
+ }
287
+ const result = await this.storage.resolveTable({
288
+ group_id: this.group_id,
289
+ connection_id: this.connection_id,
290
+ connection_tag: this.connections.connectionTag,
291
+ entity_descriptor: descriptor,
292
+ sync_rules: this.sync_rules
293
+ });
294
+ this.relation_cache.set(descriptor.objectId, result.table);
295
+
296
+ // Drop conflicting tables. This includes for example renamed tables.
297
+ await batch.drop(result.dropTables);
298
+
299
+ // Snapshot if:
300
+ // 1. Snapshot is requested (false for initial snapshot, since that process handles it elsewhere)
301
+ // 2. Snapshot is not already done, AND:
302
+ // 3. The table is used in sync rules.
303
+ const shouldSnapshot = snapshot && !result.table.snapshotComplete && result.table.syncAny;
304
+ if (shouldSnapshot) {
305
+ // Truncate this table, in case a previous snapshot was interrupted.
306
+ await batch.truncate([result.table]);
307
+
308
+ await this.snapshotTable(batch, result.table);
309
+ const no_checkpoint_before_lsn = await createCheckpoint(this.client, this.defaultDb);
310
+
311
+ const [table] = await batch.markSnapshotDone([result.table], no_checkpoint_before_lsn);
312
+ return table;
313
+ }
314
+
315
+ return result.table;
316
+ }
317
+
318
+ async writeChange(
319
+ batch: storage.BucketStorageBatch,
320
+ table: storage.SourceTable,
321
+ change: mongo.ChangeStreamDocument
322
+ ): Promise<storage.FlushedResult | null> {
323
+ if (!table.syncAny) {
324
+ logger.debug(`Collection ${table.qualifiedName} not used in sync rules - skipping`);
325
+ return null;
326
+ }
327
+
328
+ Metrics.getInstance().rows_replicated_total.add(1);
329
+ if (change.operationType == 'insert') {
330
+ const baseRecord = constructAfterRecord(change.fullDocument);
331
+ return await batch.save({
332
+ tag: 'insert',
333
+ sourceTable: table,
334
+ before: undefined,
335
+ beforeReplicaId: undefined,
336
+ after: baseRecord,
337
+ afterReplicaId: change.documentKey._id
338
+ });
339
+ } else if (change.operationType == 'update' || change.operationType == 'replace') {
340
+ if (change.fullDocument == null) {
341
+ // Treat as delete
342
+ return await batch.save({
343
+ tag: 'delete',
344
+ sourceTable: table,
345
+ before: undefined,
346
+ beforeReplicaId: change.documentKey._id
347
+ });
348
+ }
349
+ const after = constructAfterRecord(change.fullDocument!);
350
+ return await batch.save({
351
+ tag: 'update',
352
+ sourceTable: table,
353
+ before: undefined,
354
+ beforeReplicaId: undefined,
355
+ after: after,
356
+ afterReplicaId: change.documentKey._id
357
+ });
358
+ } else if (change.operationType == 'delete') {
359
+ return await batch.save({
360
+ tag: 'delete',
361
+ sourceTable: table,
362
+ before: undefined,
363
+ beforeReplicaId: change.documentKey._id
364
+ });
365
+ } else {
366
+ throw new Error(`Unsupported operation: ${change.operationType}`);
367
+ }
368
+ }
369
+
370
  /**
   * Run the full replication flow: initial snapshot (if needed), then stream
   * changes until aborted. Errors are reported to storage before rethrowing.
   */
  async replicate() {
    try {
      // If anything errors here, the entire replication process is halted, and
      // all connections automatically closed, including this one.

      await this.initReplication();
      await this.streamChanges();
    } catch (e) {
      await this.storage.reportError(e);
      throw e;
    }
  }
382
+
383
+ async initReplication() {
384
+ const result = await this.initSlot();
385
+ if (result.needsInitialSync) {
386
+ await this.startInitialReplication();
387
+ }
388
+ }
389
+
390
  /**
   * Tail the deployment-wide change stream and apply events to storage.
   *
   * Resumes from the last checkpoint LSN (converted back to a cluster
   * timestamp). Data changes are buffered until a `_powersync_checkpoints`
   * write confirms the cluster time, at which point the batch is flushed and
   * a checkpoint LSN committed.
   */
  async streamChanges() {
    // Auto-activate as soon as initial replication is done
    await this.storage.autoActivate();

    await this.storage.startBatch({ zeroLSN: ZERO_LSN, defaultSchema: this.defaultDb.databaseName }, async (batch) => {
      const lastLsn = batch.lastCheckpointLsn;
      const startAfter = mongoLsnToTimestamp(lastLsn) ?? undefined;
      logger.info(`Resume streaming at ${startAfter?.inspect()} / ${lastLsn}`);

      // TODO: Use changeStreamSplitLargeEvent

      const pipeline: mongo.Document[] = [
        {
          $match: this.getSourceNamespaceFilters()
        }
      ];

      // Deployment-wide watch, filtered server-side to relevant namespaces.
      const stream = this.client.watch(pipeline, {
        startAtOperationTime: startAfter,
        showExpandedEvents: true,
        useBigInt64: true,
        maxAwaitTimeMS: 200,
        fullDocument: 'updateLookup'
      });

      if (this.abort_signal.aborted) {
        // NOTE(review): stream.close() returns a promise that is intentionally
        // not awaited here or in the abort listener below.
        stream.close();
        return;
      }

      this.abort_signal.addEventListener('abort', () => {
        stream.close();
      });

      // LSN of a checkpoint document we are waiting to observe on the stream.
      // While set, data changes are written but not committed.
      let waitForCheckpointLsn: string | null = null;

      while (true) {
        if (this.abort_signal.aborted) {
          break;
        }

        // tryNext() returns null after maxAwaitTimeMS with no event, which
        // lets the loop re-check the abort signal regularly.
        const changeDocument = await stream.tryNext();

        if (changeDocument == null || this.abort_signal.aborted) {
          continue;
        }
        await touch();

        // startAtOperationTime is inclusive, so skip events at or before the
        // resume timestamp to avoid re-applying the last committed event.
        if (startAfter != null && changeDocument.clusterTime?.lte(startAfter)) {
          continue;
        }

        // console.log('event', changeDocument);

        if (
          (changeDocument.operationType == 'insert' ||
            changeDocument.operationType == 'update' ||
            changeDocument.operationType == 'replace') &&
          changeDocument.ns.coll == '_powersync_checkpoints'
        ) {
          // Checkpoint document observed: safe to commit everything up to its
          // cluster time.
          const lsn = getMongoLsn(changeDocument.clusterTime!);
          if (waitForCheckpointLsn != null && lsn >= waitForCheckpointLsn) {
            waitForCheckpointLsn = null;
          }
          await batch.flush();
          await batch.keepalive(lsn);
        } else if (
          changeDocument.operationType == 'insert' ||
          changeDocument.operationType == 'update' ||
          changeDocument.operationType == 'replace' ||
          changeDocument.operationType == 'delete'
        ) {
          // First data change since the last commit: write a checkpoint
          // document so a commit point arrives on the stream.
          if (waitForCheckpointLsn == null) {
            waitForCheckpointLsn = await createCheckpoint(this.client, this.defaultDb);
          }
          const rel = getMongoRelation(changeDocument.ns);
          const table = await this.getRelation(batch, rel);
          if (table.syncAny) {
            await this.writeChange(batch, table, changeDocument);
          }
        } else if (changeDocument.operationType == 'drop') {
          const rel = getMongoRelation(changeDocument.ns);
          const table = await this.getRelation(batch, rel);
          if (table.syncAny) {
            await batch.drop([table]);
            this.relation_cache.delete(table.objectId);
          }
        } else if (changeDocument.operationType == 'rename') {
          const relFrom = getMongoRelation(changeDocument.ns);
          const relTo = getMongoRelation(changeDocument.to);
          const tableFrom = await this.getRelation(batch, relFrom);
          if (tableFrom.syncAny) {
            await batch.drop([tableFrom]);
            this.relation_cache.delete(tableFrom.objectId);
          }
          // Here we do need to snapshot the new table
          await this.handleRelation(batch, relTo, { snapshot: true });
        }
      }
    });
  }
491
+ }
492
+
493
// Notify the liveness probe that replication is still making progress.
async function touch() {
  // FIXME: The hosted Kubernetes probe does not actually check the timestamp on this.
  // FIXME: We need a timeout of around 5+ minutes in Kubernetes if we do start checking the timestamp,
  // or reduce PING_INTERVAL here.
  return container.probes.touch();
}
499
+
500
+ function escapeRegExp(string: string) {
501
+ // https://stackoverflow.com/a/3561711/214837
502
+ return string.replace(/[/\-\\^$*+?.()|[\]{}]/g, '\\$&');
503
+ }
@@ -0,0 +1,104 @@
1
+ import { container } from '@powersync/lib-services-framework';
2
+ import { MongoManager } from './MongoManager.js';
3
+ import { MissingReplicationSlotError, ChangeStream } from './ChangeStream.js';
4
+
5
+ import { replication } from '@powersync/service-core';
6
+ import { ConnectionManagerFactory } from './ConnectionManagerFactory.js';
7
+
8
+ import * as mongo from 'mongodb';
9
+
10
/**
 * Options for constructing a ChangeStreamReplicationJob: the base replication
 * job options plus a factory for per-attempt MongoDB connections.
 */
export interface ChangeStreamReplicationJobOptions extends replication.AbstractReplicationJobOptions {
  connectionFactory: ConnectionManagerFactory;
}
13
+
14
/**
 * Replication job that drives a MongoDB ChangeStream with retry handling:
 * runs replication attempts in a loop, reports errors, and converts
 * non-resumable change stream errors into MissingReplicationSlotError so the
 * replicator can reset the slot.
 */
export class ChangeStreamReplicationJob extends replication.AbstractReplicationJob {
  private connectionFactory: ConnectionManagerFactory;
  // NOTE(review): this manager is created in the constructor but never used -
  // replicateOnce() creates its own manager per attempt. Candidate for removal.
  private readonly connectionManager: MongoManager;

  constructor(options: ChangeStreamReplicationJobOptions) {
    super(options);
    this.connectionFactory = options.connectionFactory;
    this.connectionManager = this.connectionFactory.create();
  }

  async cleanUp(): Promise<void> {
    // TODO: Implement?
  }

  async keepAlive() {
    // TODO: Implement?
  }

  // NOTE(review): "slot" naming is carried over from the Postgres module; here
  // it identifies the storage slot for this sync rules instance.
  private get slotName() {
    return this.options.storage.slot_name;
  }

  /**
   * Entry point: runs the replication loop until a fatal error occurs, then
   * reports it and aborts the job. A MissingReplicationSlotError additionally
   * signals storage to discard the slot so a fresh one is created.
   */
  async replicate() {
    try {
      await this.replicateLoop();
    } catch (e) {
      // Fatal exception
      container.reporter.captureException(e, {
        metadata: {}
      });
      this.logger.error(`Replication failed`, e);

      if (e instanceof MissingReplicationSlotError) {
        // This stops replication on this slot, and creates a new slot
        await this.options.storage.factory.slotRemoved(this.slotName);
      }
    } finally {
      this.abortController.abort();
    }
  }

  /**
   * Retry loop: each iteration is one replication attempt, with a fixed 5s
   * pause between attempts (the rate limiter adds further error backoff).
   */
  async replicateLoop() {
    while (!this.isStopped) {
      await this.replicateOnce();

      if (!this.isStopped) {
        await new Promise((resolve) => setTimeout(resolve, 5000));
      }
    }
  }

  /**
   * One replication attempt with a fresh connection. Recoverable errors are
   * logged/reported and fed to the rate limiter; a non-resumable change
   * stream error is escalated as MissingReplicationSlotError.
   */
  async replicateOnce() {
    // New connections on every iteration (every error with retry),
    // otherwise we risk repeating errors related to the connection,
    // such as caused by cached PG schemas.
    const connectionManager = this.connectionFactory.create();
    try {
      await this.rateLimiter?.waitUntilAllowed({ signal: this.abortController.signal });
      if (this.isStopped) {
        return;
      }
      const stream = new ChangeStream({
        abort_signal: this.abortController.signal,
        storage: this.options.storage,
        connections: connectionManager
      });
      await stream.replicate();
    } catch (e) {
      // Errors during shutdown are expected and ignored.
      if (this.abortController.signal.aborted) {
        return;
      }
      this.logger.error(`Replication error`, e);
      if (e.cause != null) {
        // Without this additional log, the cause may not be visible in the logs.
        this.logger.error(`cause`, e.cause);
      }
      if (e instanceof mongo.MongoError && e.hasErrorLabel('NonResumableChangeStreamError')) {
        // The stored resume position is no longer valid - replication must
        // restart from scratch via a new slot.
        throw new MissingReplicationSlotError(e.message);
      } else {
        // Report the error if relevant, before retrying
        container.reporter.captureException(e, {
          metadata: {}
        });
        // This sets the retry delay
        this.rateLimiter?.reportError(e);
      }
    } finally {
      await connectionManager.end();
    }
  }
}
@@ -0,0 +1,36 @@
1
+ import { storage, replication } from '@powersync/service-core';
2
+ import { ChangeStreamReplicationJob } from './ChangeStreamReplicationJob.js';
3
+ import { ConnectionManagerFactory } from './ConnectionManagerFactory.js';
4
+ import { MongoErrorRateLimiter } from './MongoErrorRateLimiter.js';
5
+
6
+ export interface WalStreamReplicatorOptions extends replication.AbstractReplicatorOptions {
7
+ connectionFactory: ConnectionManagerFactory;
8
+ }
9
+
10
+ export class ChangeStreamReplicator extends replication.AbstractReplicator<ChangeStreamReplicationJob> {
11
+ private readonly connectionFactory: ConnectionManagerFactory;
12
+
13
+ constructor(options: WalStreamReplicatorOptions) {
14
+ super(options);
15
+ this.connectionFactory = options.connectionFactory;
16
+ }
17
+
18
+ createJob(options: replication.CreateJobOptions): ChangeStreamReplicationJob {
19
+ return new ChangeStreamReplicationJob({
20
+ id: this.createJobId(options.storage.group_id),
21
+ storage: options.storage,
22
+ connectionFactory: this.connectionFactory,
23
+ lock: options.lock,
24
+ rateLimiter: new MongoErrorRateLimiter()
25
+ });
26
+ }
27
+
28
+ async cleanUp(syncRulesStorage: storage.SyncRulesBucketStorage): Promise<void> {
29
+ // TODO: Implement anything?
30
+ }
31
+
32
+ async stop(): Promise<void> {
33
+ await super.stop();
34
+ await this.connectionFactory.shutdown();
35
+ }
36
+ }
@@ -0,0 +1,27 @@
1
+ import { logger } from '@powersync/lib-services-framework';
2
+ import { NormalizedMongoConnectionConfig } from '../types/types.js';
3
+ import { MongoManager } from './MongoManager.js';
4
+
5
+ export class ConnectionManagerFactory {
6
+ private readonly connectionManagers: MongoManager[];
7
+ private readonly dbConnectionConfig: NormalizedMongoConnectionConfig;
8
+
9
+ constructor(dbConnectionConfig: NormalizedMongoConnectionConfig) {
10
+ this.dbConnectionConfig = dbConnectionConfig;
11
+ this.connectionManagers = [];
12
+ }
13
+
14
+ create() {
15
+ const manager = new MongoManager(this.dbConnectionConfig);
16
+ this.connectionManagers.push(manager);
17
+ return manager;
18
+ }
19
+
20
+ async shutdown() {
21
+ logger.info('Shutting down MongoDB connection Managers...');
22
+ for (const manager of this.connectionManagers) {
23
+ await manager.end();
24
+ }
25
+ logger.info('MongoDB connection Managers shutdown completed.');
26
+ }
27
+ }