@powersync/service-module-mongodb 0.0.0-dev-20241111122558 → 0.0.0-dev-20241128134723
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +26 -17
- package/dist/api/MongoRouteAPIAdapter.d.ts +1 -0
- package/dist/api/MongoRouteAPIAdapter.js +54 -21
- package/dist/api/MongoRouteAPIAdapter.js.map +1 -1
- package/dist/replication/ChangeStream.d.ts +23 -2
- package/dist/replication/ChangeStream.js +178 -42
- package/dist/replication/ChangeStream.js.map +1 -1
- package/dist/replication/ChangeStreamReplicationJob.js +7 -4
- package/dist/replication/ChangeStreamReplicationJob.js.map +1 -1
- package/dist/replication/MongoErrorRateLimiter.js +0 -6
- package/dist/replication/MongoErrorRateLimiter.js.map +1 -1
- package/dist/replication/MongoRelation.js +5 -2
- package/dist/replication/MongoRelation.js.map +1 -1
- package/dist/replication/replication-utils.d.ts +1 -0
- package/dist/replication/replication-utils.js +1 -0
- package/dist/replication/replication-utils.js.map +1 -1
- package/dist/types/types.d.ts +35 -0
- package/dist/types/types.js +38 -2
- package/dist/types/types.js.map +1 -1
- package/package.json +6 -9
- package/src/api/MongoRouteAPIAdapter.ts +53 -21
- package/src/replication/ChangeStream.ts +277 -121
- package/src/replication/ChangeStreamReplicationJob.ts +6 -4
- package/src/replication/MongoErrorRateLimiter.ts +1 -8
- package/src/replication/MongoRelation.ts +5 -2
- package/src/replication/replication-utils.ts +2 -1
- package/src/types/types.ts +43 -3
- package/test/src/change_stream.test.ts +442 -231
- package/test/src/change_stream_utils.ts +54 -27
- package/test/src/mongo_test.test.ts +180 -46
- package/test/src/slow_tests.test.ts +109 -0
- package/tsconfig.tsbuildinfo +1 -1
package/src/replication/ChangeStream.ts

@@ -11,6 +11,8 @@ import {
   mongoLsnToTimestamp
 } from './MongoRelation.js';
 import { escapeRegExp } from '../utils.js';
+import { CHECKPOINTS_COLLECTION } from './replication-utils.js';
+import { PostImagesOption } from '../types/types.js';
 
 export const ZERO_LSN = '0000000000000000';
 
@@ -24,7 +26,15 @@ interface InitResult {
   needsInitialSync: boolean;
 }
 
-
+/**
+ * Thrown when the change stream is not valid anymore, and replication
+ * must be restarted.
+ *
+ * Possible reasons:
+ * * Some change stream documents do not have postImages.
+ * * startAfter/resumeToken is not valid anymore.
+ */
+export class ChangeStreamInvalidatedError extends Error {
   constructor(message: string) {
     super(message);
   }
@@ -70,7 +80,21 @@ export class ChangeStream {
     return this.abort_signal.aborted;
   }
 
-
+  private get usePostImages() {
+    return this.connections.options.postImages != PostImagesOption.OFF;
+  }
+
+  private get configurePostImages() {
+    return this.connections.options.postImages == PostImagesOption.AUTO_CONFIGURE;
+  }
+
+  /**
+   * This resolves a pattern, persists the related metadata, and returns
+   * the resulting SourceTables.
+   *
+   * This implicitly checks the collection postImage configuration.
+   */
+  async resolveQualifiedTableNames(
     batch: storage.BucketStorageBatch,
     tablePattern: TablePattern
   ): Promise<storage.SourceTable[]> {
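Note: the PostImagesOption referenced by these new getters is defined in package/src/types/types.ts, whose diff is not shown above. A minimal sketch of the enum this code assumes; OFF and AUTO_CONFIGURE appear in the getters, while READ_ONLY and the string values are assumptions based on the 'read_only' / 'auto_configure' comment later in this file:

// Hypothetical sketch of PostImagesOption (the types.ts diff is not shown here).
export enum PostImagesOption {
  OFF = 'off',
  READ_ONLY = 'read_only', // assumed member
  AUTO_CONFIGURE = 'auto_configure'
}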
@@ -94,10 +118,14 @@ export class ChangeStream {
         {
           name: nameFilter
         },
-        { nameOnly:
+        { nameOnly: false }
       )
       .toArray();
 
+    if (!tablePattern.isWildcard && collections.length == 0) {
+      logger.warn(`Collection ${schema}.${tablePattern.name} not found`);
+    }
+
     for (let collection of collections) {
       const table = await this.handleRelation(
         batch,
@@ -108,7 +136,7 @@
           replicationColumns: [{ name: '_id' }]
         } as SourceEntityDescriptor,
         // This is done as part of the initial setup - snapshot is handled elsewhere
-        { snapshot: false }
+        { snapshot: false, collectionInfo: collection }
       );
 
       result.push(table);
@@ -148,44 +176,45 @@ export class ChangeStream {
     const sourceTables = this.sync_rules.getSourceTables();
     await this.client.connect();
 
+    // We need to get the snapshot time before taking the initial snapshot.
     const hello = await this.defaultDb.command({ hello: 1 });
-    const
+    const snapshotTime = hello.lastWrite?.majorityOpTime?.ts as mongo.Timestamp;
     if (hello.msg == 'isdbgrid') {
       throw new Error('Sharded MongoDB Clusters are not supported yet (including MongoDB Serverless instances).');
     } else if (hello.setName == null) {
       throw new Error('Standalone MongoDB instances are not supported - use a replicaset.');
-    } else if (
+    } else if (snapshotTime == null) {
       // Not known where this would happen apart from the above cases
       throw new Error('MongoDB lastWrite timestamp not found.');
     }
-
-
-
+    // We previously used {snapshot: true} for the snapshot session.
+    // While it gives nice consistency guarantees, it fails when the
+    // snapshot takes longer than 5 minutes, due to minSnapshotHistoryWindowInSeconds
+    // expiring the snapshot.
+    const session = await this.client.startSession();
     try {
       await this.storage.startBatch(
-        { zeroLSN: ZERO_LSN, defaultSchema: this.defaultDb.databaseName },
+        { zeroLSN: ZERO_LSN, defaultSchema: this.defaultDb.databaseName, storeCurrentData: false },
         async (batch) => {
+          // Start by resolving all tables.
+          // This checks postImage configuration, and that should fail as
+          // earlier as possible.
+          let allSourceTables: SourceTable[] = [];
          for (let tablePattern of sourceTables) {
-            const tables = await this.
-
-            await this.snapshotTable(batch, table, session);
-            await batch.markSnapshotDone([table], ZERO_LSN);
-
-            await touch();
-          }
+            const tables = await this.resolveQualifiedTableNames(batch, tablePattern);
+            allSourceTables.push(...tables);
          }
 
-
+          for (let table of allSourceTables) {
+            await this.snapshotTable(batch, table, session);
+            await batch.markSnapshotDone([table], ZERO_LSN);
 
-
-            const lsn = getMongoLsn(snapshotTime);
-            logger.info(`Snapshot commit at ${snapshotTime.inspect()} / ${lsn}`);
-            // keepalive() does an auto-commit if there is data
-            await batch.flush();
-            await batch.keepalive(lsn);
-          } else {
-            throw new Error(`No snapshot clusterTime available.`);
+            await touch();
          }
+
+          const lsn = getMongoLsn(snapshotTime);
+          logger.info(`Snapshot commit at ${snapshotTime.inspect()} / ${lsn}`);
+          await batch.commit(lsn);
        }
      );
    } finally {
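Note: getMongoLsn and mongoLsnToTimestamp (imported from MongoRelation.js, not shown in this diff) tie the snapshot commit to the change stream position. Given ZERO_LSN = '0000000000000000', a plausible sketch is to hex-encode the 64-bit BSON Timestamp, zero-padded to 16 characters, so that the plain string comparison lsn >= waitForCheckpointLsn used below agrees with timestamp order. The package's actual implementation may differ:

import * as mongo from 'mongodb';

// Sketch only: encode a cluster-time Timestamp as a fixed-width hex LSN string.
export function getMongoLsn(timestamp: mongo.Timestamp): string {
  const value = (BigInt(timestamp.getHighBitsUnsigned()) << 32n) | BigInt(timestamp.getLowBitsUnsigned());
  return value.toString(16).padStart(16, '0');
}

// Sketch only: invert the encoding; a null LSN means "no checkpoint yet".
export function mongoLsnToTimestamp(lsn: string | null): mongo.Timestamp | null {
  if (lsn == null) {
    return null;
  }
  const value = BigInt('0x' + lsn);
  return new mongo.Timestamp({ t: Number(value >> 32n), i: Number(value & 0xffffffffn) });
}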
@@ -193,10 +222,26 @@ export class ChangeStream {
     }
   }
 
+  private async setupCheckpointsCollection() {
+    const collection = await this.getCollectionInfo(this.defaultDb.databaseName, CHECKPOINTS_COLLECTION);
+    if (collection == null) {
+      await this.defaultDb.createCollection(CHECKPOINTS_COLLECTION, {
+        changeStreamPreAndPostImages: { enabled: true }
+      });
+    } else if (this.usePostImages && collection.options?.changeStreamPreAndPostImages?.enabled != true) {
+      // Drop + create requires less permissions than collMod,
+      // and we don't care about the data in this collection.
+      await this.defaultDb.dropCollection(CHECKPOINTS_COLLECTION);
+      await this.defaultDb.createCollection(CHECKPOINTS_COLLECTION, {
+        changeStreamPreAndPostImages: { enabled: true }
+      });
+    }
+  }
+
   private getSourceNamespaceFilters(): { $match: any; multipleDatabases: boolean } {
     const sourceTables = this.sync_rules.getSourceTables();
 
-    let $inFilters: any[] = [{ db: this.defaultDb.databaseName, coll:
+    let $inFilters: any[] = [{ db: this.defaultDb.databaseName, coll: CHECKPOINTS_COLLECTION }];
     let $refilters: any[] = [];
     let multipleDatabases = false;
     for (let tablePattern of sourceTables) {
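Note: creating the checkpoints collection with changeStreamPreAndPostImages enabled is what later lets the change stream run with fullDocument: 'required' instead of 'updateLookup'. A self-contained sketch of that MongoDB 6.0+ feature, using a placeholder connection string and collection name:

import { MongoClient } from 'mongodb';

// Sketch: a collection with post-images enabled can serve
// fullDocument: 'required' change stream events.
async function demo() {
  const client = new MongoClient('mongodb://localhost:27017/?replicaSet=rs0');
  const db = client.db('demo');
  await db.createCollection('example', {
    changeStreamPreAndPostImages: { enabled: true }
  });
  const stream = db.collection('example').watch([], { fullDocument: 'required' });
  for await (const event of stream) {
    if (event.operationType === 'update') {
      // The post-image is stored alongside the oplog entry by the server,
      // rather than being looked up afterwards like 'updateLookup'.
      console.log(event.fullDocument);
    }
  }
}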
@@ -209,7 +254,10 @@ export class ChangeStream {
     }
 
     if (tablePattern.isWildcard) {
-      $refilters.push({
+      $refilters.push({
+        'ns.db': tablePattern.schema,
+        'ns.coll': new RegExp('^' + escapeRegExp(tablePattern.tablePrefix))
+      });
     } else {
       $inFilters.push({
         db: tablePattern.schema,
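Note: for reference, a hypothetical example of what getSourceNamespaceFilters builds for sync rules covering mydb.users plus a wildcard mydb.orders_*. The $or combination is an assumption; only fragments of the method are visible in this diff:

// Hypothetical resulting $match input (shape assumed):
const $match = {
  $or: [
    {
      ns: {
        $in: [
          { db: 'mydb', coll: '_powersync_checkpoints' },
          { db: 'mydb', coll: 'users' }
        ]
      }
    },
    // Wildcard patterns become regex "refilters" on ns.db / ns.coll:
    { 'ns.db': 'mydb', 'ns.coll': new RegExp('^orders_') }
  ]
};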
@@ -240,7 +288,7 @@ export class ChangeStream {
 
     const db = this.client.db(table.schema);
     const collection = db.collection(table.table);
-    const query = collection.find({}, { session });
+    const query = collection.find({}, { session, readConcern: { level: 'majority' } });
 
     const cursor = query.stream();
 
@@ -249,6 +297,8 @@ export class ChangeStream {
       throw new Error(`Aborted initial replication`);
     }
 
+    at += 1;
+
     const record = constructAfterRecord(document);
 
     // This auto-flushes when the batch reaches its size limit
@@ -268,6 +318,7 @@ export class ChangeStream {
     }
 
     await batch.flush();
+    logger.info(`Replicated ${at} documents for ${table.qualifiedName}`);
   }
 
   private async getRelation(
@@ -278,14 +329,60 @@ export class ChangeStream {
     if (existing != null) {
       return existing;
     }
-
+
+    // Note: collection may have been dropped at this point, so we handle
+    // missing values.
+    const collection = await this.getCollectionInfo(descriptor.schema, descriptor.name);
+
+    return this.handleRelation(batch, descriptor, { snapshot: false, collectionInfo: collection });
+  }
+
+  private async getCollectionInfo(db: string, name: string): Promise<mongo.CollectionInfo | undefined> {
+    const collection = (
+      await this.client
+        .db(db)
+        .listCollections(
+          {
+            name: name
+          },
+          { nameOnly: false }
+        )
+        .toArray()
+    )[0];
+    return collection;
+  }
+
+  private async checkPostImages(db: string, collectionInfo: mongo.CollectionInfo) {
+    if (!this.usePostImages) {
+      // Nothing to check
+      return;
+    }
+
+    const enabled = collectionInfo.options?.changeStreamPreAndPostImages?.enabled == true;
+
+    if (!enabled && this.configurePostImages) {
+      await this.client.db(db).command({
+        collMod: collectionInfo.name,
+        changeStreamPreAndPostImages: { enabled: true }
+      });
+      logger.info(`Enabled postImages on ${db}.${collectionInfo.name}`);
+    } else if (!enabled) {
+      throw new Error(`postImages not enabled on ${db}.${collectionInfo.name}`);
+    }
   }
 
   async handleRelation(
     batch: storage.BucketStorageBatch,
     descriptor: SourceEntityDescriptor,
-    options: { snapshot: boolean }
+    options: { snapshot: boolean; collectionInfo: mongo.CollectionInfo | undefined }
   ) {
+    if (options.collectionInfo != null) {
+      await this.checkPostImages(descriptor.schema, options.collectionInfo);
+    } else {
+      // If collectionInfo is null, the collection may have been dropped.
+      // Ignore the postImages check in this case.
+    }
+
     const snapshot = options.snapshot;
     if (!descriptor.objectId && typeof descriptor.objectId != 'string') {
       throw new Error('objectId expected');
@@ -388,121 +485,180 @@ export class ChangeStream {
 
   async initReplication() {
     const result = await this.initSlot();
+    await this.setupCheckpointsCollection();
     if (result.needsInitialSync) {
       await this.startInitialReplication();
     }
   }
 
   async streamChanges() {
+    try {
+      await this.streamChangesInternal();
+    } catch (e) {
+      if (
+        e instanceof mongo.MongoServerError &&
+        e.codeName == 'NoMatchingDocument' &&
+        e.errmsg?.includes('post-image was not found')
+      ) {
+        throw new ChangeStreamInvalidatedError(e.errmsg);
+      }
+      throw e;
+    }
+  }
+
+  async streamChangesInternal() {
     // Auto-activate as soon as initial replication is done
     await this.storage.autoActivate();
 
-    await this.storage.startBatch(
-
-
-
-
-
-
-
-
-
-
+    await this.storage.startBatch(
+      { zeroLSN: ZERO_LSN, defaultSchema: this.defaultDb.databaseName, storeCurrentData: false },
+      async (batch) => {
+        const lastLsn = batch.lastCheckpointLsn;
+        const startAfter = mongoLsnToTimestamp(lastLsn) ?? undefined;
+        logger.info(`Resume streaming at ${startAfter?.inspect()} / ${lastLsn}`);
+
+        const filters = this.getSourceNamespaceFilters();
+
+        const pipeline: mongo.Document[] = [
+          {
+            $match: filters.$match
+          },
+          { $changeStreamSplitLargeEvent: {} }
+        ];
+
+        let fullDocument: 'required' | 'updateLookup';
+
+        if (this.usePostImages) {
+          // 'read_only' or 'auto_configure'
+          // Configuration happens during snapshot, or when we see new
+          // collections.
+          fullDocument = 'required';
+        } else {
+          fullDocument = 'updateLookup';
        }
-      ];
-
-      const streamOptions: mongo.ChangeStreamOptions = {
-        startAtOperationTime: startAfter,
-        showExpandedEvents: true,
-        useBigInt64: true,
-        maxAwaitTimeMS: 200,
-        fullDocument: 'updateLookup'
-      };
-      let stream: mongo.ChangeStream<mongo.Document>;
-      if (filters.multipleDatabases) {
-        // Requires readAnyDatabase@admin on Atlas
-        stream = this.client.watch(pipeline, streamOptions);
-      } else {
-        // Same general result, but requires less permissions than the above
-        stream = this.defaultDb.watch(pipeline, streamOptions);
-      }
 
-
-
-
-
-
-
-
-
-
-
+        const streamOptions: mongo.ChangeStreamOptions = {
+          startAtOperationTime: startAfter,
+          showExpandedEvents: true,
+          useBigInt64: true,
+          maxAwaitTimeMS: 200,
+          fullDocument: fullDocument
+        };
+        let stream: mongo.ChangeStream<mongo.Document>;
+        if (filters.multipleDatabases) {
+          // Requires readAnyDatabase@admin on Atlas
+          stream = this.client.watch(pipeline, streamOptions);
+        } else {
+          // Same general result, but requires less permissions than the above
+          stream = this.defaultDb.watch(pipeline, streamOptions);
+        }
 
-      while (true) {
        if (this.abort_signal.aborted) {
-
+          stream.close();
+          return;
        }
 
-
-
-          continue;
-        }
-        await touch();
+        this.abort_signal.addEventListener('abort', () => {
+          stream.close();
+        });
 
-
-
-
+        // Always start with a checkpoint.
+        // This helps us to clear erorrs when restarting, even if there is
+        // no data to replicate.
+        let waitForCheckpointLsn: string | null = await createCheckpoint(this.client, this.defaultDb);
 
-
+        let splitDocument: mongo.ChangeStreamDocument | null = null;
 
-
-          (
-
-            changeDocument.operationType == 'replace') &&
-          changeDocument.ns.coll == '_powersync_checkpoints'
-        ) {
-          const lsn = getMongoLsn(changeDocument.clusterTime!);
-          if (waitForCheckpointLsn != null && lsn >= waitForCheckpointLsn) {
-            waitForCheckpointLsn = null;
+        while (true) {
+          if (this.abort_signal.aborted) {
+            break;
          }
-
-          await
-
-
-
-            changeDocument.operationType == 'replace' ||
-            changeDocument.operationType == 'delete'
-        ) {
-          if (waitForCheckpointLsn == null) {
-            waitForCheckpointLsn = await createCheckpoint(this.client, this.defaultDb);
+
+          const originalChangeDocument = await stream.tryNext();
+
+          if (originalChangeDocument == null || this.abort_signal.aborted) {
+            continue;
          }
-
-
-          if (
-
+          await touch();
+
+          if (startAfter != null && originalChangeDocument.clusterTime?.lte(startAfter)) {
+            continue;
          }
-
-
-
-
-
-
+
+          let changeDocument = originalChangeDocument;
+          if (originalChangeDocument?.splitEvent != null) {
+            // Handle split events from $changeStreamSplitLargeEvent.
+            // This is only relevant for very large update operations.
+            const splitEvent = originalChangeDocument?.splitEvent;
+
+            if (splitDocument == null) {
+              splitDocument = originalChangeDocument;
+            } else {
+              splitDocument = Object.assign(splitDocument, originalChangeDocument);
+            }
+
+            if (splitEvent.fragment == splitEvent.of) {
+              // Got all fragments
+              changeDocument = splitDocument;
+              splitDocument = null;
+            } else {
+              // Wait for more fragments
+              continue;
+            }
+          } else if (splitDocument != null) {
+            // We were waiting for fragments, but got a different event
+            throw new Error(`Incomplete splitEvent: ${JSON.stringify(splitDocument.splitEvent)}`);
          }
-
-
-
-
-
-
-
+
+          // console.log('event', changeDocument);
+
+          if (
+            (changeDocument.operationType == 'insert' ||
+              changeDocument.operationType == 'update' ||
+              changeDocument.operationType == 'replace') &&
+            changeDocument.ns.coll == CHECKPOINTS_COLLECTION
+          ) {
+            const lsn = getMongoLsn(changeDocument.clusterTime!);
+            if (waitForCheckpointLsn != null && lsn >= waitForCheckpointLsn) {
+              waitForCheckpointLsn = null;
+            }
+            await batch.commit(lsn);
+          } else if (
+            changeDocument.operationType == 'insert' ||
+            changeDocument.operationType == 'update' ||
+            changeDocument.operationType == 'replace' ||
+            changeDocument.operationType == 'delete'
+          ) {
+            if (waitForCheckpointLsn == null) {
+              waitForCheckpointLsn = await createCheckpoint(this.client, this.defaultDb);
+            }
+            const rel = getMongoRelation(changeDocument.ns);
+            const table = await this.getRelation(batch, rel);
+            if (table.syncAny) {
+              await this.writeChange(batch, table, changeDocument);
+            }
+          } else if (changeDocument.operationType == 'drop') {
+            const rel = getMongoRelation(changeDocument.ns);
+            const table = await this.getRelation(batch, rel);
+            if (table.syncAny) {
+              await batch.drop([table]);
+              this.relation_cache.delete(table.objectId);
+            }
+          } else if (changeDocument.operationType == 'rename') {
+            const relFrom = getMongoRelation(changeDocument.ns);
+            const relTo = getMongoRelation(changeDocument.to);
+            const tableFrom = await this.getRelation(batch, relFrom);
+            if (tableFrom.syncAny) {
+              await batch.drop([tableFrom]);
+              this.relation_cache.delete(tableFrom.objectId);
+            }
+            // Here we do need to snapshot the new table
+            const collection = await this.getCollectionInfo(relTo.schema, relTo.name);
+            await this.handleRelation(batch, relTo, { snapshot: true, collectionInfo: collection });
          }
-          // Here we do need to snapshot the new table
-          await this.handleRelation(batch, relTo, { snapshot: true });
        }
      }
-
+    );
  }
}
 
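Note: the loop above reassembles $changeStreamSplitLargeEvent fragments by merging them with Object.assign until splitEvent.fragment == splitEvent.of. The same technique as a standalone sketch, independent of the ChangeStream class:

import * as mongo from 'mongodb';

// Sketch: reassemble change stream events split by $changeStreamSplitLargeEvent.
// Fragments arrive in order, each tagged with splitEvent: { fragment, of }.
async function* reassembleSplitEvents(
  stream: mongo.ChangeStream<mongo.Document>
): AsyncGenerator<mongo.ChangeStreamDocument<mongo.Document>> {
  let partial: mongo.ChangeStreamDocument<mongo.Document> | null = null;
  for await (const doc of stream) {
    if (doc.splitEvent != null) {
      // Merge this fragment into the partially-assembled event.
      partial = partial == null ? doc : Object.assign(partial, doc);
      if (doc.splitEvent.fragment == doc.splitEvent.of) {
        const complete = partial;
        partial = null;
        yield complete;
      }
    } else if (partial != null) {
      // A different event arrived before all fragments were seen.
      throw new Error('Incomplete splitEvent');
    } else {
      yield doc;
    }
  }
}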
package/src/replication/ChangeStreamReplicationJob.ts

@@ -1,5 +1,5 @@
 import { container } from '@powersync/lib-services-framework';
-import {
+import { ChangeStreamInvalidatedError, ChangeStream } from './ChangeStream.js';
 
 import { replication } from '@powersync/service-core';
 import { ConnectionManagerFactory } from './ConnectionManagerFactory.js';
@@ -40,7 +40,7 @@ export class ChangeStreamReplicationJob extends replication.AbstractReplicationJob
     });
     this.logger.error(`Replication failed`, e);
 
-    if (e instanceof
+    if (e instanceof ChangeStreamInvalidatedError) {
       // This stops replication on this slot, and creates a new slot
       await this.options.storage.factory.slotRemoved(this.slotName);
     }
@@ -84,8 +84,10 @@ export class ChangeStreamReplicationJob extends replication.AbstractReplicationJob
       // Without this additional log, the cause may not be visible in the logs.
       this.logger.error(`cause`, e.cause);
     }
-    if (e instanceof
-      throw
+    if (e instanceof ChangeStreamInvalidatedError) {
+      throw e;
+    } else if (e instanceof mongo.MongoError && e.hasErrorLabel('NonResumableChangeStreamError')) {
+      throw new ChangeStreamInvalidatedError(e.message);
     } else {
       // Report the error if relevant, before retrying
       container.reporter.captureException(e, {
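Note: NonResumableChangeStreamError is a server-side error label, checked through the driver's MongoError.hasErrorLabel(). The classification above reduces to a predicate like the following sketch:

import * as mongo from 'mongodb';
import { ChangeStreamInvalidatedError } from './ChangeStream.js';

// Sketch: invalidated streams force a fresh snapshot (the storage slot is
// removed by the caller); all other errors are retried with rate limiting.
function isStreamInvalidated(e: unknown): boolean {
  return (
    e instanceof ChangeStreamInvalidatedError ||
    (e instanceof mongo.MongoError && e.hasErrorLabel('NonResumableChangeStreamError'))
  );
}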
package/src/replication/MongoErrorRateLimiter.ts

@@ -1,5 +1,5 @@
-import { setTimeout } from 'timers/promises';
 import { ErrorRateLimiter } from '@powersync/service-core';
+import { setTimeout } from 'timers/promises';
 
 export class MongoErrorRateLimiter implements ErrorRateLimiter {
   nextAllowed: number = Date.now();
@@ -27,13 +27,6 @@ export class MongoErrorRateLimiter implements ErrorRateLimiter {
     } else if (message.includes('ECONNREFUSED')) {
       // Could be fail2ban or similar
       this.setDelay(120_000);
-    } else if (
-      message.includes('Unable to do postgres query on ended pool') ||
-      message.includes('Postgres unexpectedly closed connection')
-    ) {
-      // Connection timed out - ignore / immediately retry
-      // We don't explicitly set the delay to 0, since there could have been another error that
-      // we need to respect.
     } else {
       this.setDelay(30_000);
     }
package/src/replication/MongoRelation.ts

@@ -2,6 +2,7 @@ import { storage } from '@powersync/service-core';
 import { SqliteRow, SqliteValue, toSyncRulesRow } from '@powersync/service-sync-rules';
 import * as mongo from 'mongodb';
 import { JSONBig, JsonContainer } from '@powersync/service-jsonbig';
+import { CHECKPOINTS_COLLECTION } from './replication-utils.js';
 
 export function getMongoRelation(source: mongo.ChangeStreamNameSpace): storage.SourceEntityDescriptor {
   return {
@@ -145,7 +146,10 @@ function filterJsonData(data: any, depth = 0): any {
 export async function createCheckpoint(client: mongo.MongoClient, db: mongo.Db): Promise<string> {
   const session = client.startSession();
   try {
-
+    // Note: If multiple PowerSync instances are replicating the same source database,
+    // they'll modify the same checkpoint document. This is fine - it could create
+    // more replication load than required, but won't break anything.
+    await db.collection(CHECKPOINTS_COLLECTION).findOneAndUpdate(
       {
         _id: 'checkpoint' as any
       },
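Note: the update document passed to findOneAndUpdate is truncated in this diff. A hypothetical equivalent is sketched below; the counter field name i and the $inc/upsert combination are illustrative assumptions, not taken from the package. The essential point is that any acknowledged write to _powersync_checkpoints produces a change stream event whose clusterTime can serve as a commit LSN:

import * as mongo from 'mongodb';

// Hypothetical sketch of the checkpoint write.
async function touchCheckpoint(db: mongo.Db, session: mongo.ClientSession): Promise<void> {
  await db.collection('_powersync_checkpoints').findOneAndUpdate(
    { _id: 'checkpoint' as any },
    { $inc: { i: 1 } }, // assumed update body
    { session, upsert: true, returnDocument: 'after' }
  );
}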
@@ -159,7 +163,6 @@ export async function createCheckpoint(client: mongo.MongoClient, db: mongo.Db): Promise<string> {
       }
     );
     const time = session.operationTime!;
-    // console.log('marked checkpoint at', time, getMongoLsn(time));
     // TODO: Use the above when we support custom write checkpoints
     return getMongoLsn(time);
   } finally {
package/src/replication/replication-utils.ts

@@ -1,6 +1,7 @@
-import * as mongo from 'mongodb';
 import { MongoManager } from './MongoManager.js';
 
+export const CHECKPOINTS_COLLECTION = '_powersync_checkpoints';
+
 export async function checkSourceConfiguration(connectionManager: MongoManager): Promise<void> {
   const db = connectionManager.db;
   const hello = await db.command({ hello: 1 });