@powersync/service-module-mongodb 0.9.1 → 0.10.1

This diff shows the changes between the two published package versions as they appear in the public registry, and is provided for informational purposes only.
Files changed (39)
  1. package/CHANGELOG.md +28 -0
  2. package/dist/api/MongoRouteAPIAdapter.d.ts +1 -1
  3. package/dist/api/MongoRouteAPIAdapter.js +1 -1
  4. package/dist/api/MongoRouteAPIAdapter.js.map +1 -1
  5. package/dist/replication/ChangeStream.d.ts +26 -11
  6. package/dist/replication/ChangeStream.js +556 -300
  7. package/dist/replication/ChangeStream.js.map +1 -1
  8. package/dist/replication/ChangeStreamReplicationJob.d.ts +2 -0
  9. package/dist/replication/ChangeStreamReplicationJob.js +13 -5
  10. package/dist/replication/ChangeStreamReplicationJob.js.map +1 -1
  11. package/dist/replication/ChangeStreamReplicator.d.ts +1 -0
  12. package/dist/replication/ChangeStreamReplicator.js +21 -0
  13. package/dist/replication/ChangeStreamReplicator.js.map +1 -1
  14. package/dist/replication/MongoRelation.d.ts +1 -1
  15. package/dist/replication/MongoRelation.js +4 -0
  16. package/dist/replication/MongoRelation.js.map +1 -1
  17. package/dist/replication/MongoSnapshotQuery.d.ts +26 -0
  18. package/dist/replication/MongoSnapshotQuery.js +56 -0
  19. package/dist/replication/MongoSnapshotQuery.js.map +1 -0
  20. package/dist/replication/replication-utils.d.ts +2 -0
  21. package/dist/replication/replication-utils.js +3 -0
  22. package/dist/replication/replication-utils.js.map +1 -1
  23. package/package.json +8 -8
  24. package/src/api/MongoRouteAPIAdapter.ts +1 -1
  25. package/src/replication/ChangeStream.ts +324 -124
  26. package/src/replication/ChangeStreamReplicationJob.ts +14 -6
  27. package/src/replication/ChangeStreamReplicator.ts +23 -0
  28. package/src/replication/MongoRelation.ts +4 -1
  29. package/src/replication/MongoSnapshotQuery.ts +59 -0
  30. package/src/replication/replication-utils.ts +5 -0
  31. package/test/src/change_stream.test.ts +18 -13
  32. package/test/src/change_stream_utils.ts +45 -20
  33. package/test/src/chunked_snapshot.test.ts +153 -0
  34. package/test/src/resume.test.ts +7 -94
  35. package/test/src/resume_token.test.ts +78 -2
  36. package/test/src/resuming_snapshots.test.ts +138 -0
  37. package/test/src/slow_tests.test.ts +4 -18
  38. package/test/src/util.ts +12 -1
  39. package/tsconfig.tsbuildinfo +1 -1
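
The largest change is in `ChangeStream`: the initial snapshot is now chunked and resumable. `snapshotTable` reads documents through the new `ChunkedSnapshotQuery` (keyset pagination on `_id`), flushes each chunk, and records progress (`lastKey`, `replicatedCount`, `totalEstimatedCount`) via `batch.updateTableProgress`, while `initialReplication` stores a snapshot LSN so an interrupted snapshot can resume instead of restarting from scratch. The diff body below is the compiled `package/dist/replication/ChangeStream.js`. As a rough illustration of the pagination pattern (a sketch only, not the package's API; the `chunkedSnapshot` helper and its names are hypothetical, assuming the official `mongodb` Node driver):

import { MongoClient } from 'mongodb';

// Sketch of the keyset-pagination pattern behind the new ChunkedSnapshotQuery.
// Hypothetical helper for illustration; not the package's API.
async function* chunkedSnapshot(
  client: MongoClient,
  dbName: string,
  collName: string,
  batchSize = 6_000,
  resumeAfterId?: any
) {
  const collection = client.db(dbName).collection(collName);
  let lastKey: any = resumeAfterId ?? null;
  while (true) {
    // Only fetch documents strictly after the last persisted _id, so that a
    // restarted snapshot continues where the previous attempt stopped.
    const filter = lastKey == null ? {} : { _id: { $gt: lastKey } };
    const docs = await collection
      .find(filter, { readConcern: { level: 'majority' } })
      .sort({ _id: 1 })
      .limit(batchSize)
      .toArray();
    if (docs.length == 0) {
      break;
    }
    lastKey = docs[docs.length - 1]._id;
    // The caller saves the documents, flushes, then persists lastKey as progress.
    yield { docs, lastKey };
  }
}

In the actual implementation the chunk size comes from the new `snapshotChunkLength` option (default 6_000) and progress is persisted through `batch.updateTableProgress`, as seen in the diff below.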
@@ -1,12 +1,65 @@
+ var __addDisposableResource = (this && this.__addDisposableResource) || function (env, value, async) {
+ if (value !== null && value !== void 0) {
+ if (typeof value !== "object" && typeof value !== "function") throw new TypeError("Object expected.");
+ var dispose, inner;
+ if (async) {
+ if (!Symbol.asyncDispose) throw new TypeError("Symbol.asyncDispose is not defined.");
+ dispose = value[Symbol.asyncDispose];
+ }
+ if (dispose === void 0) {
+ if (!Symbol.dispose) throw new TypeError("Symbol.dispose is not defined.");
+ dispose = value[Symbol.dispose];
+ if (async) inner = dispose;
+ }
+ if (typeof dispose !== "function") throw new TypeError("Object not disposable.");
+ if (inner) dispose = function() { try { inner.call(this); } catch (e) { return Promise.reject(e); } };
+ env.stack.push({ value: value, dispose: dispose, async: async });
+ }
+ else if (async) {
+ env.stack.push({ async: true });
+ }
+ return value;
+ };
+ var __disposeResources = (this && this.__disposeResources) || (function (SuppressedError) {
+ return function (env) {
+ function fail(e) {
+ env.error = env.hasError ? new SuppressedError(e, env.error, "An error was suppressed during disposal.") : e;
+ env.hasError = true;
+ }
+ var r, s = 0;
+ function next() {
+ while (r = env.stack.pop()) {
+ try {
+ if (!r.async && s === 1) return s = 0, env.stack.push(r), Promise.resolve().then(next);
+ if (r.dispose) {
+ var result = r.dispose.call(r.value);
+ if (r.async) return s |= 2, Promise.resolve(result).then(next, function(e) { fail(e); return next(); });
+ }
+ else s |= 1;
+ }
+ catch (e) {
+ fail(e);
+ }
+ }
+ if (s === 1) return env.hasError ? Promise.reject(env.error) : Promise.resolve();
+ if (env.hasError) throw env.error;
+ }
+ return next();
+ };
+ })(typeof SuppressedError === "function" ? SuppressedError : function (error, suppressed, message) {
+ var e = new Error(message);
+ return e.name = "SuppressedError", e.error = error, e.suppressed = suppressed, e;
+ });
  import { isMongoNetworkTimeoutError, isMongoServerError, mongo } from '@powersync/lib-service-mongodb';
- import { container, DatabaseConnectionError, ErrorCode, logger, ReplicationAbortedError, ReplicationAssertionError, ServiceError } from '@powersync/lib-services-framework';
- import { SaveOperationTag } from '@powersync/service-core';
+ import { container, DatabaseConnectionError, logger as defaultLogger, ErrorCode, ReplicationAbortedError, ReplicationAssertionError, ServiceError } from '@powersync/lib-services-framework';
+ import { RelationCache, SaveOperationTag } from '@powersync/service-core';
  import { ReplicationMetric } from '@powersync/service-types';
  import { MongoLSN } from '../common/MongoLSN.js';
  import { PostImagesOption } from '../types/types.js';
  import { escapeRegExp } from '../utils.js';
  import { constructAfterRecord, createCheckpoint, getCacheIdentifier, getMongoRelation, STANDALONE_CHECKPOINT_ID } from './MongoRelation.js';
- import { CHECKPOINTS_COLLECTION } from './replication-utils.js';
+ import { ChunkedSnapshotQuery } from './MongoSnapshotQuery.js';
+ import { CHECKPOINTS_COLLECTION, timestampToDate } from './replication-utils.js';
  /**
  * Thrown when the change stream is not valid anymore, and replication
  * must be restarted.
@@ -31,14 +84,27 @@ export class ChangeStream {
  metrics;
  maxAwaitTimeMS;
  abort_signal;
- relation_cache = new Map();
+ relationCache = new RelationCache(getCacheIdentifier);
+ /**
+ * Time of the oldest uncommitted change, according to the source db.
+ * This is used to determine the replication lag.
+ */
+ oldestUncommittedChange = null;
+ /**
+ * Keep track of whether we have done a commit or keepalive yet.
+ * We can only compute replication lag if isStartingReplication == false, or oldestUncommittedChange is present.
+ */
+ isStartingReplication = true;
  checkpointStreamId = new mongo.ObjectId();
+ logger;
+ snapshotChunkLength;
  constructor(options) {
  this.storage = options.storage;
  this.metrics = options.metrics;
  this.group_id = options.storage.group_id;
  this.connections = options.connections;
  this.maxAwaitTimeMS = options.maxAwaitTimeMS ?? 10_000;
+ this.snapshotChunkLength = options.snapshotChunkLength ?? 6_000;
  this.client = this.connections.client;
  this.defaultDb = this.connections.db;
  this.sync_rules = options.storage.getParsedSyncRules({
@@ -48,6 +114,7 @@ export class ChangeStream {
  this.abort_signal.addEventListener('abort', () => {
  // TODO: Fast abort?
  }, { once: true });
+ this.logger = options.logger ?? defaultLogger;
  }
  get stopped() {
  return this.abort_signal.aborted;
@@ -58,9 +125,6 @@ export class ChangeStream {
  get configurePostImages() {
  return this.connections.options.postImages == PostImagesOption.AUTO_CONFIGURE;
  }
- get logPrefix() {
- return `[powersync_${this.group_id}]`;
- }
  /**
  * This resolves a pattern, persists the related metadata, and returns
  * the resulting SourceTables.
@@ -88,7 +152,7 @@ export class ChangeStream {
  }, { nameOnly: false })
  .toArray();
  if (!tablePattern.isWildcard && collections.length == 0) {
- logger.warn(`${this.logPrefix} Collection ${schema}.${tablePattern.name} not found`);
+ this.logger.warn(`Collection ${schema}.${tablePattern.name} not found`);
  }
  for (let collection of collections) {
  const table = await this.handleRelation(batch, getMongoRelation({ db: schema, coll: collection.name }),
@@ -101,59 +165,162 @@ export class ChangeStream {
  async initSlot() {
  const status = await this.storage.getStatus();
  if (status.snapshot_done && status.checkpoint_lsn) {
- logger.info(`${this.logPrefix} Initial replication already done`);
- return { needsInitialSync: false };
+ this.logger.info(`Initial replication already done`);
+ return { needsInitialSync: false, snapshotLsn: null };
  }
- return { needsInitialSync: true };
+ return { needsInitialSync: true, snapshotLsn: status.snapshot_lsn };
  }
  async estimatedCount(table) {
- const db = this.client.db(table.schema);
- const count = await db.collection(table.table).estimatedDocumentCount();
+ const count = await this.estimatedCountNumber(table);
  return `~${count}`;
  }
- /**
- * Start initial replication.
- *
- * If (partial) replication was done before on this slot, this clears the state
- * and starts again from scratch.
- */
- async startInitialReplication() {
- await this.storage.clear();
- await this.initialReplication();
+ async estimatedCountNumber(table) {
+ const db = this.client.db(table.schema);
+ return await db.collection(table.table).estimatedDocumentCount();
  }
- async initialReplication() {
- const sourceTables = this.sync_rules.getSourceTables();
- await this.client.connect();
- // We need to get the snapshot time before taking the initial snapshot.
- const hello = await this.defaultDb.command({ hello: 1 });
- const snapshotTime = hello.lastWrite?.majorityOpTime?.ts;
- if (hello.msg == 'isdbgrid') {
- throw new ServiceError(ErrorCode.PSYNC_S1341, 'Sharded MongoDB Clusters are not supported yet (including MongoDB Serverless instances).');
+ async getSnapshotLsn() {
+ const env_1 = { stack: [], error: void 0, hasError: false };
+ try {
+ const hello = await this.defaultDb.command({ hello: 1 });
+ // Basic sanity check
+ if (hello.msg == 'isdbgrid') {
+ throw new ServiceError(ErrorCode.PSYNC_S1341, 'Sharded MongoDB Clusters are not supported yet (including MongoDB Serverless instances).');
+ }
+ else if (hello.setName == null) {
+ throw new ServiceError(ErrorCode.PSYNC_S1342, 'Standalone MongoDB instances are not supported - use a replicaset.');
+ }
+ // Open a change stream just to get a resume token for later use.
+ // We could use clusterTime from the hello command, but that won't tell us if the
+ // snapshot isn't valid anymore.
+ // If we just use the first resumeToken from the stream, we get two potential issues:
+ // 1. The resumeToken may just be a wrapped clusterTime, which does not detect changes
+ // in source db or other stream issues.
+ // 2. The first actual change we get may have the same clusterTime, causing us to incorrect
+ // skip that event.
+ // Instead, we create a new checkpoint document, and wait until we get that document back in the stream.
+ // To avoid potential race conditions with the checkpoint creation, we create a new checkpoint document
+ // periodically until the timeout is reached.
+ const LSN_TIMEOUT_SECONDS = 60;
+ const LSN_CREATE_INTERVAL_SECONDS = 1;
+ const streamManager = __addDisposableResource(env_1, this.openChangeStream({ lsn: null, maxAwaitTimeMs: 0 }), true);
+ const { stream } = streamManager;
+ const startTime = performance.now();
+ let lastCheckpointCreated = -10_000;
+ let eventsSeen = 0;
+ while (performance.now() - startTime < LSN_TIMEOUT_SECONDS * 1000) {
+ if (performance.now() - lastCheckpointCreated >= LSN_CREATE_INTERVAL_SECONDS * 1000) {
+ await createCheckpoint(this.client, this.defaultDb, this.checkpointStreamId);
+ lastCheckpointCreated = performance.now();
+ }
+ // tryNext() doesn't block, while next() / hasNext() does block until there is data on the stream
+ const changeDocument = await stream.tryNext().catch((e) => {
+ throw mapChangeStreamError(e);
+ });
+ if (changeDocument == null) {
+ continue;
+ }
+ const ns = 'ns' in changeDocument && 'coll' in changeDocument.ns ? changeDocument.ns : undefined;
+ if (ns?.coll == CHECKPOINTS_COLLECTION && 'documentKey' in changeDocument) {
+ const checkpointId = changeDocument.documentKey._id;
+ if (!this.checkpointStreamId.equals(checkpointId)) {
+ continue;
+ }
+ const { comparable: lsn } = new MongoLSN({
+ timestamp: changeDocument.clusterTime,
+ resume_token: changeDocument._id
+ });
+ return lsn;
+ }
+ eventsSeen += 1;
+ }
+ // Could happen if there is a very large replication lag?
+ throw new ServiceError(ErrorCode.PSYNC_S1301, `Timeout after while waiting for checkpoint document for ${LSN_TIMEOUT_SECONDS}s. Streamed events = ${eventsSeen}`);
  }
- else if (hello.setName == null) {
- throw new ServiceError(ErrorCode.PSYNC_S1342, 'Standalone MongoDB instances are not supported - use a replicaset.');
+ catch (e_1) {
+ env_1.error = e_1;
+ env_1.hasError = true;
  }
- else if (snapshotTime == null) {
- // Not known where this would happen apart from the above cases
- throw new ReplicationAssertionError('MongoDB lastWrite timestamp not found.');
+ finally {
+ const result_1 = __disposeResources(env_1);
+ if (result_1)
+ await result_1;
  }
- await this.storage.startBatch({ zeroLSN: MongoLSN.ZERO.comparable, defaultSchema: this.defaultDb.databaseName, storeCurrentData: false }, async (batch) => {
+ }
+ async validateSnapshotLsn(lsn) {
+ const env_2 = { stack: [], error: void 0, hasError: false };
+ try {
+ const streamManager = __addDisposableResource(env_2, this.openChangeStream({ lsn: lsn, maxAwaitTimeMs: 0 }), true);
+ const { stream } = streamManager;
+ try {
+ // tryNext() doesn't block, while next() / hasNext() does block until there is data on the stream
+ await stream.tryNext();
+ }
+ catch (e) {
+ // Note: A timeout here is not handled as a ChangeStreamInvalidatedError, even though
+ // we possibly cannot recover from it.
+ throw mapChangeStreamError(e);
+ }
+ }
+ catch (e_2) {
+ env_2.error = e_2;
+ env_2.hasError = true;
+ }
+ finally {
+ const result_2 = __disposeResources(env_2);
+ if (result_2)
+ await result_2;
+ }
+ }
+ async initialReplication(snapshotLsn) {
+ const sourceTables = this.sync_rules.getSourceTables();
+ await this.client.connect();
+ await this.storage.startBatch({
+ logger: this.logger,
+ zeroLSN: MongoLSN.ZERO.comparable,
+ defaultSchema: this.defaultDb.databaseName,
+ storeCurrentData: false,
+ skipExistingRows: true
+ }, async (batch) => {
+ if (snapshotLsn == null) {
+ // First replication attempt - get a snapshot and store the timestamp
+ snapshotLsn = await this.getSnapshotLsn();
+ await batch.setSnapshotLsn(snapshotLsn);
+ this.logger.info(`Marking snapshot at ${snapshotLsn}`);
+ }
+ else {
+ this.logger.info(`Resuming snapshot at ${snapshotLsn}`);
+ // Check that the snapshot is still valid.
+ await this.validateSnapshotLsn(snapshotLsn);
+ }
  // Start by resolving all tables.
  // This checks postImage configuration, and that should fail as
- // earlier as possible.
+ // early as possible.
  let allSourceTables = [];
  for (let tablePattern of sourceTables) {
  const tables = await this.resolveQualifiedTableNames(batch, tablePattern);
  allSourceTables.push(...tables);
  }
+ let tablesWithStatus = [];
  for (let table of allSourceTables) {
+ if (table.snapshotComplete) {
+ this.logger.info(`Skipping ${table.qualifiedName} - snapshot already done`);
+ continue;
+ }
+ let count = await this.estimatedCountNumber(table);
+ const updated = await batch.updateTableProgress(table, {
+ totalEstimatedCount: count
+ });
+ tablesWithStatus.push(updated);
+ this.relationCache.update(updated);
+ this.logger.info(`To replicate: ${table.qualifiedName}: ${updated.snapshotStatus?.replicatedCount}/~${updated.snapshotStatus?.totalEstimatedCount}`);
+ }
+ for (let table of tablesWithStatus) {
  await this.snapshotTable(batch, table);
  await batch.markSnapshotDone([table], MongoLSN.ZERO.comparable);
  await touch();
  }
- const { comparable: lsn } = new MongoLSN({ timestamp: snapshotTime });
- logger.info(`${this.logPrefix} Snapshot commit at ${snapshotTime.inspect()} / ${lsn}`);
- await batch.commit(lsn);
+ this.logger.info(`Snapshot commit at ${snapshotLsn}`);
+ await batch.commit(snapshotLsn);
  });
  }
  async setupCheckpointsCollection() {
@@ -214,51 +381,77 @@ export class ChangeStream {
  }
  }
  async snapshotTable(batch, table) {
- logger.info(`${this.logPrefix} Replicating ${table.qualifiedName}`);
- const estimatedCount = await this.estimatedCount(table);
- let at = 0;
- const db = this.client.db(table.schema);
- const collection = db.collection(table.table);
- const cursor = collection.find({}, { batchSize: 6_000, readConcern: 'majority' });
- let lastBatch = performance.now();
- // hasNext() is the call that triggers fetching of the next batch,
- // then we read it with readBufferedDocuments(). This gives us semi-explicit
- // control over the fetching of each batch, and avoids a separate promise per document
- let hasNextPromise = cursor.hasNext();
- while (await hasNextPromise) {
- const docBatch = cursor.readBufferedDocuments();
- // Pre-fetch next batch, so that we can read and write concurrently
- hasNextPromise = cursor.hasNext();
- for (let document of docBatch) {
+ const env_3 = { stack: [], error: void 0, hasError: false };
+ try {
+ const totalEstimatedCount = await this.estimatedCountNumber(table);
+ let at = table.snapshotStatus?.replicatedCount ?? 0;
+ const db = this.client.db(table.schema);
+ const collection = db.collection(table.table);
+ const query = __addDisposableResource(env_3, new ChunkedSnapshotQuery({
+ collection,
+ key: table.snapshotStatus?.lastKey,
+ batchSize: this.snapshotChunkLength
+ }), true);
+ if (query.lastKey != null) {
+ this.logger.info(`Replicating ${table.qualifiedName} ${table.formatSnapshotProgress()} - resuming at _id > ${query.lastKey}`);
+ }
+ else {
+ this.logger.info(`Replicating ${table.qualifiedName} ${table.formatSnapshotProgress()}`);
+ }
+ let lastBatch = performance.now();
+ let nextChunkPromise = query.nextChunk();
+ while (true) {
+ const { docs: docBatch, lastKey } = await nextChunkPromise;
+ if (docBatch.length == 0) {
+ break;
+ }
  if (this.abort_signal.aborted) {
  throw new ReplicationAbortedError(`Aborted initial replication`);
  }
- const record = constructAfterRecord(document);
- // This auto-flushes when the batch reaches its size limit
- await batch.save({
- tag: SaveOperationTag.INSERT,
- sourceTable: table,
- before: undefined,
- beforeReplicaId: undefined,
- after: record,
- afterReplicaId: document._id
+ // Pre-fetch next batch, so that we can read and write concurrently
+ nextChunkPromise = query.nextChunk();
+ for (let document of docBatch) {
+ const record = constructAfterRecord(document);
+ // This auto-flushes when the batch reaches its size limit
+ await batch.save({
+ tag: SaveOperationTag.INSERT,
+ sourceTable: table,
+ before: undefined,
+ beforeReplicaId: undefined,
+ after: record,
+ afterReplicaId: document._id
+ });
+ }
+ // Important: flush before marking progress
+ await batch.flush();
+ at += docBatch.length;
+ this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(docBatch.length);
+ table = await batch.updateTableProgress(table, {
+ lastKey,
+ replicatedCount: at,
+ totalEstimatedCount: totalEstimatedCount
  });
+ this.relationCache.update(table);
+ const duration = performance.now() - lastBatch;
+ lastBatch = performance.now();
+ this.logger.info(`Replicating ${table.qualifiedName} ${table.formatSnapshotProgress()} in ${duration.toFixed(0)}ms`);
+ await touch();
  }
- at += docBatch.length;
- this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(docBatch.length);
- const duration = performance.now() - lastBatch;
- lastBatch = performance.now();
- logger.info(`${this.logPrefix} Replicating ${table.qualifiedName} ${at}/${estimatedCount} in ${duration.toFixed(0)}ms`);
- await touch();
- }
- // In case the loop was interrupted, make sure we await the last promise.
- await hasNextPromise;
- await batch.flush();
- logger.info(`${this.logPrefix} Replicated ${at} documents for ${table.qualifiedName}`);
+ // In case the loop was interrupted, make sure we await the last promise.
+ await nextChunkPromise;
+ }
+ catch (e_3) {
+ env_3.error = e_3;
+ env_3.hasError = true;
+ }
+ finally {
+ const result_3 = __disposeResources(env_3);
+ if (result_3)
+ await result_3;
+ }
  }
  async getRelation(batch, descriptor, options) {
- const cacheId = getCacheIdentifier(descriptor);
- const existing = this.relation_cache.get(cacheId);
+ const existing = this.relationCache.get(descriptor);
  if (existing != null) {
  return existing;
  }
@@ -287,7 +480,7 @@ export class ChangeStream {
  collMod: collectionInfo.name,
  changeStreamPreAndPostImages: { enabled: true }
  });
- logger.info(`${this.logPrefix} Enabled postImages on ${db}.${collectionInfo.name}`);
+ this.logger.info(`Enabled postImages on ${db}.${collectionInfo.name}`);
  }
  else if (!enabled) {
  throw new ServiceError(ErrorCode.PSYNC_S1343, `postImages not enabled on ${db}.${collectionInfo.name}`);
@@ -309,11 +502,11 @@ export class ChangeStream {
  entity_descriptor: descriptor,
  sync_rules: this.sync_rules
  });
- this.relation_cache.set(getCacheIdentifier(descriptor), result.table);
+ this.relationCache.update(result.table);
  // Drop conflicting collections.
  // This is generally not expected for MongoDB source dbs, so we log an error.
  if (result.dropTables.length > 0) {
- logger.error(`Conflicting collections found for ${JSON.stringify(descriptor)}. Dropping: ${result.dropTables.map((t) => t.id).join(', ')}`);
+ this.logger.error(`Conflicting collections found for ${JSON.stringify(descriptor)}. Dropping: ${result.dropTables.map((t) => t.id).join(', ')}`);
  await batch.drop(result.dropTables);
  }
  // Snapshot if:
  // Snapshot if:
@@ -322,7 +515,7 @@ export class ChangeStream {
322
515
  // 3. The table is used in sync rules.
323
516
  const shouldSnapshot = snapshot && !result.table.snapshotComplete && result.table.syncAny;
324
517
  if (shouldSnapshot) {
325
- logger.info(`${this.logPrefix} New collection: ${descriptor.schema}.${descriptor.name}`);
518
+ this.logger.info(`New collection: ${descriptor.schema}.${descriptor.name}`);
326
519
  // Truncate this table, in case a previous snapshot was interrupted.
327
520
  await batch.truncate([result.table]);
328
521
  await this.snapshotTable(batch, result.table);
@@ -334,7 +527,7 @@ export class ChangeStream {
  }
  async writeChange(batch, table, change) {
  if (!table.syncAny) {
- logger.debug(`${this.logPrefix} Collection ${table.qualifiedName} not used in sync rules - skipping`);
+ this.logger.debug(`Collection ${table.qualifiedName} not used in sync rules - skipping`);
  return null;
  }
  this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(1);
@@ -397,7 +590,11 @@ export class ChangeStream {
  const result = await this.initSlot();
  await this.setupCheckpointsCollection();
  if (result.needsInitialSync) {
- await this.startInitialReplication();
+ if (result.snapshotLsn == null) {
+ // Snapshot LSN is not present, so we need to start replication from scratch.
+ await this.storage.clear({ signal: this.abort_signal });
+ }
+ await this.initialReplication(result.snapshotLsn);
  }
  }
  async streamChanges() {
@@ -413,244 +610,303 @@ export class ChangeStream {
  throw e;
  }
  }
+ openChangeStream(options) {
+ const lastLsn = options.lsn ? MongoLSN.fromSerialized(options.lsn) : null;
+ const startAfter = lastLsn?.timestamp;
+ const resumeAfter = lastLsn?.resumeToken;
+ const filters = this.getSourceNamespaceFilters();
+ const pipeline = [
+ {
+ $match: filters.$match
+ },
+ { $changeStreamSplitLargeEvent: {} }
+ ];
+ let fullDocument;
+ if (this.usePostImages) {
+ // 'read_only' or 'auto_configure'
+ // Configuration happens during snapshot, or when we see new
+ // collections.
+ fullDocument = 'required';
+ }
+ else {
+ fullDocument = 'updateLookup';
+ }
+ const streamOptions = {
+ showExpandedEvents: true,
+ maxAwaitTimeMS: options.maxAwaitTimeMs ?? this.maxAwaitTimeMS,
+ fullDocument: fullDocument
+ };
+ /**
+ * Only one of these options can be supplied at a time.
+ */
+ if (resumeAfter) {
+ streamOptions.resumeAfter = resumeAfter;
+ }
+ else {
+ // Legacy: We don't persist lsns without resumeTokens anymore, but we do still handle the
+ // case if we have an old one.
+ streamOptions.startAtOperationTime = startAfter;
+ }
+ let stream;
+ if (filters.multipleDatabases) {
+ // Requires readAnyDatabase@admin on Atlas
+ stream = this.client.watch(pipeline, streamOptions);
+ }
+ else {
+ // Same general result, but requires less permissions than the above
+ stream = this.defaultDb.watch(pipeline, streamOptions);
+ }
+ this.abort_signal.addEventListener('abort', () => {
+ stream.close();
+ });
+ return {
+ stream,
+ filters,
+ [Symbol.asyncDispose]: async () => {
+ return stream.close();
+ }
+ };
+ }
  async streamChangesInternal() {
  // Auto-activate as soon as initial replication is done
  await this.storage.autoActivate();
- await this.storage.startBatch({ zeroLSN: MongoLSN.ZERO.comparable, defaultSchema: this.defaultDb.databaseName, storeCurrentData: false }, async (batch) => {
- const { lastCheckpointLsn } = batch;
- const lastLsn = lastCheckpointLsn ? MongoLSN.fromSerialized(lastCheckpointLsn) : null;
- const startAfter = lastLsn?.timestamp;
- const resumeAfter = lastLsn?.resumeToken;
- logger.info(`${this.logPrefix} Resume streaming at ${startAfter?.inspect()} / ${lastLsn}`);
- const filters = this.getSourceNamespaceFilters();
- const pipeline = [
- {
- $match: filters.$match
- },
- { $changeStreamSplitLargeEvent: {} }
- ];
- let fullDocument;
- if (this.usePostImages) {
- // 'read_only' or 'auto_configure'
- // Configuration happens during snapshot, or when we see new
- // collections.
- fullDocument = 'required';
- }
- else {
- fullDocument = 'updateLookup';
- }
- const streamOptions = {
- showExpandedEvents: true,
- maxAwaitTimeMS: this.maxAwaitTimeMS,
- fullDocument: fullDocument
- };
- /**
- * Only one of these options can be supplied at a time.
- */
- if (resumeAfter) {
- streamOptions.resumeAfter = resumeAfter;
- }
- else {
- streamOptions.startAtOperationTime = startAfter;
- }
- let stream;
- if (filters.multipleDatabases) {
- // Requires readAnyDatabase@admin on Atlas
- stream = this.client.watch(pipeline, streamOptions);
- }
- else {
- // Same general result, but requires less permissions than the above
- stream = this.defaultDb.watch(pipeline, streamOptions);
- }
- if (this.abort_signal.aborted) {
- stream.close();
- return;
- }
- this.abort_signal.addEventListener('abort', () => {
- stream.close();
- });
- // Always start with a checkpoint.
- // This helps us to clear errors when restarting, even if there is
- // no data to replicate.
- let waitForCheckpointLsn = await createCheckpoint(this.client, this.defaultDb, this.checkpointStreamId);
- let splitDocument = null;
- let flexDbNameWorkaroundLogged = false;
- let lastEmptyResume = performance.now();
- while (true) {
+ await this.storage.startBatch({
+ logger: this.logger,
+ zeroLSN: MongoLSN.ZERO.comparable,
+ defaultSchema: this.defaultDb.databaseName,
+ storeCurrentData: false
+ }, async (batch) => {
+ const env_4 = { stack: [], error: void 0, hasError: false };
+ try {
+ const { lastCheckpointLsn } = batch;
+ const lastLsn = MongoLSN.fromSerialized(lastCheckpointLsn);
+ const startAfter = lastLsn?.timestamp;
+ // It is normal for this to be a minute or two old when there is a low volume
+ // of ChangeStream events.
+ const tokenAgeSeconds = Math.round((Date.now() - timestampToDate(startAfter).getTime()) / 1000);
+ this.logger.info(`Resume streaming at ${startAfter?.inspect()} / ${lastLsn} | Token age: ${tokenAgeSeconds}s`);
+ const streamManager = __addDisposableResource(env_4, this.openChangeStream({ lsn: lastCheckpointLsn }), true);
+ const { stream, filters } = streamManager;
  if (this.abort_signal.aborted) {
- break;
+ await stream.close();
+ return;
  }
- const originalChangeDocument = await stream.tryNext().catch((e) => {
- throw mapChangeStreamError(e);
- });
- // The stream was closed, we will only ever receive `null` from it
- if (!originalChangeDocument && stream.closed) {
- break;
- }
- if (this.abort_signal.aborted) {
- break;
- }
- if (originalChangeDocument == null) {
- // We get a new null document after `maxAwaitTimeMS` if there were no other events.
- // In this case, stream.resumeToken is the resume token associated with the last response.
- // stream.resumeToken is not updated if stream.tryNext() returns data, while stream.next()
- // does update it.
- // From observed behavior, the actual resumeToken changes around once every 10 seconds.
- // If we don't update it on empty events, we do keep consistency, but resuming the stream
- // with old tokens may cause connection timeouts.
- // We throttle this further by only persisting a keepalive once a minute.
- // We add an additional check for waitForCheckpointLsn == null, to make sure we're not
- // doing a keepalive in the middle of a transaction.
- if (waitForCheckpointLsn == null && performance.now() - lastEmptyResume > 60_000) {
- const { comparable: lsn } = MongoLSN.fromResumeToken(stream.resumeToken);
- await batch.keepalive(lsn);
- await touch();
- lastEmptyResume = performance.now();
+ // Always start with a checkpoint.
+ // This helps us to clear errors when restarting, even if there is
+ // no data to replicate.
+ let waitForCheckpointLsn = await createCheckpoint(this.client, this.defaultDb, this.checkpointStreamId);
+ let splitDocument = null;
+ let flexDbNameWorkaroundLogged = false;
+ let lastEmptyResume = performance.now();
+ while (true) {
+ if (this.abort_signal.aborted) {
+ break;
  }
- continue;
- }
- await touch();
- if (startAfter != null && originalChangeDocument.clusterTime?.lte(startAfter)) {
- continue;
- }
- let changeDocument = originalChangeDocument;
- if (originalChangeDocument?.splitEvent != null) {
- // Handle split events from $changeStreamSplitLargeEvent.
- // This is only relevant for very large update operations.
- const splitEvent = originalChangeDocument?.splitEvent;
- if (splitDocument == null) {
- splitDocument = originalChangeDocument;
- }
- else {
- splitDocument = Object.assign(splitDocument, originalChangeDocument);
+ const originalChangeDocument = await stream.tryNext().catch((e) => {
+ throw mapChangeStreamError(e);
+ });
+ // The stream was closed, we will only ever receive `null` from it
+ if (!originalChangeDocument && stream.closed) {
+ break;
  }
- if (splitEvent.fragment == splitEvent.of) {
- // Got all fragments
- changeDocument = splitDocument;
- splitDocument = null;
+ if (this.abort_signal.aborted) {
+ break;
  }
- else {
- // Wait for more fragments
+ if (originalChangeDocument == null) {
+ // We get a new null document after `maxAwaitTimeMS` if there were no other events.
+ // In this case, stream.resumeToken is the resume token associated with the last response.
+ // stream.resumeToken is not updated if stream.tryNext() returns data, while stream.next()
+ // does update it.
+ // From observed behavior, the actual resumeToken changes around once every 10 seconds.
+ // If we don't update it on empty events, we do keep consistency, but resuming the stream
+ // with old tokens may cause connection timeouts.
+ // We throttle this further by only persisting a keepalive once a minute.
+ // We add an additional check for waitForCheckpointLsn == null, to make sure we're not
+ // doing a keepalive in the middle of a transaction.
+ if (waitForCheckpointLsn == null && performance.now() - lastEmptyResume > 60_000) {
+ const { comparable: lsn, timestamp } = MongoLSN.fromResumeToken(stream.resumeToken);
+ await batch.keepalive(lsn);
+ await touch();
+ lastEmptyResume = performance.now();
+ // Log the token update. This helps as a general "replication is still active" message in the logs.
+ // This token would typically be around 10s behind.
+ this.logger.info(`Idle change stream. Persisted resumeToken for ${timestampToDate(timestamp).toISOString()}`);
+ this.isStartingReplication = false;
+ }
  continue;
  }
- }
- else if (splitDocument != null) {
- // We were waiting for fragments, but got a different event
- throw new ReplicationAssertionError(`Incomplete splitEvent: ${JSON.stringify(splitDocument.splitEvent)}`);
- }
- if (!filters.multipleDatabases &&
- 'ns' in changeDocument &&
- changeDocument.ns.db != this.defaultDb.databaseName &&
- changeDocument.ns.db.endsWith(`_${this.defaultDb.databaseName}`)) {
- // When all of the following conditions are met:
- // 1. We're replicating from an Atlas Flex instance.
- // 2. There were changestream events recorded while the PowerSync service is paused.
- // 3. We're only replicating from a single database.
- // Then we've observed an ns with for example {db: '67b83e86cd20730f1e766dde_ps'},
- // instead of the expected {db: 'ps'}.
- // We correct this.
- changeDocument.ns.db = this.defaultDb.databaseName;
- if (!flexDbNameWorkaroundLogged) {
- flexDbNameWorkaroundLogged = true;
- logger.warn(`${this.logPrefix} Incorrect DB name in change stream: ${changeDocument.ns.db}. Changed to ${this.defaultDb.databaseName}.`);
- }
- }
- const ns = 'ns' in changeDocument && 'coll' in changeDocument.ns ? changeDocument.ns : undefined;
- if (ns?.coll == CHECKPOINTS_COLLECTION) {
- /**
- * Dropping the database does not provide an `invalidate` event.
- * We typically would receive `drop` events for the collection which we
- * would process below.
- *
- * However we don't commit the LSN after collections are dropped.
- * The prevents the `startAfter` or `resumeToken` from advancing past the drop events.
- * The stream also closes after the drop events.
- * This causes an infinite loop of processing the collection drop events.
- *
- * This check here invalidates the change stream if our `_checkpoints` collection
- * is dropped. This allows for detecting when the DB is dropped.
- */
- if (changeDocument.operationType == 'drop') {
- throw new ChangeStreamInvalidatedError('Internal collections have been dropped', new Error('_checkpoints collection was dropped'));
- }
- if (!(changeDocument.operationType == 'insert' ||
- changeDocument.operationType == 'update' ||
- changeDocument.operationType == 'replace')) {
+ await touch();
+ if (startAfter != null && originalChangeDocument.clusterTime?.lte(startAfter)) {
  continue;
  }
- // We handle two types of checkpoint events:
- // 1. "Standalone" checkpoints, typically write checkpoints. We want to process these
- // immediately, regardless of where they were created.
- // 2. "Batch" checkpoints for the current stream. This is used as a form of dynamic rate
- // limiting of commits, so we specifically want to exclude checkpoints from other streams.
- //
- // It may be useful to also throttle commits due to standalone checkpoints in the future.
- // However, these typically have a much lower rate than batch checkpoints, so we don't do that for now.
- const checkpointId = changeDocument.documentKey._id;
- if (!(checkpointId == STANDALONE_CHECKPOINT_ID || this.checkpointStreamId.equals(checkpointId))) {
- continue;
+ let changeDocument = originalChangeDocument;
+ if (originalChangeDocument?.splitEvent != null) {
+ // Handle split events from $changeStreamSplitLargeEvent.
+ // This is only relevant for very large update operations.
+ const splitEvent = originalChangeDocument?.splitEvent;
+ if (splitDocument == null) {
+ splitDocument = originalChangeDocument;
+ }
+ else {
+ splitDocument = Object.assign(splitDocument, originalChangeDocument);
+ }
+ if (splitEvent.fragment == splitEvent.of) {
+ // Got all fragments
+ changeDocument = splitDocument;
+ splitDocument = null;
+ }
+ else {
+ // Wait for more fragments
+ continue;
+ }
  }
- const { comparable: lsn } = new MongoLSN({
- timestamp: changeDocument.clusterTime,
- resume_token: changeDocument._id
- });
- if (waitForCheckpointLsn != null && lsn >= waitForCheckpointLsn) {
- waitForCheckpointLsn = null;
+ else if (splitDocument != null) {
+ // We were waiting for fragments, but got a different event
+ throw new ReplicationAssertionError(`Incomplete splitEvent: ${JSON.stringify(splitDocument.splitEvent)}`);
  }
- await batch.commit(lsn);
- }
- else if (changeDocument.operationType == 'insert' ||
- changeDocument.operationType == 'update' ||
- changeDocument.operationType == 'replace' ||
- changeDocument.operationType == 'delete') {
- if (waitForCheckpointLsn == null) {
- waitForCheckpointLsn = await createCheckpoint(this.client, this.defaultDb, this.checkpointStreamId);
+ if (!filters.multipleDatabases &&
+ 'ns' in changeDocument &&
+ changeDocument.ns.db != this.defaultDb.databaseName &&
+ changeDocument.ns.db.endsWith(`_${this.defaultDb.databaseName}`)) {
+ // When all of the following conditions are met:
+ // 1. We're replicating from an Atlas Flex instance.
+ // 2. There were changestream events recorded while the PowerSync service is paused.
+ // 3. We're only replicating from a single database.
+ // Then we've observed an ns with for example {db: '67b83e86cd20730f1e766dde_ps'},
+ // instead of the expected {db: 'ps'}.
+ // We correct this.
+ changeDocument.ns.db = this.defaultDb.databaseName;
+ if (!flexDbNameWorkaroundLogged) {
+ flexDbNameWorkaroundLogged = true;
+ this.logger.warn(`Incorrect DB name in change stream: ${changeDocument.ns.db}. Changed to ${this.defaultDb.databaseName}.`);
+ }
  }
- const rel = getMongoRelation(changeDocument.ns);
- const table = await this.getRelation(batch, rel, {
- // In most cases, we should not need to snapshot this. But if this is the first time we see the collection
- // for whatever reason, then we do need to snapshot it.
- // This may result in some duplicate operations when a collection is created for the first time after
- // sync rules was deployed.
- snapshot: true
- });
- if (table.syncAny) {
- await this.writeChange(batch, table, changeDocument);
+ const ns = 'ns' in changeDocument && 'coll' in changeDocument.ns ? changeDocument.ns : undefined;
+ if (ns?.coll == CHECKPOINTS_COLLECTION) {
+ /**
+ * Dropping the database does not provide an `invalidate` event.
+ * We typically would receive `drop` events for the collection which we
+ * would process below.
+ *
+ * However we don't commit the LSN after collections are dropped.
+ * The prevents the `startAfter` or `resumeToken` from advancing past the drop events.
+ * The stream also closes after the drop events.
+ * This causes an infinite loop of processing the collection drop events.
+ *
+ * This check here invalidates the change stream if our `_checkpoints` collection
+ * is dropped. This allows for detecting when the DB is dropped.
+ */
+ if (changeDocument.operationType == 'drop') {
+ throw new ChangeStreamInvalidatedError('Internal collections have been dropped', new Error('_checkpoints collection was dropped'));
+ }
+ if (!(changeDocument.operationType == 'insert' ||
+ changeDocument.operationType == 'update' ||
+ changeDocument.operationType == 'replace')) {
+ continue;
+ }
+ // We handle two types of checkpoint events:
+ // 1. "Standalone" checkpoints, typically write checkpoints. We want to process these
+ // immediately, regardless of where they were created.
+ // 2. "Batch" checkpoints for the current stream. This is used as a form of dynamic rate
+ // limiting of commits, so we specifically want to exclude checkpoints from other streams.
+ //
+ // It may be useful to also throttle commits due to standalone checkpoints in the future.
+ // However, these typically have a much lower rate than batch checkpoints, so we don't do that for now.
+ const checkpointId = changeDocument.documentKey._id;
+ if (!(checkpointId == STANDALONE_CHECKPOINT_ID || this.checkpointStreamId.equals(checkpointId))) {
+ continue;
+ }
+ const { comparable: lsn } = new MongoLSN({
+ timestamp: changeDocument.clusterTime,
+ resume_token: changeDocument._id
+ });
+ if (waitForCheckpointLsn != null && lsn >= waitForCheckpointLsn) {
+ waitForCheckpointLsn = null;
+ }
+ const didCommit = await batch.commit(lsn, { oldestUncommittedChange: this.oldestUncommittedChange });
+ if (didCommit) {
+ this.oldestUncommittedChange = null;
+ this.isStartingReplication = false;
+ }
  }
- }
- else if (changeDocument.operationType == 'drop') {
- const rel = getMongoRelation(changeDocument.ns);
- const table = await this.getRelation(batch, rel, {
- // We're "dropping" this collection, so never snapshot it.
- snapshot: false
- });
- if (table.syncAny) {
- await batch.drop([table]);
- this.relation_cache.delete(getCacheIdentifier(rel));
+ else if (changeDocument.operationType == 'insert' ||
+ changeDocument.operationType == 'update' ||
+ changeDocument.operationType == 'replace' ||
+ changeDocument.operationType == 'delete') {
+ if (waitForCheckpointLsn == null) {
+ waitForCheckpointLsn = await createCheckpoint(this.client, this.defaultDb, this.checkpointStreamId);
+ }
+ const rel = getMongoRelation(changeDocument.ns);
+ const table = await this.getRelation(batch, rel, {
+ // In most cases, we should not need to snapshot this. But if this is the first time we see the collection
+ // for whatever reason, then we do need to snapshot it.
+ // This may result in some duplicate operations when a collection is created for the first time after
+ // sync rules was deployed.
+ snapshot: true
+ });
+ if (table.syncAny) {
+ if (this.oldestUncommittedChange == null && changeDocument.clusterTime != null) {
+ this.oldestUncommittedChange = timestampToDate(changeDocument.clusterTime);
+ }
+ await this.writeChange(batch, table, changeDocument);
+ }
  }
- }
- else if (changeDocument.operationType == 'rename') {
- const relFrom = getMongoRelation(changeDocument.ns);
- const relTo = getMongoRelation(changeDocument.to);
- const tableFrom = await this.getRelation(batch, relFrom, {
- // We're "dropping" this collection, so never snapshot it.
- snapshot: false
- });
- if (tableFrom.syncAny) {
- await batch.drop([tableFrom]);
- this.relation_cache.delete(getCacheIdentifier(relFrom));
+ else if (changeDocument.operationType == 'drop') {
+ const rel = getMongoRelation(changeDocument.ns);
+ const table = await this.getRelation(batch, rel, {
+ // We're "dropping" this collection, so never snapshot it.
+ snapshot: false
+ });
+ if (table.syncAny) {
+ await batch.drop([table]);
+ this.relationCache.delete(table);
+ }
+ }
+ else if (changeDocument.operationType == 'rename') {
+ const relFrom = getMongoRelation(changeDocument.ns);
+ const relTo = getMongoRelation(changeDocument.to);
+ const tableFrom = await this.getRelation(batch, relFrom, {
+ // We're "dropping" this collection, so never snapshot it.
+ snapshot: false
+ });
+ if (tableFrom.syncAny) {
+ await batch.drop([tableFrom]);
+ this.relationCache.delete(relFrom);
+ }
+ // Here we do need to snapshot the new table
+ const collection = await this.getCollectionInfo(relTo.schema, relTo.name);
+ await this.handleRelation(batch, relTo, {
+ // This is a new (renamed) collection, so always snapshot it.
+ snapshot: true,
+ collectionInfo: collection
+ });
  }
- // Here we do need to snapshot the new table
- const collection = await this.getCollectionInfo(relTo.schema, relTo.name);
- await this.handleRelation(batch, relTo, {
- // This is a new (renamed) collection, so always snapshot it.
- snapshot: true,
- collectionInfo: collection
- });
  }
  }
+ catch (e_4) {
+ env_4.error = e_4;
+ env_4.hasError = true;
+ }
+ finally {
+ const result_4 = __disposeResources(env_4);
+ if (result_4)
+ await result_4;
+ }
  });
  }
+ async getReplicationLagMillis() {
+ if (this.oldestUncommittedChange == null) {
+ if (this.isStartingReplication) {
+ // We don't have anything to compute replication lag with yet.
+ return undefined;
+ }
+ else {
+ // We don't have any uncommitted changes, so replication is up-to-date.
+ return 0;
+ }
+ }
+ return Date.now() - this.oldestUncommittedChange.getTime();
+ }
  }
  async function touch() {
  // FIXME: The hosted Kubernetes probe does not actually check the timestamp on this.