@powersync/service-module-mongodb 0.9.0 → 0.10.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/CHANGELOG.md +32 -0
  2. package/dist/api/MongoRouteAPIAdapter.d.ts +1 -2
  3. package/dist/api/MongoRouteAPIAdapter.js +3 -6
  4. package/dist/api/MongoRouteAPIAdapter.js.map +1 -1
  5. package/dist/replication/ChangeStream.d.ts +27 -11
  6. package/dist/replication/ChangeStream.js +565 -288
  7. package/dist/replication/ChangeStream.js.map +1 -1
  8. package/dist/replication/ChangeStreamReplicationJob.d.ts +2 -0
  9. package/dist/replication/ChangeStreamReplicationJob.js +13 -5
  10. package/dist/replication/ChangeStreamReplicationJob.js.map +1 -1
  11. package/dist/replication/ChangeStreamReplicator.d.ts +1 -0
  12. package/dist/replication/ChangeStreamReplicator.js +21 -0
  13. package/dist/replication/ChangeStreamReplicator.js.map +1 -1
  14. package/dist/replication/MongoRelation.d.ts +9 -2
  15. package/dist/replication/MongoRelation.js +16 -5
  16. package/dist/replication/MongoRelation.js.map +1 -1
  17. package/dist/replication/MongoSnapshotQuery.d.ts +26 -0
  18. package/dist/replication/MongoSnapshotQuery.js +56 -0
  19. package/dist/replication/MongoSnapshotQuery.js.map +1 -0
  20. package/dist/replication/replication-utils.d.ts +2 -0
  21. package/dist/replication/replication-utils.js +3 -0
  22. package/dist/replication/replication-utils.js.map +1 -1
  23. package/package.json +9 -9
  24. package/src/api/MongoRouteAPIAdapter.ts +3 -7
  25. package/src/replication/ChangeStream.ts +371 -135
  26. package/src/replication/ChangeStreamReplicationJob.ts +14 -6
  27. package/src/replication/ChangeStreamReplicator.ts +23 -0
  28. package/src/replication/MongoRelation.ts +21 -6
  29. package/src/replication/MongoSnapshotQuery.ts +59 -0
  30. package/src/replication/replication-utils.ts +5 -0
  31. package/test/src/change_stream.test.ts +18 -13
  32. package/test/src/change_stream_utils.ts +47 -22
  33. package/test/src/chunked_snapshot.test.ts +153 -0
  34. package/test/src/resume.test.ts +7 -94
  35. package/test/src/resume_token.test.ts +78 -2
  36. package/test/src/resuming_snapshots.test.ts +138 -0
  37. package/test/src/slow_tests.test.ts +4 -18
  38. package/test/src/util.ts +12 -1
  39. package/tsconfig.tsbuildinfo +1 -1
@@ -1,12 +1,65 @@
+ var __addDisposableResource = (this && this.__addDisposableResource) || function (env, value, async) {
+     if (value !== null && value !== void 0) {
+         if (typeof value !== "object" && typeof value !== "function") throw new TypeError("Object expected.");
+         var dispose, inner;
+         if (async) {
+             if (!Symbol.asyncDispose) throw new TypeError("Symbol.asyncDispose is not defined.");
+             dispose = value[Symbol.asyncDispose];
+         }
+         if (dispose === void 0) {
+             if (!Symbol.dispose) throw new TypeError("Symbol.dispose is not defined.");
+             dispose = value[Symbol.dispose];
+             if (async) inner = dispose;
+         }
+         if (typeof dispose !== "function") throw new TypeError("Object not disposable.");
+         if (inner) dispose = function() { try { inner.call(this); } catch (e) { return Promise.reject(e); } };
+         env.stack.push({ value: value, dispose: dispose, async: async });
+     }
+     else if (async) {
+         env.stack.push({ async: true });
+     }
+     return value;
+ };
+ var __disposeResources = (this && this.__disposeResources) || (function (SuppressedError) {
+     return function (env) {
+         function fail(e) {
+             env.error = env.hasError ? new SuppressedError(e, env.error, "An error was suppressed during disposal.") : e;
+             env.hasError = true;
+         }
+         var r, s = 0;
+         function next() {
+             while (r = env.stack.pop()) {
+                 try {
+                     if (!r.async && s === 1) return s = 0, env.stack.push(r), Promise.resolve().then(next);
+                     if (r.dispose) {
+                         var result = r.dispose.call(r.value);
+                         if (r.async) return s |= 2, Promise.resolve(result).then(next, function(e) { fail(e); return next(); });
+                     }
+                     else s |= 1;
+                 }
+                 catch (e) {
+                     fail(e);
+                 }
+             }
+             if (s === 1) return env.hasError ? Promise.reject(env.error) : Promise.resolve();
+             if (env.hasError) throw env.error;
+         }
+         return next();
+     };
+ })(typeof SuppressedError === "function" ? SuppressedError : function (error, suppressed, message) {
+     var e = new Error(message);
+     return e.name = "SuppressedError", e.error = error, e.suppressed = suppressed, e;
+ });
  import { isMongoNetworkTimeoutError, isMongoServerError, mongo } from '@powersync/lib-service-mongodb';
- import { container, DatabaseConnectionError, ErrorCode, logger, ReplicationAbortedError, ReplicationAssertionError, ServiceError } from '@powersync/lib-services-framework';
- import { SaveOperationTag } from '@powersync/service-core';
+ import { container, DatabaseConnectionError, logger as defaultLogger, ErrorCode, ReplicationAbortedError, ReplicationAssertionError, ServiceError } from '@powersync/lib-services-framework';
+ import { RelationCache, SaveOperationTag } from '@powersync/service-core';
  import { ReplicationMetric } from '@powersync/service-types';
  import { MongoLSN } from '../common/MongoLSN.js';
  import { PostImagesOption } from '../types/types.js';
  import { escapeRegExp } from '../utils.js';
- import { constructAfterRecord, createCheckpoint, getCacheIdentifier, getMongoRelation } from './MongoRelation.js';
- import { CHECKPOINTS_COLLECTION } from './replication-utils.js';
+ import { constructAfterRecord, createCheckpoint, getCacheIdentifier, getMongoRelation, STANDALONE_CHECKPOINT_ID } from './MongoRelation.js';
+ import { ChunkedSnapshotQuery } from './MongoSnapshotQuery.js';
+ import { CHECKPOINTS_COLLECTION, timestampToDate } from './replication-utils.js';
  /**
   * Thrown when the change stream is not valid anymore, and replication
   * must be restarted.
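
The two `__addDisposableResource` / `__disposeResources` helpers added above are standard downlevel emit from the TypeScript compiler for TC39 explicit resource management: each `env_N` / `__disposeResources` pair that appears later in this diff corresponds to an `await using` declaration in the package's TypeScript source. As orientation, a minimal sketch of the source-level pattern (`openStream` here is a hypothetical helper, not part of the package):

function openStream() {
  const stream = { close: async () => {} }; // stand-in resource
  return {
    stream,
    // Invoked automatically when an `await using` binding leaves scope.
    [Symbol.asyncDispose]: async () => {
      await stream.close();
    }
  };
}

async function example() {
  await using managed = openStream();
  // managed is disposed here, both on normal exit and when an error is
  // thrown - which is exactly what the emitted try/catch/finally +
  // __disposeResources blocks in the compiled output encode.
}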
@@ -31,13 +84,27 @@ export class ChangeStream {
      metrics;
      maxAwaitTimeMS;
      abort_signal;
-     relation_cache = new Map();
+     relationCache = new RelationCache(getCacheIdentifier);
+     /**
+      * Time of the oldest uncommitted change, according to the source db.
+      * This is used to determine the replication lag.
+      */
+     oldestUncommittedChange = null;
+     /**
+      * Keep track of whether we have done a commit or keepalive yet.
+      * We can only compute replication lag if isStartingReplication == false, or oldestUncommittedChange is present.
+      */
+     isStartingReplication = true;
+     checkpointStreamId = new mongo.ObjectId();
+     logger;
+     snapshotChunkLength;
      constructor(options) {
          this.storage = options.storage;
          this.metrics = options.metrics;
          this.group_id = options.storage.group_id;
          this.connections = options.connections;
          this.maxAwaitTimeMS = options.maxAwaitTimeMS ?? 10_000;
+         this.snapshotChunkLength = options.snapshotChunkLength ?? 6_000;
          this.client = this.connections.client;
          this.defaultDb = this.connections.db;
          this.sync_rules = options.storage.getParsedSyncRules({
@@ -47,6 +114,7 @@ export class ChangeStream {
          this.abort_signal.addEventListener('abort', () => {
              // TODO: Fast abort?
          }, { once: true });
+         this.logger = options.logger ?? defaultLogger;
      }
      get stopped() {
          return this.abort_signal.aborted;
@@ -57,9 +125,6 @@ export class ChangeStream {
      get configurePostImages() {
          return this.connections.options.postImages == PostImagesOption.AUTO_CONFIGURE;
      }
-     get logPrefix() {
-         return `[powersync_${this.group_id}]`;
-     }
      /**
       * This resolves a pattern, persists the related metadata, and returns
       * the resulting SourceTables.
@@ -87,7 +152,7 @@ export class ChangeStream {
          }, { nameOnly: false })
              .toArray();
          if (!tablePattern.isWildcard && collections.length == 0) {
-             logger.warn(`${this.logPrefix} Collection ${schema}.${tablePattern.name} not found`);
+             this.logger.warn(`Collection ${schema}.${tablePattern.name} not found`);
          }
          for (let collection of collections) {
              const table = await this.handleRelation(batch, getMongoRelation({ db: schema, coll: collection.name }),
@@ -100,59 +165,162 @@ export class ChangeStream {
      async initSlot() {
          const status = await this.storage.getStatus();
          if (status.snapshot_done && status.checkpoint_lsn) {
-             logger.info(`${this.logPrefix} Initial replication already done`);
-             return { needsInitialSync: false };
+             this.logger.info(`Initial replication already done`);
+             return { needsInitialSync: false, snapshotLsn: null };
          }
-         return { needsInitialSync: true };
+         return { needsInitialSync: true, snapshotLsn: status.snapshot_lsn };
      }
      async estimatedCount(table) {
-         const db = this.client.db(table.schema);
-         const count = await db.collection(table.table).estimatedDocumentCount();
+         const count = await this.estimatedCountNumber(table);
          return `~${count}`;
      }
-     /**
-      * Start initial replication.
-      *
-      * If (partial) replication was done before on this slot, this clears the state
-      * and starts again from scratch.
-      */
-     async startInitialReplication() {
-         await this.storage.clear();
-         await this.initialReplication();
+     async estimatedCountNumber(table) {
+         const db = this.client.db(table.schema);
+         return await db.collection(table.table).estimatedDocumentCount();
      }
-     async initialReplication() {
-         const sourceTables = this.sync_rules.getSourceTables();
-         await this.client.connect();
-         // We need to get the snapshot time before taking the initial snapshot.
-         const hello = await this.defaultDb.command({ hello: 1 });
-         const snapshotTime = hello.lastWrite?.majorityOpTime?.ts;
-         if (hello.msg == 'isdbgrid') {
-             throw new ServiceError(ErrorCode.PSYNC_S1341, 'Sharded MongoDB Clusters are not supported yet (including MongoDB Serverless instances).');
+     async getSnapshotLsn() {
+         const env_1 = { stack: [], error: void 0, hasError: false };
+         try {
+             const hello = await this.defaultDb.command({ hello: 1 });
+             // Basic sanity check
+             if (hello.msg == 'isdbgrid') {
+                 throw new ServiceError(ErrorCode.PSYNC_S1341, 'Sharded MongoDB Clusters are not supported yet (including MongoDB Serverless instances).');
+             }
+             else if (hello.setName == null) {
+                 throw new ServiceError(ErrorCode.PSYNC_S1342, 'Standalone MongoDB instances are not supported - use a replicaset.');
+             }
+             // Open a change stream just to get a resume token for later use.
+             // We could use clusterTime from the hello command, but that won't tell us if the
+             // snapshot isn't valid anymore.
+             // If we just use the first resumeToken from the stream, we get two potential issues:
+             // 1. The resumeToken may just be a wrapped clusterTime, which does not detect changes
+             //    in source db or other stream issues.
+             // 2. The first actual change we get may have the same clusterTime, causing us to incorrectly
+             //    skip that event.
+             // Instead, we create a new checkpoint document, and wait until we get that document back in the stream.
+             // To avoid potential race conditions with the checkpoint creation, we create a new checkpoint document
+             // periodically until the timeout is reached.
+             const LSN_TIMEOUT_SECONDS = 60;
+             const LSN_CREATE_INTERVAL_SECONDS = 1;
+             const streamManager = __addDisposableResource(env_1, this.openChangeStream({ lsn: null, maxAwaitTimeMs: 0 }), true);
+             const { stream } = streamManager;
+             const startTime = performance.now();
+             let lastCheckpointCreated = -10_000;
+             let eventsSeen = 0;
+             while (performance.now() - startTime < LSN_TIMEOUT_SECONDS * 1000) {
+                 if (performance.now() - lastCheckpointCreated >= LSN_CREATE_INTERVAL_SECONDS * 1000) {
+                     await createCheckpoint(this.client, this.defaultDb, this.checkpointStreamId);
+                     lastCheckpointCreated = performance.now();
+                 }
+                 // tryNext() doesn't block, while next() / hasNext() does block until there is data on the stream
+                 const changeDocument = await stream.tryNext().catch((e) => {
+                     throw mapChangeStreamError(e);
+                 });
+                 if (changeDocument == null) {
+                     continue;
+                 }
+                 const ns = 'ns' in changeDocument && 'coll' in changeDocument.ns ? changeDocument.ns : undefined;
+                 if (ns?.coll == CHECKPOINTS_COLLECTION && 'documentKey' in changeDocument) {
+                     const checkpointId = changeDocument.documentKey._id;
+                     if (!this.checkpointStreamId.equals(checkpointId)) {
+                         continue;
+                     }
+                     const { comparable: lsn } = new MongoLSN({
+                         timestamp: changeDocument.clusterTime,
+                         resume_token: changeDocument._id
+                     });
+                     return lsn;
+                 }
+                 eventsSeen += 1;
+             }
+             // Could happen if there is a very large replication lag?
+             throw new ServiceError(ErrorCode.PSYNC_S1301, `Timeout while waiting for checkpoint document for ${LSN_TIMEOUT_SECONDS}s. Streamed events = ${eventsSeen}`);
+         }
+         catch (e_1) {
+             env_1.error = e_1;
+             env_1.hasError = true;
+         }
+         finally {
+             const result_1 = __disposeResources(env_1);
+             if (result_1)
+                 await result_1;
+         }
+     }
+     async validateSnapshotLsn(lsn) {
+         const env_2 = { stack: [], error: void 0, hasError: false };
+         try {
+             const streamManager = __addDisposableResource(env_2, this.openChangeStream({ lsn: lsn, maxAwaitTimeMs: 0 }), true);
+             const { stream } = streamManager;
+             try {
+                 // tryNext() doesn't block, while next() / hasNext() does block until there is data on the stream
+                 await stream.tryNext();
+             }
+             catch (e) {
+                 // Note: A timeout here is not handled as a ChangeStreamInvalidatedError, even though
+                 // we possibly cannot recover from it.
+                 throw mapChangeStreamError(e);
+             }
          }
-         else if (hello.setName == null) {
-             throw new ServiceError(ErrorCode.PSYNC_S1342, 'Standalone MongoDB instances are not supported - use a replicaset.');
+         catch (e_2) {
+             env_2.error = e_2;
+             env_2.hasError = true;
          }
-         else if (snapshotTime == null) {
-             // Not known where this would happen apart from the above cases
-             throw new ReplicationAssertionError('MongoDB lastWrite timestamp not found.');
+         finally {
+             const result_2 = __disposeResources(env_2);
+             if (result_2)
+                 await result_2;
          }
-         await this.storage.startBatch({ zeroLSN: MongoLSN.ZERO.comparable, defaultSchema: this.defaultDb.databaseName, storeCurrentData: false }, async (batch) => {
+     }
+     async initialReplication(snapshotLsn) {
+         const sourceTables = this.sync_rules.getSourceTables();
+         await this.client.connect();
+         await this.storage.startBatch({
+             logger: this.logger,
+             zeroLSN: MongoLSN.ZERO.comparable,
+             defaultSchema: this.defaultDb.databaseName,
+             storeCurrentData: false,
+             skipExistingRows: true
+         }, async (batch) => {
+             if (snapshotLsn == null) {
+                 // First replication attempt - get a snapshot and store the timestamp
+                 snapshotLsn = await this.getSnapshotLsn();
+                 await batch.setSnapshotLsn(snapshotLsn);
+                 this.logger.info(`Marking snapshot at ${snapshotLsn}`);
+             }
+             else {
+                 this.logger.info(`Resuming snapshot at ${snapshotLsn}`);
+                 // Check that the snapshot is still valid.
+                 await this.validateSnapshotLsn(snapshotLsn);
+             }
              // Start by resolving all tables.
              // This checks postImage configuration, and that should fail as
-             // earlier as possible.
+             // early as possible.
              let allSourceTables = [];
              for (let tablePattern of sourceTables) {
                  const tables = await this.resolveQualifiedTableNames(batch, tablePattern);
                  allSourceTables.push(...tables);
              }
+             let tablesWithStatus = [];
              for (let table of allSourceTables) {
+                 if (table.snapshotComplete) {
+                     this.logger.info(`Skipping ${table.qualifiedName} - snapshot already done`);
+                     continue;
+                 }
+                 let count = await this.estimatedCountNumber(table);
+                 const updated = await batch.updateTableProgress(table, {
+                     totalEstimatedCount: count
+                 });
+                 tablesWithStatus.push(updated);
+                 this.relationCache.update(updated);
+                 this.logger.info(`To replicate: ${table.qualifiedName}: ${updated.snapshotStatus?.replicatedCount}/~${updated.snapshotStatus?.totalEstimatedCount}`);
+             }
+             for (let table of tablesWithStatus) {
                  await this.snapshotTable(batch, table);
                  await batch.markSnapshotDone([table], MongoLSN.ZERO.comparable);
                  await touch();
              }
-             const { comparable: lsn } = new MongoLSN({ timestamp: snapshotTime });
-             logger.info(`${this.logPrefix} Snapshot commit at ${snapshotTime.inspect()} / ${lsn}`);
-             await batch.commit(lsn);
+             this.logger.info(`Snapshot commit at ${snapshotLsn}`);
+             await batch.commit(snapshotLsn);
          });
      }
      async setupCheckpointsCollection() {
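
The `getSnapshotLsn()` implementation above relies on a checkpoint-document trick: rather than trusting the stream's first resume token (which may just be a wrapped clusterTime, and may collide with the clusterTime of the first real event), it writes a marker document and waits for that document to arrive back on the change stream. A reduced sketch of the same idea against the plain MongoDB Node.js driver - the collection name, timeout, and return shape are illustrative assumptions, not the module's API:

import { MongoClient, ObjectId } from 'mongodb';

async function resumeTokenViaCheckpoint(client: MongoClient, dbName: string) {
  const db = client.db(dbName);
  const streamId = new ObjectId();
  const checkpoints = db.collection('_checkpoints');
  const stream = db.watch([], { maxAwaitTimeMS: 0 });
  try {
    const deadline = Date.now() + 60_000;
    let lastWrite = 0;
    while (Date.now() < deadline) {
      if (Date.now() - lastWrite >= 1_000) {
        // Re-create the marker periodically to avoid a race with stream startup.
        await checkpoints.replaceOne({ _id: streamId }, { ts: new Date() }, { upsert: true });
        lastWrite = Date.now();
      }
      // tryNext() returns null instead of blocking when no event is buffered.
      const event = await stream.tryNext();
      if (event == null) {
        continue;
      }
      if (
        'documentKey' in event &&
        event.ns.coll === '_checkpoints' &&
        streamId.equals(event.documentKey._id as ObjectId)
      ) {
        // This event is definitely ours, so its _id is a resume token that is
        // both valid for this deployment and positioned at a known point.
        return { resumeToken: event._id, clusterTime: event.clusterTime };
      }
    }
    throw new Error('Timed out waiting for checkpoint document');
  } finally {
    await stream.close();
  }
}

Waiting for a specific marker (keyed by a per-stream ObjectId) is what lets the service distinguish its own checkpoints from those of other replication streams, and also proves the stream is actually delivering events before the snapshot begins.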
@@ -170,6 +338,12 @@ export class ChangeStream {
              changeStreamPreAndPostImages: { enabled: true }
          });
      }
+     else {
+         // Clear the collection on startup, to keep it clean
+         // We never query this collection directly, and don't want to keep the data around.
+         // We only use this to get data into the oplog/changestream.
+         await this.defaultDb.collection(CHECKPOINTS_COLLECTION).deleteMany({});
+     }
  }
  getSourceNamespaceFilters() {
      const sourceTables = this.sync_rules.getSourceTables();
@@ -207,51 +381,77 @@ export class ChangeStream {
          }
      }
      async snapshotTable(batch, table) {
-         logger.info(`${this.logPrefix} Replicating ${table.qualifiedName}`);
-         const estimatedCount = await this.estimatedCount(table);
-         let at = 0;
-         const db = this.client.db(table.schema);
-         const collection = db.collection(table.table);
-         const cursor = collection.find({}, { batchSize: 6_000, readConcern: 'majority' });
-         let lastBatch = performance.now();
-         // hasNext() is the call that triggers fetching of the next batch,
-         // then we read it with readBufferedDocuments(). This gives us semi-explicit
-         // control over the fetching of each batch, and avoids a separate promise per document
-         let hasNextPromise = cursor.hasNext();
-         while (await hasNextPromise) {
-             const docBatch = cursor.readBufferedDocuments();
-             // Pre-fetch next batch, so that we can read and write concurrently
-             hasNextPromise = cursor.hasNext();
-             for (let document of docBatch) {
+         const env_3 = { stack: [], error: void 0, hasError: false };
+         try {
+             const totalEstimatedCount = await this.estimatedCountNumber(table);
+             let at = table.snapshotStatus?.replicatedCount ?? 0;
+             const db = this.client.db(table.schema);
+             const collection = db.collection(table.table);
+             const query = __addDisposableResource(env_3, new ChunkedSnapshotQuery({
+                 collection,
+                 key: table.snapshotStatus?.lastKey,
+                 batchSize: this.snapshotChunkLength
+             }), true);
+             if (query.lastKey != null) {
+                 this.logger.info(`Replicating ${table.qualifiedName} ${table.formatSnapshotProgress()} - resuming at _id > ${query.lastKey}`);
+             }
+             else {
+                 this.logger.info(`Replicating ${table.qualifiedName} ${table.formatSnapshotProgress()}`);
+             }
+             let lastBatch = performance.now();
+             let nextChunkPromise = query.nextChunk();
+             while (true) {
+                 const { docs: docBatch, lastKey } = await nextChunkPromise;
+                 if (docBatch.length == 0) {
+                     break;
+                 }
                  if (this.abort_signal.aborted) {
                      throw new ReplicationAbortedError(`Aborted initial replication`);
                  }
-                 const record = constructAfterRecord(document);
-                 // This auto-flushes when the batch reaches its size limit
-                 await batch.save({
-                     tag: SaveOperationTag.INSERT,
-                     sourceTable: table,
-                     before: undefined,
-                     beforeReplicaId: undefined,
-                     after: record,
-                     afterReplicaId: document._id
+                 // Pre-fetch next batch, so that we can read and write concurrently
+                 nextChunkPromise = query.nextChunk();
+                 for (let document of docBatch) {
+                     const record = constructAfterRecord(document);
+                     // This auto-flushes when the batch reaches its size limit
+                     await batch.save({
+                         tag: SaveOperationTag.INSERT,
+                         sourceTable: table,
+                         before: undefined,
+                         beforeReplicaId: undefined,
+                         after: record,
+                         afterReplicaId: document._id
+                     });
+                 }
+                 // Important: flush before marking progress
+                 await batch.flush();
+                 at += docBatch.length;
+                 this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(docBatch.length);
+                 table = await batch.updateTableProgress(table, {
+                     lastKey,
+                     replicatedCount: at,
+                     totalEstimatedCount: totalEstimatedCount
                  });
+                 this.relationCache.update(table);
+                 const duration = performance.now() - lastBatch;
+                 lastBatch = performance.now();
+                 this.logger.info(`Replicating ${table.qualifiedName} ${table.formatSnapshotProgress()} in ${duration.toFixed(0)}ms`);
+                 await touch();
              }
-             at += docBatch.length;
-             this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(docBatch.length);
-             const duration = performance.now() - lastBatch;
-             lastBatch = performance.now();
-             logger.info(`${this.logPrefix} Replicating ${table.qualifiedName} ${at}/${estimatedCount} in ${duration.toFixed(0)}ms`);
-             await touch();
-         }
-         // In case the loop was interrupted, make sure we await the last promise.
-         await hasNextPromise;
-         await batch.flush();
-         logger.info(`${this.logPrefix} Replicated ${at} documents for ${table.qualifiedName}`);
+             // In case the loop was interrupted, make sure we await the last promise.
+             await nextChunkPromise;
+         }
+         catch (e_3) {
+             env_3.error = e_3;
+             env_3.hasError = true;
+         }
+         finally {
+             const result_3 = __disposeResources(env_3);
+             if (result_3)
+                 await result_3;
+         }
      }
      async getRelation(batch, descriptor, options) {
-         const cacheId = getCacheIdentifier(descriptor);
-         const existing = this.relation_cache.get(cacheId);
+         const existing = this.relationCache.get(descriptor);
          if (existing != null) {
              return existing;
          }
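
`snapshotTable()` now reads the collection through `ChunkedSnapshotQuery` and persists `lastKey` together with each flushed chunk, which is what makes interrupted snapshots resumable (see also the new `test/src/resuming_snapshots.test.ts` and `test/src/chunked_snapshot.test.ts` in the file list). The actual implementation lives in `MongoSnapshotQuery.ts`, which this diff does not show inline; a hedged reconstruction of the basic pattern - paginate on the `_id` index with `_id > lastKey` - might look like:

import { Collection, Document } from 'mongodb';

// Assumption-based sketch, not the package's ChunkedSnapshotQuery.
class ChunkedQuerySketch {
  private lastKey: any;

  constructor(private collection: Collection, resumeAfterKey?: any, private chunkSize = 6_000) {
    this.lastKey = resumeAfterKey ?? null;
  }

  // Each chunk is an independent find() ordered by _id, resuming strictly
  // after the last _id already replicated. Because lastKey is persisted
  // along with batch progress (updateTableProgress above), a killed
  // process can resume mid-table instead of restarting the snapshot.
  async nextChunk(): Promise<{ docs: Document[]; lastKey: any }> {
    const filter = this.lastKey == null ? {} : { _id: { $gt: this.lastKey } };
    const docs = await this.collection
      .find(filter, { sort: { _id: 1 }, limit: this.chunkSize, readConcern: 'majority' })
      .toArray();
    if (docs.length > 0) {
      this.lastKey = docs[docs.length - 1]._id;
    }
    return { docs, lastKey: this.lastKey };
  }
}

Chunking on `_id` works because `_id` is unique and always indexed, so every chunk is a bounded, index-ordered query that stays cheap no matter where the snapshot was interrupted.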
@@ -280,7 +480,7 @@ export class ChangeStream {
                  collMod: collectionInfo.name,
                  changeStreamPreAndPostImages: { enabled: true }
              });
-             logger.info(`${this.logPrefix} Enabled postImages on ${db}.${collectionInfo.name}`);
+             this.logger.info(`Enabled postImages on ${db}.${collectionInfo.name}`);
          }
          else if (!enabled) {
              throw new ServiceError(ErrorCode.PSYNC_S1343, `postImages not enabled on ${db}.${collectionInfo.name}`);
@@ -302,11 +502,11 @@ export class ChangeStream {
              entity_descriptor: descriptor,
              sync_rules: this.sync_rules
          });
-         this.relation_cache.set(getCacheIdentifier(descriptor), result.table);
+         this.relationCache.update(result.table);
          // Drop conflicting collections.
          // This is generally not expected for MongoDB source dbs, so we log an error.
          if (result.dropTables.length > 0) {
-             logger.error(`Conflicting collections found for ${JSON.stringify(descriptor)}. Dropping: ${result.dropTables.map((t) => t.id).join(', ')}`);
+             this.logger.error(`Conflicting collections found for ${JSON.stringify(descriptor)}. Dropping: ${result.dropTables.map((t) => t.id).join(', ')}`);
              await batch.drop(result.dropTables);
          }
          // Snapshot if:
@@ -315,11 +515,11 @@ export class ChangeStream {
          // 3. The table is used in sync rules.
          const shouldSnapshot = snapshot && !result.table.snapshotComplete && result.table.syncAny;
          if (shouldSnapshot) {
-             logger.info(`${this.logPrefix} New collection: ${descriptor.schema}.${descriptor.name}`);
+             this.logger.info(`New collection: ${descriptor.schema}.${descriptor.name}`);
              // Truncate this table, in case a previous snapshot was interrupted.
              await batch.truncate([result.table]);
              await this.snapshotTable(batch, result.table);
-             const no_checkpoint_before_lsn = await createCheckpoint(this.client, this.defaultDb);
+             const no_checkpoint_before_lsn = await createCheckpoint(this.client, this.defaultDb, STANDALONE_CHECKPOINT_ID);
              const [table] = await batch.markSnapshotDone([result.table], no_checkpoint_before_lsn);
              return table;
          }
@@ -327,7 +527,7 @@ export class ChangeStream {
      }
      async writeChange(batch, table, change) {
          if (!table.syncAny) {
-             logger.debug(`${this.logPrefix} Collection ${table.qualifiedName} not used in sync rules - skipping`);
+             this.logger.debug(`Collection ${table.qualifiedName} not used in sync rules - skipping`);
              return null;
          }
          this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(1);
@@ -390,7 +590,11 @@ export class ChangeStream {
          const result = await this.initSlot();
          await this.setupCheckpointsCollection();
          if (result.needsInitialSync) {
-             await this.startInitialReplication();
+             if (result.snapshotLsn == null) {
+                 // Snapshot LSN is not present, so we need to start replication from scratch.
+                 await this.storage.clear({ signal: this.abort_signal });
+             }
+             await this.initialReplication(result.snapshotLsn);
          }
      }
      async streamChanges() {
@@ -406,230 +610,303 @@ export class ChangeStream {
              throw e;
          }
      }
+     openChangeStream(options) {
+         const lastLsn = options.lsn ? MongoLSN.fromSerialized(options.lsn) : null;
+         const startAfter = lastLsn?.timestamp;
+         const resumeAfter = lastLsn?.resumeToken;
+         const filters = this.getSourceNamespaceFilters();
+         const pipeline = [
+             {
+                 $match: filters.$match
+             },
+             { $changeStreamSplitLargeEvent: {} }
+         ];
+         let fullDocument;
+         if (this.usePostImages) {
+             // 'read_only' or 'auto_configure'
+             // Configuration happens during snapshot, or when we see new
+             // collections.
+             fullDocument = 'required';
+         }
+         else {
+             fullDocument = 'updateLookup';
+         }
+         const streamOptions = {
+             showExpandedEvents: true,
+             maxAwaitTimeMS: options.maxAwaitTimeMs ?? this.maxAwaitTimeMS,
+             fullDocument: fullDocument
+         };
+         /**
+          * Only one of these options can be supplied at a time.
+          */
+         if (resumeAfter) {
+             streamOptions.resumeAfter = resumeAfter;
+         }
+         else {
+             // Legacy: We don't persist lsns without resumeTokens anymore, but we do still handle the
+             // case if we have an old one.
+             streamOptions.startAtOperationTime = startAfter;
+         }
+         let stream;
+         if (filters.multipleDatabases) {
+             // Requires readAnyDatabase@admin on Atlas
+             stream = this.client.watch(pipeline, streamOptions);
+         }
+         else {
+             // Same general result, but requires fewer permissions than the above
+             stream = this.defaultDb.watch(pipeline, streamOptions);
+         }
+         this.abort_signal.addEventListener('abort', () => {
+             stream.close();
+         });
+         return {
+             stream,
+             filters,
+             [Symbol.asyncDispose]: async () => {
+                 return stream.close();
+             }
+         };
+     }
      async streamChangesInternal() {
          // Auto-activate as soon as initial replication is done
          await this.storage.autoActivate();
-         await this.storage.startBatch({ zeroLSN: MongoLSN.ZERO.comparable, defaultSchema: this.defaultDb.databaseName, storeCurrentData: false }, async (batch) => {
-             const { lastCheckpointLsn } = batch;
-             const lastLsn = lastCheckpointLsn ? MongoLSN.fromSerialized(lastCheckpointLsn) : null;
-             const startAfter = lastLsn?.timestamp;
-             const resumeAfter = lastLsn?.resumeToken;
-             logger.info(`${this.logPrefix} Resume streaming at ${startAfter?.inspect()} / ${lastLsn}`);
-             const filters = this.getSourceNamespaceFilters();
-             const pipeline = [
-                 {
-                     $match: filters.$match
-                 },
-                 { $changeStreamSplitLargeEvent: {} }
-             ];
-             let fullDocument;
-             if (this.usePostImages) {
-                 // 'read_only' or 'auto_configure'
-                 // Configuration happens during snapshot, or when we see new
-                 // collections.
-                 fullDocument = 'required';
-             }
-             else {
-                 fullDocument = 'updateLookup';
-             }
-             const streamOptions = {
-                 showExpandedEvents: true,
-                 maxAwaitTimeMS: this.maxAwaitTimeMS,
-                 fullDocument: fullDocument
-             };
-             /**
-              * Only one of these options can be supplied at a time.
-              */
-             if (resumeAfter) {
-                 streamOptions.resumeAfter = resumeAfter;
-             }
-             else {
-                 streamOptions.startAtOperationTime = startAfter;
-             }
-             let stream;
-             if (filters.multipleDatabases) {
-                 // Requires readAnyDatabase@admin on Atlas
-                 stream = this.client.watch(pipeline, streamOptions);
-             }
-             else {
-                 // Same general result, but requires less permissions than the above
-                 stream = this.defaultDb.watch(pipeline, streamOptions);
-             }
-             if (this.abort_signal.aborted) {
-                 stream.close();
-                 return;
-             }
-             this.abort_signal.addEventListener('abort', () => {
-                 stream.close();
-             });
-             // Always start with a checkpoint.
-             // This helps us to clear errors when restarting, even if there is
-             // no data to replicate.
-             let waitForCheckpointLsn = await createCheckpoint(this.client, this.defaultDb);
-             let splitDocument = null;
-             let flexDbNameWorkaroundLogged = false;
-             let lastEmptyResume = performance.now();
-             while (true) {
+         await this.storage.startBatch({
+             logger: this.logger,
+             zeroLSN: MongoLSN.ZERO.comparable,
+             defaultSchema: this.defaultDb.databaseName,
+             storeCurrentData: false
+         }, async (batch) => {
+             const env_4 = { stack: [], error: void 0, hasError: false };
+             try {
+                 const { lastCheckpointLsn } = batch;
+                 const lastLsn = MongoLSN.fromSerialized(lastCheckpointLsn);
+                 const startAfter = lastLsn?.timestamp;
+                 // It is normal for this to be a minute or two old when there is a low volume
+                 // of ChangeStream events.
+                 const tokenAgeSeconds = Math.round((Date.now() - timestampToDate(startAfter).getTime()) / 1000);
+                 this.logger.info(`Resume streaming at ${startAfter?.inspect()} / ${lastLsn} | Token age: ${tokenAgeSeconds}s`);
+                 const streamManager = __addDisposableResource(env_4, this.openChangeStream({ lsn: lastCheckpointLsn }), true);
+                 const { stream, filters } = streamManager;
                  if (this.abort_signal.aborted) {
-                     break;
+                     await stream.close();
+                     return;
                  }
-                 const originalChangeDocument = await stream.tryNext().catch((e) => {
-                     throw mapChangeStreamError(e);
-                 });
-                 // The stream was closed, we will only ever receive `null` from it
-                 if (!originalChangeDocument && stream.closed) {
-                     break;
-                 }
-                 if (this.abort_signal.aborted) {
-                     break;
-                 }
-                 if (originalChangeDocument == null) {
-                     // We get a new null document after `maxAwaitTimeMS` if there were no other events.
-                     // In this case, stream.resumeToken is the resume token associated with the last response.
-                     // stream.resumeToken is not updated if stream.tryNext() returns data, while stream.next()
-                     // does update it.
-                     // From observed behavior, the actual resumeToken changes around once every 10 seconds.
-                     // If we don't update it on empty events, we do keep consistency, but resuming the stream
-                     // with old tokens may cause connection timeouts.
-                     // We throttle this further by only persisting a keepalive once a minute.
-                     // We add an additional check for waitForCheckpointLsn == null, to make sure we're not
-                     // doing a keepalive in the middle of a transaction.
-                     if (waitForCheckpointLsn == null && performance.now() - lastEmptyResume > 60_000) {
-                         const { comparable: lsn } = MongoLSN.fromResumeToken(stream.resumeToken);
-                         await batch.keepalive(lsn);
-                         await touch();
-                         lastEmptyResume = performance.now();
+                 // Always start with a checkpoint.
+                 // This helps us to clear errors when restarting, even if there is
+                 // no data to replicate.
+                 let waitForCheckpointLsn = await createCheckpoint(this.client, this.defaultDb, this.checkpointStreamId);
+                 let splitDocument = null;
+                 let flexDbNameWorkaroundLogged = false;
+                 let lastEmptyResume = performance.now();
+                 while (true) {
+                     if (this.abort_signal.aborted) {
+                         break;
                      }
-                     continue;
-                 }
-                 await touch();
-                 if (startAfter != null && originalChangeDocument.clusterTime?.lte(startAfter)) {
-                     continue;
-                 }
-                 let changeDocument = originalChangeDocument;
-                 if (originalChangeDocument?.splitEvent != null) {
-                     // Handle split events from $changeStreamSplitLargeEvent.
-                     // This is only relevant for very large update operations.
-                     const splitEvent = originalChangeDocument?.splitEvent;
-                     if (splitDocument == null) {
-                         splitDocument = originalChangeDocument;
+                     const originalChangeDocument = await stream.tryNext().catch((e) => {
+                         throw mapChangeStreamError(e);
+                     });
+                     // The stream was closed, we will only ever receive `null` from it
+                     if (!originalChangeDocument && stream.closed) {
+                         break;
                      }
-                     else {
-                         splitDocument = Object.assign(splitDocument, originalChangeDocument);
+                     if (this.abort_signal.aborted) {
+                         break;
                      }
-                     if (splitEvent.fragment == splitEvent.of) {
-                         // Got all fragments
-                         changeDocument = splitDocument;
-                         splitDocument = null;
+                     if (originalChangeDocument == null) {
+                         // We get a new null document after `maxAwaitTimeMS` if there were no other events.
+                         // In this case, stream.resumeToken is the resume token associated with the last response.
+                         // stream.resumeToken is not updated if stream.tryNext() returns data, while stream.next()
+                         // does update it.
+                         // From observed behavior, the actual resumeToken changes around once every 10 seconds.
+                         // If we don't update it on empty events, we do keep consistency, but resuming the stream
+                         // with old tokens may cause connection timeouts.
+                         // We throttle this further by only persisting a keepalive once a minute.
+                         // We add an additional check for waitForCheckpointLsn == null, to make sure we're not
+                         // doing a keepalive in the middle of a transaction.
+                         if (waitForCheckpointLsn == null && performance.now() - lastEmptyResume > 60_000) {
+                             const { comparable: lsn, timestamp } = MongoLSN.fromResumeToken(stream.resumeToken);
+                             await batch.keepalive(lsn);
+                             await touch();
+                             lastEmptyResume = performance.now();
+                             // Log the token update. This helps as a general "replication is still active" message in the logs.
+                             // This token would typically be around 10s behind.
+                             this.logger.info(`Idle change stream. Persisted resumeToken for ${timestampToDate(timestamp).toISOString()}`);
+                             this.isStartingReplication = false;
+                         }
+                         continue;
                      }
-                     else {
-                         // Wait for more fragments
+                     await touch();
+                     if (startAfter != null && originalChangeDocument.clusterTime?.lte(startAfter)) {
                          continue;
                      }
-                 }
-                 else if (splitDocument != null) {
-                     // We were waiting for fragments, but got a different event
-                     throw new ReplicationAssertionError(`Incomplete splitEvent: ${JSON.stringify(splitDocument.splitEvent)}`);
-                 }
-                 if (!filters.multipleDatabases &&
-                     'ns' in changeDocument &&
-                     changeDocument.ns.db != this.defaultDb.databaseName &&
-                     changeDocument.ns.db.endsWith(`_${this.defaultDb.databaseName}`)) {
-                     // When all of the following conditions are met:
-                     // 1. We're replicating from an Atlas Flex instance.
-                     // 2. There were changestream events recorded while the PowerSync service is paused.
-                     // 3. We're only replicating from a single database.
-                     // Then we've observed an ns with for example {db: '67b83e86cd20730f1e766dde_ps'},
-                     // instead of the expected {db: 'ps'}.
-                     // We correct this.
-                     changeDocument.ns.db = this.defaultDb.databaseName;
-                     if (!flexDbNameWorkaroundLogged) {
-                         flexDbNameWorkaroundLogged = true;
-                         logger.warn(`${this.logPrefix} Incorrect DB name in change stream: ${changeDocument.ns.db}. Changed to ${this.defaultDb.databaseName}.`);
+                     let changeDocument = originalChangeDocument;
+                     if (originalChangeDocument?.splitEvent != null) {
+                         // Handle split events from $changeStreamSplitLargeEvent.
+                         // This is only relevant for very large update operations.
+                         const splitEvent = originalChangeDocument?.splitEvent;
+                         if (splitDocument == null) {
+                             splitDocument = originalChangeDocument;
+                         }
+                         else {
+                             splitDocument = Object.assign(splitDocument, originalChangeDocument);
+                         }
+                         if (splitEvent.fragment == splitEvent.of) {
+                             // Got all fragments
+                             changeDocument = splitDocument;
+                             splitDocument = null;
+                         }
+                         else {
+                             // Wait for more fragments
+                             continue;
+                         }
                      }
-                 }
-                 if ((changeDocument.operationType == 'insert' ||
-                     changeDocument.operationType == 'update' ||
-                     changeDocument.operationType == 'replace' ||
-                     changeDocument.operationType == 'drop') &&
-                     changeDocument.ns.coll == CHECKPOINTS_COLLECTION) {
-                     /**
-                      * Dropping the database does not provide an `invalidate` event.
-                      * We typically would receive `drop` events for the collection which we
-                      * would process below.
-                      *
-                      * However we don't commit the LSN after collections are dropped.
-                      * The prevents the `startAfter` or `resumeToken` from advancing past the drop events.
-                      * The stream also closes after the drop events.
-                      * This causes an infinite loop of processing the collection drop events.
-                      *
-                      * This check here invalidates the change stream if our `_checkpoints` collection
-                      * is dropped. This allows for detecting when the DB is dropped.
-                      */
-                     if (changeDocument.operationType == 'drop') {
-                         throw new ChangeStreamInvalidatedError('Internal collections have been dropped', new Error('_checkpoints collection was dropped'));
+                     else if (splitDocument != null) {
+                         // We were waiting for fragments, but got a different event
+                         throw new ReplicationAssertionError(`Incomplete splitEvent: ${JSON.stringify(splitDocument.splitEvent)}`);
                      }
-                     const { comparable: lsn } = new MongoLSN({
-                         timestamp: changeDocument.clusterTime,
-                         resume_token: changeDocument._id
-                     });
-                     if (waitForCheckpointLsn != null && lsn >= waitForCheckpointLsn) {
-                         waitForCheckpointLsn = null;
+                     if (!filters.multipleDatabases &&
+                         'ns' in changeDocument &&
+                         changeDocument.ns.db != this.defaultDb.databaseName &&
+                         changeDocument.ns.db.endsWith(`_${this.defaultDb.databaseName}`)) {
+                         // When all of the following conditions are met:
+                         // 1. We're replicating from an Atlas Flex instance.
+                         // 2. There were changestream events recorded while the PowerSync service is paused.
+                         // 3. We're only replicating from a single database.
+                         // Then we've observed an ns with for example {db: '67b83e86cd20730f1e766dde_ps'},
+                         // instead of the expected {db: 'ps'}.
+                         // We correct this.
+                         changeDocument.ns.db = this.defaultDb.databaseName;
+                         if (!flexDbNameWorkaroundLogged) {
+                             flexDbNameWorkaroundLogged = true;
+                             this.logger.warn(`Incorrect DB name in change stream: ${changeDocument.ns.db}. Changed to ${this.defaultDb.databaseName}.`);
+                         }
                      }
-                     await batch.commit(lsn);
-                 }
-                 else if (changeDocument.operationType == 'insert' ||
-                     changeDocument.operationType == 'update' ||
-                     changeDocument.operationType == 'replace' ||
-                     changeDocument.operationType == 'delete') {
-                     if (waitForCheckpointLsn == null) {
-                         waitForCheckpointLsn = await createCheckpoint(this.client, this.defaultDb);
+                     const ns = 'ns' in changeDocument && 'coll' in changeDocument.ns ? changeDocument.ns : undefined;
+                     if (ns?.coll == CHECKPOINTS_COLLECTION) {
+                         /**
+                          * Dropping the database does not provide an `invalidate` event.
+                          * We typically would receive `drop` events for the collection which we
+                          * would process below.
+                          *
+                          * However we don't commit the LSN after collections are dropped.
+                          * This prevents the `startAfter` or `resumeToken` from advancing past the drop events.
+                          * The stream also closes after the drop events.
+                          * This causes an infinite loop of processing the collection drop events.
+                          *
+                          * This check here invalidates the change stream if our `_checkpoints` collection
+                          * is dropped. This allows for detecting when the DB is dropped.
+                          */
+                         if (changeDocument.operationType == 'drop') {
+                             throw new ChangeStreamInvalidatedError('Internal collections have been dropped', new Error('_checkpoints collection was dropped'));
+                         }
+                         if (!(changeDocument.operationType == 'insert' ||
+                             changeDocument.operationType == 'update' ||
+                             changeDocument.operationType == 'replace')) {
+                             continue;
+                         }
+                         // We handle two types of checkpoint events:
+                         // 1. "Standalone" checkpoints, typically write checkpoints. We want to process these
+                         //    immediately, regardless of where they were created.
+                         // 2. "Batch" checkpoints for the current stream. This is used as a form of dynamic rate
+                         //    limiting of commits, so we specifically want to exclude checkpoints from other streams.
+                         //
+                         // It may be useful to also throttle commits due to standalone checkpoints in the future.
+                         // However, these typically have a much lower rate than batch checkpoints, so we don't do that for now.
+                         const checkpointId = changeDocument.documentKey._id;
+                         if (!(checkpointId == STANDALONE_CHECKPOINT_ID || this.checkpointStreamId.equals(checkpointId))) {
+                             continue;
+                         }
+                         const { comparable: lsn } = new MongoLSN({
+                             timestamp: changeDocument.clusterTime,
+                             resume_token: changeDocument._id
+                         });
+                         if (waitForCheckpointLsn != null && lsn >= waitForCheckpointLsn) {
+                             waitForCheckpointLsn = null;
+                         }
+                         const didCommit = await batch.commit(lsn, { oldestUncommittedChange: this.oldestUncommittedChange });
+                         if (didCommit) {
+                             this.oldestUncommittedChange = null;
+                             this.isStartingReplication = false;
+                         }
                      }
-                     const rel = getMongoRelation(changeDocument.ns);
-                     const table = await this.getRelation(batch, rel, {
-                         // In most cases, we should not need to snapshot this. But if this is the first time we see the collection
-                         // for whatever reason, then we do need to snapshot it.
-                         // This may result in some duplicate operations when a collection is created for the first time after
-                         // sync rules was deployed.
-                         snapshot: true
-                     });
-                     if (table.syncAny) {
-                         await this.writeChange(batch, table, changeDocument);
+                     else if (changeDocument.operationType == 'insert' ||
+                         changeDocument.operationType == 'update' ||
+                         changeDocument.operationType == 'replace' ||
+                         changeDocument.operationType == 'delete') {
+                         if (waitForCheckpointLsn == null) {
+                             waitForCheckpointLsn = await createCheckpoint(this.client, this.defaultDb, this.checkpointStreamId);
+                         }
+                         const rel = getMongoRelation(changeDocument.ns);
+                         const table = await this.getRelation(batch, rel, {
+                             // In most cases, we should not need to snapshot this. But if this is the first time we see the collection
+                             // for whatever reason, then we do need to snapshot it.
+                             // This may result in some duplicate operations when a collection is created for the first time after
+                             // sync rules was deployed.
+                             snapshot: true
+                         });
+                         if (table.syncAny) {
+                             if (this.oldestUncommittedChange == null && changeDocument.clusterTime != null) {
+                                 this.oldestUncommittedChange = timestampToDate(changeDocument.clusterTime);
+                             }
+                             await this.writeChange(batch, table, changeDocument);
+                         }
                      }
-                 }
-                 else if (changeDocument.operationType == 'drop') {
-                     const rel = getMongoRelation(changeDocument.ns);
-                     const table = await this.getRelation(batch, rel, {
-                         // We're "dropping" this collection, so never snapshot it.
-                         snapshot: false
-                     });
-                     if (table.syncAny) {
-                         await batch.drop([table]);
-                         this.relation_cache.delete(getCacheIdentifier(rel));
+                     else if (changeDocument.operationType == 'drop') {
+                         const rel = getMongoRelation(changeDocument.ns);
+                         const table = await this.getRelation(batch, rel, {
+                             // We're "dropping" this collection, so never snapshot it.
+                             snapshot: false
+                         });
+                         if (table.syncAny) {
+                             await batch.drop([table]);
+                             this.relationCache.delete(table);
+                         }
                      }
-                 }
-                 else if (changeDocument.operationType == 'rename') {
-                     const relFrom = getMongoRelation(changeDocument.ns);
-                     const relTo = getMongoRelation(changeDocument.to);
-                     const tableFrom = await this.getRelation(batch, relFrom, {
-                         // We're "dropping" this collection, so never snapshot it.
-                         snapshot: false
-                     });
-                     if (tableFrom.syncAny) {
-                         await batch.drop([tableFrom]);
-                         this.relation_cache.delete(getCacheIdentifier(relFrom));
+                     else if (changeDocument.operationType == 'rename') {
+                         const relFrom = getMongoRelation(changeDocument.ns);
+                         const relTo = getMongoRelation(changeDocument.to);
+                         const tableFrom = await this.getRelation(batch, relFrom, {
+                             // We're "dropping" this collection, so never snapshot it.
+                             snapshot: false
+                         });
+                         if (tableFrom.syncAny) {
+                             await batch.drop([tableFrom]);
+                             this.relationCache.delete(relFrom);
+                         }
+                         // Here we do need to snapshot the new table
+                         const collection = await this.getCollectionInfo(relTo.schema, relTo.name);
+                         await this.handleRelation(batch, relTo, {
+                             // This is a new (renamed) collection, so always snapshot it.
+                             snapshot: true,
+                             collectionInfo: collection
+                         });
                      }
-                     // Here we do need to snapshot the new table
-                     const collection = await this.getCollectionInfo(relTo.schema, relTo.name);
-                     await this.handleRelation(batch, relTo, {
-                         // This is a new (renamed) collection, so always snapshot it.
-                         snapshot: true,
-                         collectionInfo: collection
-                     });
                  }
              }
+             catch (e_4) {
+                 env_4.error = e_4;
+                 env_4.hasError = true;
+             }
+             finally {
+                 const result_4 = __disposeResources(env_4);
+                 if (result_4)
+                     await result_4;
+             }
          });
      }
+     async getReplicationLagMillis() {
+         if (this.oldestUncommittedChange == null) {
+             if (this.isStartingReplication) {
+                 // We don't have anything to compute replication lag with yet.
+                 return undefined;
+             }
+             else {
+                 // We don't have any uncommitted changes, so replication is up-to-date.
+                 return 0;
+             }
+         }
+         return Date.now() - this.oldestUncommittedChange.getTime();
+     }
  }
  }
  async function touch() {
      // FIXME: The hosted Kubernetes probe does not actually check the timestamp on this.