@powersync/service-module-mongodb-storage 0.10.4 → 0.11.0

--- MongoBucketBatch.ts
+++ MongoBucketBatch.ts
@@ -16,6 +16,7 @@ import {
   BucketStorageMarkRecordUnavailable,
   deserializeBson,
   InternalOpId,
+  isCompleteRow,
   SaveOperationTag,
   storage,
   utils
@@ -49,6 +50,7 @@ export interface MongoBucketBatchOptions {
   lastCheckpointLsn: string | null;
   keepaliveOp: InternalOpId | null;
   noCheckpointBeforeLsn: string;
+  resumeFromLsn: string | null;
   storeCurrentData: boolean;
   /**
    * Set to true for initial replication.
@@ -99,6 +101,20 @@ export class MongoBucketBatch
    */
   public last_flushed_op: InternalOpId | null = null;

+  /**
+   * lastCheckpointLsn is the last consistent commit.
+   *
+   * While that is generally a "safe" point to resume from, there are cases where we may want to resume from a different point:
+   * 1. After an initial snapshot, we don't have a consistent commit yet, but need to resume from the snapshot LSN.
+   * 2. If "no_checkpoint_before_lsn" is set far in advance, it may take a while to reach that point. We
+   *    may want to resume at incremental points before that.
+   *
+   * This is set when creating the batch, but may not be updated afterwards.
+   */
+  public resumeFromLsn: string | null = null;
+
+  private needsActivation = true;
+
   constructor(options: MongoBucketBatchOptions) {
     super();
     this.logger = options.logger ?? defaultLogger;
@@ -107,6 +123,7 @@ export class MongoBucketBatch
     this.group_id = options.groupId;
     this.last_checkpoint_lsn = options.lastCheckpointLsn;
     this.no_checkpoint_before_lsn = options.noCheckpointBeforeLsn;
+    this.resumeFromLsn = options.resumeFromLsn;
     this.session = this.client.startSession();
     this.slot_name = options.slotName;
     this.sync_rules = options.syncRules;
@@ -332,7 +349,7 @@ export class MongoBucketBatch
       // Not an error if we re-apply a transaction
       existing_buckets = [];
       existing_lookups = [];
-      if (this.storeCurrentData) {
+      if (!isCompleteRow(this.storeCurrentData, after!)) {
        if (this.markRecordUnavailable != null) {
          // This will trigger a "resnapshot" of the record.
          // This is not relevant if storeCurrentData is false, since we'll get the full row
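
Note: `isCompleteRow` is imported from `@powersync/service-core` and its implementation is not part of this diff. A plausible sketch, assuming incomplete rows carry `undefined` values for missing (e.g. TOAST) columns, and that rows are always treated as complete when `storeCurrentData` is false:

```ts
// Hypothetical sketch only - the real helper lives in @powersync/service-core.
export function isCompleteRow(storeCurrentData: boolean, row: Record<string, unknown>): boolean {
  if (!storeCurrentData) {
    // In this mode the replication stream is assumed to always deliver full rows.
    return true;
  }
  // A stored row is complete when no column value is missing.
  return !Object.values(row).some((value) => typeof value == 'undefined');
}
```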
@@ -685,6 +702,7 @@ export class MongoBucketBatch

     if (!createEmptyCheckpoints && this.persisted_op == null) {
       // Nothing to commit - also return true
+      await this.autoActivate(lsn);
       return true;
     }

@@ -729,12 +747,65 @@ export class MongoBucketBatch
       },
       { session: this.session }
     );
+    await this.autoActivate(lsn);
     await this.db.notifyCheckpoint();
     this.persisted_op = null;
     this.last_checkpoint_lsn = lsn;
     return true;
   }

+  /**
+   * Switch from processing -> active if relevant.
+   *
+   * Called on new commits.
+   */
+  private async autoActivate(lsn: string) {
+    if (!this.needsActivation) {
+      return;
+    }
+
+    // Activate the batch, so it can start processing.
+    // This is done automatically when the first save() is called.
+
+    const session = this.session;
+    let activated = false;
+    await session.withTransaction(async () => {
+      const doc = await this.db.sync_rules.findOne({ _id: this.group_id }, { session });
+      if (doc && doc.state == 'PROCESSING') {
+        await this.db.sync_rules.updateOne(
+          {
+            _id: this.group_id
+          },
+          {
+            $set: {
+              state: storage.SyncRuleState.ACTIVE
+            }
+          },
+          { session }
+        );
+
+        await this.db.sync_rules.updateMany(
+          {
+            _id: { $ne: this.group_id },
+            state: { $in: [storage.SyncRuleState.ACTIVE, storage.SyncRuleState.ERRORED] }
+          },
+          {
+            $set: {
+              state: storage.SyncRuleState.STOP
+            }
+          },
+          { session }
+        );
+        activated = true;
+      }
+    });
+    if (activated) {
+      this.logger.info(`Activated new sync rules at ${lsn}`);
+      await this.db.notifyCheckpoint();
+    }
+    this.needsActivation = false;
+  }
+
   async keepalive(lsn: string): Promise<boolean> {
     if (this.last_checkpoint_lsn != null && lsn <= this.last_checkpoint_lsn) {
       // No-op
@@ -782,13 +853,14 @@ export class MongoBucketBatch
       },
       { session: this.session }
     );
+    await this.autoActivate(lsn);
     await this.db.notifyCheckpoint();
     this.last_checkpoint_lsn = lsn;

     return true;
   }

-  async setSnapshotLsn(lsn: string): Promise<void> {
+  async setResumeLsn(lsn: string): Promise<void> {
     const update: Partial<SyncRuleDocument> = {
       snapshot_lsn: lsn
     };
--- /dev/null
+++ MongoParameterCompactor.ts (new file)
@@ -0,0 +1,105 @@
+import { logger } from '@powersync/lib-services-framework';
+import { bson, CompactOptions, InternalOpId } from '@powersync/service-core';
+import { LRUCache } from 'lru-cache';
+import { PowerSyncMongo } from './db.js';
+import { mongo } from '@powersync/lib-service-mongodb';
+import { BucketParameterDocument } from './models.js';
+
+/**
+ * Compacts parameter lookup data (the bucket_parameters collection).
+ *
+ * This scans through the entire collection to find data to compact.
+ *
+ * For background, see the `/docs/parameters-lookups.md` file.
+ */
+export class MongoParameterCompactor {
+  constructor(
+    private db: PowerSyncMongo,
+    private group_id: number,
+    private checkpoint: InternalOpId,
+    private options: CompactOptions
+  ) {}
+
+  async compact() {
+    logger.info(`Compacting parameters for group ${this.group_id} up to checkpoint ${this.checkpoint}`);
+    // This is the currently-active checkpoint.
+    // We do not remove any data that may be used by this checkpoint.
+    // snapshot queries ensure that if any clients are still using older checkpoints, they would
+    // not be affected by this compaction.
+    const checkpoint = this.checkpoint;
+
+    // Index on {'key.g': 1, lookup: 1, _id: 1}
+    // In theory, we could let MongoDB do more of the work here, by grouping by (key, lookup)
+    // in MongoDB already. However, that risks running into cases where MongoDB needs to process
+    // very large amounts of data before returning results, which could lead to timeouts.
+    const cursor = this.db.bucket_parameters.find(
+      {
+        'key.g': this.group_id
+      },
+      {
+        sort: { lookup: 1, _id: 1 },
+        batchSize: 10_000,
+        projection: { _id: 1, key: 1, lookup: 1, bucket_parameters: 1 }
+      }
+    );
+
+    // The index doesn't cover sorting by key, so we keep our own cache of the last seen key.
+    let lastByKey = new LRUCache<string, InternalOpId>({
+      max: this.options.compactParameterCacheLimit ?? 10_000
+    });
+    let removeIds: InternalOpId[] = [];
+    let removeDeleted: mongo.AnyBulkWriteOperation<BucketParameterDocument>[] = [];
+
+    const flush = async (force: boolean) => {
+      if (removeIds.length >= 1000 || (force && removeIds.length > 0)) {
+        const results = await this.db.bucket_parameters.deleteMany({ _id: { $in: removeIds } });
+        logger.info(`Removed ${results.deletedCount} (${removeIds.length}) superseded parameter entries`);
+        removeIds = [];
+      }
+
+      if (removeDeleted.length > 10 || (force && removeDeleted.length > 0)) {
+        const results = await this.db.bucket_parameters.bulkWrite(removeDeleted);
+        logger.info(`Removed ${results.deletedCount} (${removeDeleted.length}) deleted parameter entries`);
+        removeDeleted = [];
+      }
+    };
+
+    while (await cursor.hasNext()) {
+      const batch = cursor.readBufferedDocuments();
+      for (let doc of batch) {
+        if (doc._id >= checkpoint) {
+          continue;
+        }
+        const uniqueKey = (
+          bson.serialize({
+            k: doc.key,
+            l: doc.lookup
+          }) as Buffer
+        ).toString('base64');
+        const previous = lastByKey.get(uniqueKey);
+        if (previous != null && previous < doc._id) {
+          // We have a newer entry for the same key, so we can remove the old one.
+          removeIds.push(previous);
+        }
+        lastByKey.set(uniqueKey, doc._id);
+
+        if (doc.bucket_parameters?.length == 0) {
+          // This is a delete operation, so we can remove it completely.
+          // For this we cannot remove the operation itself only: There is a possibility that
+          // there is still an earlier operation with the same key and lookup, that we don't have
+          // in the cache due to cache size limits. So we need to explicitly remove all earlier operations.
+          removeDeleted.push({
+            deleteMany: {
+              filter: { 'key.g': doc.key.g, lookup: doc.lookup, _id: { $lte: doc._id }, key: doc.key }
+            }
+          });
+        }
+      }
+
+      await flush(false);
+    }
+
+    await flush(true);
+    logger.info('Parameter compaction completed');
+  }
+}
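
Note: the `compact()` change later in this diff wires this class up behind `options.compactParameterData`. As a standalone usage sketch (the `db` instance construction is assumed; the group id and checkpoint values are illustrative):

```ts
import { PowerSyncMongo } from './db.js';
import { MongoParameterCompactor } from './MongoParameterCompactor.js';

// Assumption: a PowerSyncMongo instance is constructed elsewhere (see db.js).
declare const db: PowerSyncMongo;

// Compact parameter lookups for sync-rules group 1, preserving anything that
// checkpoint 1000n may still reference, with an explicit LRU cache bound.
await new MongoParameterCompactor(db, 1, 1000n, { compactParameterCacheLimit: 10_000 }).compact();
```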
--- MongoSyncBucketStorage.ts
+++ MongoSyncBucketStorage.ts
@@ -2,11 +2,9 @@ import * as lib_mongo from '@powersync/lib-service-mongodb';
 import { mongo } from '@powersync/lib-service-mongodb';
 import {
   BaseObserver,
-  ErrorCode,
   logger,
   ReplicationAbortedError,
-  ServiceAssertionError,
-  ServiceError
+  ServiceAssertionError
 } from '@powersync/lib-services-framework';
 import {
@@ -16,6 +14,7 @@ import {
   GetCheckpointChangesOptions,
   InternalOpId,
   internalToExternalOpId,
+  maxLsn,
   ProtocolOpId,
   ReplicationCheckpoint,
   storage,
@@ -29,18 +28,12 @@ import { LRUCache } from 'lru-cache';
 import * as timers from 'timers/promises';
 import { MongoBucketStorage } from '../MongoBucketStorage.js';
 import { PowerSyncMongo } from './db.js';
-import {
-  BucketDataDocument,
-  BucketDataKey,
-  BucketStateDocument,
-  SourceKey,
-  SourceTableDocument,
-  SyncRuleCheckpointState
-} from './models.js';
+import { BucketDataDocument, BucketDataKey, BucketStateDocument, SourceKey, SourceTableDocument } from './models.js';
 import { MongoBucketBatch } from './MongoBucketBatch.js';
 import { MongoCompactor } from './MongoCompactor.js';
 import { MongoWriteCheckpointAPI } from './MongoWriteCheckpointAPI.js';
-import { idPrefixFilter, mapOpEntry, readSingleBatch } from './util.js';
+import { idPrefixFilter, mapOpEntry, readSingleBatch, setSessionSnapshotTime } from './util.js';
+import { MongoParameterCompactor } from './MongoParameterCompactor.js';

 export class MongoSyncBucketStorage
   extends BaseObserver<storage.SyncRulesBucketStorageListener>
@@ -105,22 +98,44 @@ export class MongoSyncBucketStorage
   }

   async getCheckpoint(): Promise<storage.ReplicationCheckpoint> {
-    const doc = await this.db.sync_rules.findOne(
-      { _id: this.group_id },
-      {
-        projection: { last_checkpoint: 1, last_checkpoint_lsn: 1, snapshot_done: 1 }
+    return (await this.getCheckpointInternal()) ?? new EmptyReplicationCheckpoint();
+  }
+
+  async getCheckpointInternal(): Promise<storage.ReplicationCheckpoint | null> {
+    return await this.db.client.withSession({ snapshot: true }, async (session) => {
+      const doc = await this.db.sync_rules.findOne(
+        { _id: this.group_id },
+        {
+          session,
+          projection: { _id: 1, state: 1, last_checkpoint: 1, last_checkpoint_lsn: 1, snapshot_done: 1 }
+        }
+      );
+      if (!doc?.snapshot_done || !['ACTIVE', 'ERRORED'].includes(doc.state)) {
+        // Sync rules not active - return null
+        return null;
       }
-    );
-    if (!doc?.snapshot_done) {
-      return {
-        checkpoint: 0n,
-        lsn: null
-      };
-    }
-    return {
-      checkpoint: doc?.last_checkpoint ?? 0n,
-      lsn: doc?.last_checkpoint_lsn ?? null
-    };
+
+      // Specifically using operationTime instead of clusterTime
+      // There are 3 fields in the response:
+      // 1. operationTime, not exposed for snapshot sessions (used for causal consistency)
+      // 2. clusterTime (used for connection management)
+      // 3. atClusterTime, which is session.snapshotTime
+      // We use atClusterTime, to match the driver's internal snapshot handling.
+      // There are cases where clusterTime > operationTime and atClusterTime,
+      // which could cause snapshot queries using this as the snapshotTime to timeout.
+      // This was specifically observed on MongoDB 6.0 and 7.0.
+      const snapshotTime = (session as any).snapshotTime as bson.Timestamp | undefined;
+      if (snapshotTime == null) {
+        throw new ServiceAssertionError('Missing snapshotTime in getCheckpoint()');
+      }
+      return new MongoReplicationCheckpoint(
+        this,
+        // null/0n is a valid checkpoint in some cases, for example if the initial snapshot was empty
+        doc.last_checkpoint ?? 0n,
+        doc.last_checkpoint_lsn ?? null,
+        snapshotTime
+      );
+    });
   }

   async startBatch(
@@ -131,7 +146,7 @@ export class MongoSyncBucketStorage
       {
         _id: this.group_id
       },
-      { projection: { last_checkpoint_lsn: 1, no_checkpoint_before: 1, keepalive_op: 1 } }
+      { projection: { last_checkpoint_lsn: 1, no_checkpoint_before: 1, keepalive_op: 1, snapshot_lsn: 1 } }
     );
     const checkpoint_lsn = doc?.last_checkpoint_lsn ?? null;

@@ -142,6 +157,7 @@
       groupId: this.group_id,
       slotName: this.slot_name,
       lastCheckpointLsn: checkpoint_lsn,
+      resumeFromLsn: maxLsn(checkpoint_lsn, doc?.snapshot_lsn),
       noCheckpointBeforeLsn: doc?.no_checkpoint_before ?? options.zeroLSN,
       keepaliveOp: doc?.keepalive_op ? BigInt(doc.keepalive_op) : null,
       storeCurrentData: options.storeCurrentData,
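
Note: `maxLsn` is newly imported from `@powersync/service-core` (see the import hunk above); its implementation is not in this diff. A minimal sketch, assuming LSN strings are encoded so that lexicographic order matches replication order, consistent with comparisons like `lsn <= this.last_checkpoint_lsn` in MongoBucketBatch:

```ts
// Hypothetical sketch only - the real helper lives in @powersync/service-core.
export function maxLsn(a: string | null | undefined, b: string | null | undefined): string | null {
  if (a == null) {
    return b ?? null;
  }
  if (b == null) {
    return a;
  }
  // Assumption: LSNs compare correctly as plain strings.
  return a > b ? a : b;
}
```

This picks the resume point described in the MongoBucketBatch doc comment: the later of the last consistent commit and the initial snapshot LSN.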
@@ -162,9 +178,9 @@ export class MongoSyncBucketStorage
   async resolveTable(options: storage.ResolveTableOptions): Promise<storage.ResolveTableResult> {
     const { group_id, connection_id, connection_tag, entity_descriptor } = options;

-    const { schema, name: table, objectId, replicationColumns } = entity_descriptor;
+    const { schema, name, objectId, replicaIdColumns } = entity_descriptor;

-    const columns = replicationColumns.map((column) => ({
+    const normalizedReplicaIdColumns = replicaIdColumns.map((column) => ({
       name: column.name,
       type: column.type,
       type_oid: column.typeId
@@ -176,8 +192,8 @@ export class MongoSyncBucketStorage
       group_id: group_id,
       connection_id: connection_id,
       schema_name: schema,
-      table_name: table,
-      replica_id_columns2: columns
+      table_name: name,
+      replica_id_columns2: normalizedReplicaIdColumns
     };
     if (objectId != null) {
       filter.relation_id = objectId;
@@ -190,24 +206,24 @@ export class MongoSyncBucketStorage
         connection_id: connection_id,
         relation_id: objectId,
         schema_name: schema,
-        table_name: table,
+        table_name: name,
         replica_id_columns: null,
-        replica_id_columns2: columns,
+        replica_id_columns2: normalizedReplicaIdColumns,
         snapshot_done: false,
         snapshot_status: undefined
       };

       await col.insertOne(doc, { session });
     }
-    const sourceTable = new storage.SourceTable(
-      doc._id,
-      connection_tag,
-      objectId,
-      schema,
-      table,
-      replicationColumns,
-      doc.snapshot_done ?? true
-    );
+    const sourceTable = new storage.SourceTable({
+      id: doc._id,
+      connectionTag: connection_tag,
+      objectId: objectId,
+      schema: schema,
+      name: name,
+      replicaIdColumns: replicaIdColumns,
+      snapshotComplete: doc.snapshot_done ?? true
+    });
     sourceTable.syncEvent = options.sync_rules.tableTriggersEvent(sourceTable);
     sourceTable.syncData = options.sync_rules.tableSyncsData(sourceTable);
     sourceTable.syncParameters = options.sync_rules.tableSyncsParameters(sourceTable);
@@ -222,7 +238,7 @@ export class MongoSyncBucketStorage

     let dropTables: storage.SourceTable[] = [];
     // Detect tables that are either renamed, or have different replica_id_columns
-    let truncateFilter = [{ schema_name: schema, table_name: table }] as any[];
+    let truncateFilter = [{ schema_name: schema, table_name: name }] as any[];
     if (objectId != null) {
       // Only detect renames if the source uses relation ids.
       truncateFilter.push({ relation_id: objectId });
@@ -240,15 +256,16 @@ export class MongoSyncBucketStorage
       .toArray();
     dropTables = truncate.map(
       (doc) =>
-        new storage.SourceTable(
-          doc._id,
-          connection_tag,
-          doc.relation_id,
-          doc.schema_name,
-          doc.table_name,
-          doc.replica_id_columns2?.map((c) => ({ name: c.name, typeOid: c.type_oid, type: c.type })) ?? [],
-          doc.snapshot_done ?? true
-        )
+        new storage.SourceTable({
+          id: doc._id,
+          connectionTag: connection_tag,
+          objectId: doc.relation_id,
+          schema: doc.schema_name,
+          name: doc.table_name,
+          replicaIdColumns:
+            doc.replica_id_columns2?.map((c) => ({ name: c.name, typeOid: c.type_oid, type: c.type })) ?? [],
+          snapshotComplete: doc.snapshot_done ?? true
+        })
     );
259
276
  return result!;
260
277
  }
261
278
 
262
- async getParameterSets(checkpoint: utils.InternalOpId, lookups: ParameterLookup[]): Promise<SqliteJsonRow[]> {
263
- const lookupFilter = lookups.map((lookup) => {
264
- return storage.serializeLookup(lookup);
265
- });
266
- const rows = await this.db.bucket_parameters
267
- .aggregate([
268
- {
269
- $match: {
270
- 'key.g': this.group_id,
271
- lookup: { $in: lookupFilter },
272
- _id: { $lte: checkpoint }
273
- }
274
- },
275
- {
276
- $sort: {
277
- _id: -1
278
- }
279
- },
280
- {
281
- $group: {
282
- _id: { key: '$key', lookup: '$lookup' },
283
- bucket_parameters: {
284
- $first: '$bucket_parameters'
279
+ async getParameterSets(checkpoint: MongoReplicationCheckpoint, lookups: ParameterLookup[]): Promise<SqliteJsonRow[]> {
280
+ return this.db.client.withSession({ snapshot: true }, async (session) => {
281
+ // Set the session's snapshot time to the checkpoint's snapshot time.
282
+ // An alternative would be to create the session when the checkpoint is created, but managing
283
+ // the session lifetime would become more complex.
284
+ // Starting and ending sessions are cheap (synchronous when no transactions are used),
285
+ // so this should be fine.
286
+ // This is a roundabout way of setting {readConcern: {atClusterTime: clusterTime}}, since
287
+ // that is not exposed directly by the driver.
288
+ // Future versions of the driver may change the snapshotTime behavior, so we need tests to
289
+ // validate that this works as expected. We test this in the compacting tests.
290
+ setSessionSnapshotTime(session, checkpoint.snapshotTime);
291
+ const lookupFilter = lookups.map((lookup) => {
292
+ return storage.serializeLookup(lookup);
293
+ });
294
+ // This query does not use indexes super efficiently, apart from the lookup filter.
295
+ // From some experimentation I could do individual lookups more efficient using an index
296
+ // on {'key.g': 1, lookup: 1, 'key.t': 1, 'key.k': 1, _id: -1},
297
+ // but could not do the same using $group.
298
+ // For now, just rely on compacting to remove extraneous data.
299
+ // For a description of the data format, see the `/docs/parameters-lookups.md` file.
300
+ const rows = await this.db.bucket_parameters
301
+ .aggregate(
302
+ [
303
+ {
304
+ $match: {
305
+ 'key.g': this.group_id,
306
+ lookup: { $in: lookupFilter },
307
+ _id: { $lte: checkpoint.checkpoint }
308
+ }
309
+ },
310
+ {
311
+ $sort: {
312
+ _id: -1
313
+ }
314
+ },
315
+ {
316
+ $group: {
317
+ _id: { key: '$key', lookup: '$lookup' },
318
+ bucket_parameters: {
319
+ $first: '$bucket_parameters'
320
+ }
321
+ }
285
322
  }
323
+ ],
324
+ {
325
+ session,
326
+ readConcern: 'snapshot',
327
+ // Limit the time for the operation to complete, to avoid getting connection timeouts
328
+ maxTimeMS: lib_mongo.db.MONGO_OPERATION_TIMEOUT_MS
286
329
  }
287
- }
288
- ])
289
- .toArray();
290
- const groupedParameters = rows.map((row) => {
291
- return row.bucket_parameters;
330
+ )
331
+ .toArray()
332
+ .catch((e) => {
333
+ throw lib_mongo.mapQueryError(e, 'while evaluating parameter queries');
334
+ });
335
+ const groupedParameters = rows.map((row) => {
336
+ return row.bucket_parameters;
337
+ });
338
+ return groupedParameters.flat();
292
339
  });
293
- return groupedParameters.flat();
294
340
  }
295
341
 
296
342
  async *getBucketDataBatch(
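
Note: `setSessionSnapshotTime` is imported from `./util.js`, which is not included in this diff. Given that `getCheckpoint()` above reads the same driver-internal field via `(session as any).snapshotTime`, a minimal sketch, assuming the helper simply pins that field, would be:

```ts
import { mongo } from '@powersync/lib-service-mongodb';
import { bson } from '@powersync/service-core';

// Minimal sketch - snapshotTime is not public driver API. This mirrors the
// (session as any).snapshotTime read in getCheckpoint(), and the reliance on
// driver internals is exactly why the comment above asks for test coverage.
export function setSessionSnapshotTime(session: mongo.ClientSession, time: bson.Timestamp) {
  (session as any).snapshotTime = time;
}
```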
@@ -575,7 +621,6 @@ export class MongoSyncBucketStorage
           `${this.slot_name} Cleared batch of data in ${lib_mongo.db.MONGO_CLEAR_OPERATION_TIMEOUT_MS}ms, continuing...`
         );
         await timers.setTimeout(lib_mongo.db.MONGO_CLEAR_OPERATION_TIMEOUT_MS / 5);
-        continue;
       } else {
         throw e;
       }
@@ -640,41 +685,6 @@ export class MongoSyncBucketStorage
     );
   }

-  async autoActivate(): Promise<void> {
-    await this.db.client.withSession(async (session) => {
-      await session.withTransaction(async () => {
-        const doc = await this.db.sync_rules.findOne({ _id: this.group_id }, { session });
-        if (doc && doc.state == 'PROCESSING') {
-          await this.db.sync_rules.updateOne(
-            {
-              _id: this.group_id
-            },
-            {
-              $set: {
-                state: storage.SyncRuleState.ACTIVE
-              }
-            },
-            { session }
-          );
-
-          await this.db.sync_rules.updateMany(
-            {
-              _id: { $ne: this.group_id },
-              state: { $in: [storage.SyncRuleState.ACTIVE, storage.SyncRuleState.ERRORED] }
-            },
-            {
-              $set: {
-                state: storage.SyncRuleState.STOP
-              }
-            },
-            { session }
-          );
-          await this.db.notifyCheckpoint();
-        }
-      });
-    });
-  }
-
   async reportError(e: any): Promise<void> {
     const message = String(e.message ?? 'Replication failure');
     await this.db.sync_rules.updateOne(
@@ -691,14 +701,11 @@ export class MongoSyncBucketStorage
   }

   async compact(options?: storage.CompactOptions) {
-    return new MongoCompactor(this.db, this.group_id, options).compact();
-  }
-
-  private makeActiveCheckpoint(doc: SyncRuleCheckpointState | null) {
-    return {
-      checkpoint: doc?.last_checkpoint ?? 0n,
-      lsn: doc?.last_checkpoint_lsn ?? null
-    };
+    const checkpoint = await this.getCheckpointInternal();
+    await new MongoCompactor(this.db, this.group_id, options).compact();
+    if (checkpoint != null && options?.compactParameterData) {
+      await new MongoParameterCompactor(this.db, this.group_id, checkpoint.checkpoint, options).compact();
+    }
   }

   /**
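
Note: parameter compaction is opt-in. A usage sketch against the new `compact()` signature (the storage instance is assumed; both option names appear in this diff):

```ts
// Assumption: a MongoSyncBucketStorage instance is obtained elsewhere.
declare const syncBucketStorage: MongoSyncBucketStorage;

// compactParameterData enables the new MongoParameterCompactor pass;
// compactParameterCacheLimit bounds its LRU cache of last-seen (key, lookup) pairs.
await syncBucketStorage.compact({
  compactParameterData: true,
  compactParameterCacheLimit: 10_000
});
```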
@@ -720,33 +727,13 @@ export class MongoSyncBucketStorage
         break;
       }

-      const doc = await this.db.sync_rules.findOne(
-        {
-          _id: this.group_id,
-          state: { $in: [storage.SyncRuleState.ACTIVE, storage.SyncRuleState.ERRORED] }
-        },
-        {
-          limit: 1,
-          projection: {
-            _id: 1,
-            state: 1,
-            last_checkpoint: 1,
-            last_checkpoint_lsn: 1
-          }
-        }
-      );
-
-      if (doc == null) {
-        // Sync rules not present or not active.
-        // Abort the connections - clients will have to retry later.
-        throw new ServiceError(ErrorCode.PSYNC_S2302, 'No active sync rules available');
-      } else if (doc.state != storage.SyncRuleState.ACTIVE && doc.state != storage.SyncRuleState.ERRORED) {
+      const op = await this.getCheckpointInternal();
+      if (op == null) {
         // Sync rules have changed - abort and restart.
         // We do a soft close of the stream here - no error
         break;
       }

-      const op = this.makeActiveCheckpoint(doc);
       // Check for LSN / checkpoint changes - ignore other metadata changes
       if (lastOp == null || op.lsn != lastOp.lsn || op.checkpoint != lastOp.checkpoint) {
         lastOp = op;
@@ -1013,3 +1000,25 @@ interface InternalCheckpointChanges extends CheckpointChanges {
   updatedWriteCheckpoints: Map<string, bigint>;
   invalidateWriteCheckpoints: boolean;
 }
+
+class MongoReplicationCheckpoint implements ReplicationCheckpoint {
+  constructor(
+    private storage: MongoSyncBucketStorage,
+    public readonly checkpoint: InternalOpId,
+    public readonly lsn: string | null,
+    public snapshotTime: mongo.Timestamp
+  ) {}
+
+  async getParameterSets(lookups: ParameterLookup[]): Promise<SqliteJsonRow[]> {
+    return this.storage.getParameterSets(this, lookups);
+  }
+}
+
+class EmptyReplicationCheckpoint implements ReplicationCheckpoint {
+  readonly checkpoint: InternalOpId = 0n;
+  readonly lsn: string | null = null;
+
+  async getParameterSets(lookups: ParameterLookup[]): Promise<SqliteJsonRow[]> {
+    return [];
+  }
+}
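
Note: with these classes, parameter queries are scoped to the checkpoint they were resolved at. A consumer sketch, assuming the `ReplicationCheckpoint` interface from `@powersync/service-core` exposes `getParameterSets` as both implementations here do (the storage instance and lookups are placeholders; lookups normally come from the sync rules):

```ts
declare const bucketStorage: MongoSyncBucketStorage; // assumed instance
declare const lookups: ParameterLookup[]; // normally produced by sync rules

const checkpoint = await bucketStorage.getCheckpoint();
// For an inactive sync-rules group this is an EmptyReplicationCheckpoint:
// checkpoint 0n, lsn null, and no parameter sets.
const rows = await checkpoint.getParameterSets(lookups);
console.log(`checkpoint ${checkpoint.checkpoint} at lsn ${checkpoint.lsn}: ${rows.length} rows`);
```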