@powersync/service-module-mongodb-storage 0.10.3 → 0.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +48 -0
- package/dist/storage/implementation/MongoBucketBatch.d.ts +20 -1
- package/dist/storage/implementation/MongoBucketBatch.js +59 -3
- package/dist/storage/implementation/MongoBucketBatch.js.map +1 -1
- package/dist/storage/implementation/MongoParameterCompactor.d.ts +17 -0
- package/dist/storage/implementation/MongoParameterCompactor.js +92 -0
- package/dist/storage/implementation/MongoParameterCompactor.js.map +1 -0
- package/dist/storage/implementation/MongoStorageProvider.js +2 -0
- package/dist/storage/implementation/MongoStorageProvider.js.map +1 -1
- package/dist/storage/implementation/MongoSyncBucketStorage.d.ts +12 -4
- package/dist/storage/implementation/MongoSyncBucketStorage.js +153 -109
- package/dist/storage/implementation/MongoSyncBucketStorage.js.map +1 -1
- package/dist/storage/implementation/db.js +5 -2
- package/dist/storage/implementation/db.js.map +1 -1
- package/dist/storage/implementation/models.d.ts +6 -0
- package/dist/storage/implementation/util.d.ts +1 -4
- package/dist/storage/implementation/util.js +14 -7
- package/dist/storage/implementation/util.js.map +1 -1
- package/package.json +7 -7
- package/src/storage/implementation/MongoBucketBatch.ts +74 -2
- package/src/storage/implementation/MongoParameterCompactor.ts +105 -0
- package/src/storage/implementation/MongoStorageProvider.ts +2 -1
- package/src/storage/implementation/MongoSyncBucketStorage.ts +169 -152
- package/src/storage/implementation/db.ts +8 -2
- package/src/storage/implementation/models.ts +6 -0
- package/src/storage/implementation/util.ts +14 -8
- package/test/src/storage_compacting.test.ts +2 -0
- package/tsconfig.tsbuildinfo +1 -1
@@ -2,11 +2,9 @@ import * as lib_mongo from '@powersync/lib-service-mongodb';
 import { mongo } from '@powersync/lib-service-mongodb';
 import {
   BaseObserver,
-  ErrorCode,
   logger,
   ReplicationAbortedError,
-  ServiceAssertionError
-  ServiceError
+  ServiceAssertionError
 } from '@powersync/lib-services-framework';
 import {
   BroadcastIterable,
@@ -16,6 +14,7 @@ import {
   GetCheckpointChangesOptions,
   InternalOpId,
   internalToExternalOpId,
+  maxLsn,
   ProtocolOpId,
   ReplicationCheckpoint,
   storage,
@@ -29,18 +28,12 @@ import { LRUCache } from 'lru-cache';
 import * as timers from 'timers/promises';
 import { MongoBucketStorage } from '../MongoBucketStorage.js';
 import { PowerSyncMongo } from './db.js';
-import {
-  BucketDataDocument,
-  BucketDataKey,
-  BucketStateDocument,
-  SourceKey,
-  SourceTableDocument,
-  SyncRuleCheckpointState
-} from './models.js';
+import { BucketDataDocument, BucketDataKey, BucketStateDocument, SourceKey, SourceTableDocument } from './models.js';
 import { MongoBucketBatch } from './MongoBucketBatch.js';
 import { MongoCompactor } from './MongoCompactor.js';
 import { MongoWriteCheckpointAPI } from './MongoWriteCheckpointAPI.js';
-import { idPrefixFilter, mapOpEntry, readSingleBatch } from './util.js';
+import { idPrefixFilter, mapOpEntry, readSingleBatch, setSessionSnapshotTime } from './util.js';
+import { MongoParameterCompactor } from './MongoParameterCompactor.js';
 
 export class MongoSyncBucketStorage
   extends BaseObserver<storage.SyncRulesBucketStorageListener>
@@ -105,22 +98,44 @@ export class MongoSyncBucketStorage
   }
 
   async getCheckpoint(): Promise<storage.ReplicationCheckpoint> {
-
-
-
-
+    return (await this.getCheckpointInternal()) ?? new EmptyReplicationCheckpoint();
+  }
+
+  async getCheckpointInternal(): Promise<storage.ReplicationCheckpoint | null> {
+    return await this.db.client.withSession({ snapshot: true }, async (session) => {
+      const doc = await this.db.sync_rules.findOne(
+        { _id: this.group_id },
+        {
+          session,
+          projection: { _id: 1, state: 1, last_checkpoint: 1, last_checkpoint_lsn: 1, snapshot_done: 1 }
+        }
+      );
+      if (!doc?.snapshot_done || !['ACTIVE', 'ERRORED'].includes(doc.state)) {
+        // Sync rules not active - return null
+        return null;
       }
-
-
-
-
-
-
-
-
-
-
-
+
+      // Specifically using operationTime instead of clusterTime
+      // There are 3 fields in the response:
+      // 1. operationTime, not exposed for snapshot sessions (used for causal consistency)
+      // 2. clusterTime (used for connection management)
+      // 3. atClusterTime, which is session.snapshotTime
+      // We use atClusterTime, to match the driver's internal snapshot handling.
+      // There are cases where clusterTime > operationTime and atClusterTime,
+      // which could cause snapshot queries using this as the snapshotTime to timeout.
+      // This was specifically observed on MongoDB 6.0 and 7.0.
+      const snapshotTime = (session as any).snapshotTime as bson.Timestamp | undefined;
+      if (snapshotTime == null) {
+        throw new ServiceAssertionError('Missing snapshotTime in getCheckpoint()');
+      }
+      return new MongoReplicationCheckpoint(
+        this,
+        // null/0n is a valid checkpoint in some cases, for example if the initial snapshot was empty
+        doc.last_checkpoint ?? 0n,
+        doc.last_checkpoint_lsn ?? null,
+        snapshotTime
+      );
+    });
   }
 
   async startBatch(
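
Note on the getCheckpointInternal() change above: the snapshot time is read from the driver-internal session.snapshotTime. Below is a minimal standalone sketch of the same technique (illustrative only, not part of the package diff; the database and collection names are placeholders):

```ts
import { MongoClient, Timestamp } from 'mongodb';

// Sketch: pin a snapshot session by performing one read, then capture the
// driver-internal snapshotTime (atClusterTime). The property is accessed via a
// cast because the driver does not expose it publicly.
async function captureSnapshotTime(client: MongoClient): Promise<Timestamp | undefined> {
  return client.withSession({ snapshot: true }, async (session) => {
    // Any read on the snapshot session pins it to a cluster time.
    await client.db('powersync').collection('sync_rules').findOne({}, { session });
    return (session as any).snapshotTime as Timestamp | undefined;
  });
}
```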
@@ -131,7 +146,7 @@ export class MongoSyncBucketStorage
       {
         _id: this.group_id
       },
-      { projection: { last_checkpoint_lsn: 1, no_checkpoint_before: 1, keepalive_op: 1 } }
+      { projection: { last_checkpoint_lsn: 1, no_checkpoint_before: 1, keepalive_op: 1, snapshot_lsn: 1 } }
     );
     const checkpoint_lsn = doc?.last_checkpoint_lsn ?? null;
 
@@ -142,6 +157,7 @@ export class MongoSyncBucketStorage
       groupId: this.group_id,
       slotName: this.slot_name,
       lastCheckpointLsn: checkpoint_lsn,
+      resumeFromLsn: maxLsn(checkpoint_lsn, doc?.snapshot_lsn),
       noCheckpointBeforeLsn: doc?.no_checkpoint_before ?? options.zeroLSN,
       keepaliveOp: doc?.keepalive_op ? BigInt(doc.keepalive_op) : null,
       storeCurrentData: options.storeCurrentData,
@@ -162,9 +178,9 @@ export class MongoSyncBucketStorage
   async resolveTable(options: storage.ResolveTableOptions): Promise<storage.ResolveTableResult> {
     const { group_id, connection_id, connection_tag, entity_descriptor } = options;
 
-    const { schema, name
+    const { schema, name, objectId, replicaIdColumns } = entity_descriptor;
 
-    const
+    const normalizedReplicaIdColumns = replicaIdColumns.map((column) => ({
       name: column.name,
       type: column.type,
       type_oid: column.typeId
@@ -176,8 +192,8 @@ export class MongoSyncBucketStorage
       group_id: group_id,
       connection_id: connection_id,
       schema_name: schema,
-      table_name:
-      replica_id_columns2:
+      table_name: name,
+      replica_id_columns2: normalizedReplicaIdColumns
     };
     if (objectId != null) {
       filter.relation_id = objectId;
@@ -190,24 +206,24 @@ export class MongoSyncBucketStorage
         connection_id: connection_id,
         relation_id: objectId,
         schema_name: schema,
-        table_name:
+        table_name: name,
         replica_id_columns: null,
-        replica_id_columns2:
+        replica_id_columns2: normalizedReplicaIdColumns,
         snapshot_done: false,
         snapshot_status: undefined
       };
 
       await col.insertOne(doc, { session });
     }
-    const sourceTable = new storage.SourceTable(
-      doc._id,
-      connection_tag,
-      objectId,
-      schema,
-
-
-      doc.snapshot_done ?? true
-    );
+    const sourceTable = new storage.SourceTable({
+      id: doc._id,
+      connectionTag: connection_tag,
+      objectId: objectId,
+      schema: schema,
+      name: name,
+      replicaIdColumns: replicaIdColumns,
+      snapshotComplete: doc.snapshot_done ?? true
+    });
     sourceTable.syncEvent = options.sync_rules.tableTriggersEvent(sourceTable);
     sourceTable.syncData = options.sync_rules.tableSyncsData(sourceTable);
     sourceTable.syncParameters = options.sync_rules.tableSyncsParameters(sourceTable);
@@ -222,7 +238,7 @@ export class MongoSyncBucketStorage
 
     let dropTables: storage.SourceTable[] = [];
     // Detect tables that are either renamed, or have different replica_id_columns
-    let truncateFilter = [{ schema_name: schema, table_name:
+    let truncateFilter = [{ schema_name: schema, table_name: name }] as any[];
     if (objectId != null) {
       // Only detect renames if the source uses relation ids.
       truncateFilter.push({ relation_id: objectId });
@@ -240,15 +256,16 @@ export class MongoSyncBucketStorage
       .toArray();
     dropTables = truncate.map(
       (doc) =>
-        new storage.SourceTable(
-          doc._id,
-          connection_tag,
-          doc.relation_id,
-          doc.schema_name,
-          doc.table_name,
-
-
-
+        new storage.SourceTable({
+          id: doc._id,
+          connectionTag: connection_tag,
+          objectId: doc.relation_id,
+          schema: doc.schema_name,
+          name: doc.table_name,
+          replicaIdColumns:
+            doc.replica_id_columns2?.map((c) => ({ name: c.name, typeOid: c.type_oid, type: c.type })) ?? [],
+          snapshotComplete: doc.snapshot_done ?? true
+        })
     );
 
     result = {
@@ -259,38 +276,67 @@ export class MongoSyncBucketStorage
     return result!;
   }
 
-  async getParameterSets(checkpoint:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  async getParameterSets(checkpoint: MongoReplicationCheckpoint, lookups: ParameterLookup[]): Promise<SqliteJsonRow[]> {
+    return this.db.client.withSession({ snapshot: true }, async (session) => {
+      // Set the session's snapshot time to the checkpoint's snapshot time.
+      // An alternative would be to create the session when the checkpoint is created, but managing
+      // the session lifetime would become more complex.
+      // Starting and ending sessions are cheap (synchronous when no transactions are used),
+      // so this should be fine.
+      // This is a roundabout way of setting {readConcern: {atClusterTime: clusterTime}}, since
+      // that is not exposed directly by the driver.
+      // Future versions of the driver may change the snapshotTime behavior, so we need tests to
+      // validate that this works as expected. We test this in the compacting tests.
+      setSessionSnapshotTime(session, checkpoint.snapshotTime);
+      const lookupFilter = lookups.map((lookup) => {
+        return storage.serializeLookup(lookup);
+      });
+      // This query does not use indexes super efficiently, apart from the lookup filter.
+      // From some experimentation I could do individual lookups more efficient using an index
+      // on {'key.g': 1, lookup: 1, 'key.t': 1, 'key.k': 1, _id: -1},
+      // but could not do the same using $group.
+      // For now, just rely on compacting to remove extraneous data.
+      // For a description of the data format, see the `/docs/parameters-lookups.md` file.
+      const rows = await this.db.bucket_parameters
+        .aggregate(
+          [
+            {
+              $match: {
+                'key.g': this.group_id,
+                lookup: { $in: lookupFilter },
+                _id: { $lte: checkpoint.checkpoint }
+              }
+            },
+            {
+              $sort: {
+                _id: -1
+              }
+            },
+            {
+              $group: {
+                _id: { key: '$key', lookup: '$lookup' },
+                bucket_parameters: {
+                  $first: '$bucket_parameters'
+                }
+              }
             }
+          ],
+          {
+            session,
+            readConcern: 'snapshot',
+            // Limit the time for the operation to complete, to avoid getting connection timeouts
+            maxTimeMS: lib_mongo.db.MONGO_OPERATION_TIMEOUT_MS
           }
-
-
-
-
-
+        )
+        .toArray()
+        .catch((e) => {
+          throw lib_mongo.mapQueryError(e, 'while evaluating parameter queries');
+        });
+      const groupedParameters = rows.map((row) => {
+        return row.bucket_parameters;
+      });
+      return groupedParameters.flat();
     });
-    return groupedParameters.flat();
   }
 
   async *getBucketDataBatch(
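
The aggregation added above uses a common "newest document per group" pattern: $sort descending on _id, then $group with $first. A standalone sketch of the same shape (illustrative only, not part of the package diff; database, collection, and parameter names are placeholders):

```ts
import { MongoClient } from 'mongodb';

// Sketch: return the most recent bucket_parameters value per (key, lookup).
async function latestParameterValues(client: MongoClient, groupId: number, lookups: Buffer[]) {
  return client
    .db('powersync')
    .collection('bucket_parameters')
    .aggregate([
      // Narrow by group and the serialized lookups first - per the comment in the
      // hunk above, the lookup filter is the only part that uses an index well.
      { $match: { 'key.g': groupId, lookup: { $in: lookups } } },
      // Descending _id puts the newest op first within each group...
      { $sort: { _id: -1 } },
      // ...so $first picks the latest value per (key, lookup).
      {
        $group: {
          _id: { key: '$key', lookup: '$lookup' },
          bucket_parameters: { $first: '$bucket_parameters' }
        }
      }
    ])
    .toArray();
}
```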
@@ -348,7 +394,10 @@ export class MongoSyncBucketStorage
         // 1. We can calculate the document size accurately without serializing again.
         // 2. We can delay parsing the results until it's needed.
         // We manually use bson.deserialize below
-        raw: true
+        raw: true,
+
+        // Limit the time for the operation to complete, to avoid getting connection timeouts
+        maxTimeMS: lib_mongo.db.MONGO_OPERATION_TIMEOUT_MS
       }
     ) as unknown as mongo.FindCursor<Buffer>;
 
@@ -357,7 +406,9 @@ export class MongoSyncBucketStorage
     // to the lower of the batch count and size limits.
     // This is similar to using `singleBatch: true` in the find options, but allows
     // detecting "hasMore".
-    let { data, hasMore: batchHasMore } = await readSingleBatch(cursor)
+    let { data, hasMore: batchHasMore } = await readSingleBatch(cursor).catch((e) => {
+      throw lib_mongo.mapQueryError(e, 'while reading bucket data');
+    });
     if (data.length == batchLimit) {
       // Limit reached - could have more data, despite the cursor being drained.
       batchHasMore = true;
@@ -486,9 +537,12 @@ export class MongoSyncBucketStorage
           }
         }
       ],
-      { session: undefined, readConcern: 'snapshot' }
+      { session: undefined, readConcern: 'snapshot', maxTimeMS: lib_mongo.db.MONGO_OPERATION_TIMEOUT_MS }
     )
-      .toArray()
+      .toArray()
+      .catch((e) => {
+        throw lib_mongo.mapQueryError(e, 'while reading checksums');
+      });
 
     return new Map<string, storage.PartialChecksum>(
       aggregate.map((doc) => {
@@ -567,7 +621,6 @@ export class MongoSyncBucketStorage
             `${this.slot_name} Cleared batch of data in ${lib_mongo.db.MONGO_CLEAR_OPERATION_TIMEOUT_MS}ms, continuing...`
           );
           await timers.setTimeout(lib_mongo.db.MONGO_CLEAR_OPERATION_TIMEOUT_MS / 5);
-          continue;
         } else {
           throw e;
         }
@@ -632,41 +685,6 @@ export class MongoSyncBucketStorage
     );
   }
 
-  async autoActivate(): Promise<void> {
-    await this.db.client.withSession(async (session) => {
-      await session.withTransaction(async () => {
-        const doc = await this.db.sync_rules.findOne({ _id: this.group_id }, { session });
-        if (doc && doc.state == 'PROCESSING') {
-          await this.db.sync_rules.updateOne(
-            {
-              _id: this.group_id
-            },
-            {
-              $set: {
-                state: storage.SyncRuleState.ACTIVE
-              }
-            },
-            { session }
-          );
-
-          await this.db.sync_rules.updateMany(
-            {
-              _id: { $ne: this.group_id },
-              state: { $in: [storage.SyncRuleState.ACTIVE, storage.SyncRuleState.ERRORED] }
-            },
-            {
-              $set: {
-                state: storage.SyncRuleState.STOP
-              }
-            },
-            { session }
-          );
-          await this.db.notifyCheckpoint();
-        }
-      });
-    });
-  }
-
   async reportError(e: any): Promise<void> {
     const message = String(e.message ?? 'Replication failure');
     await this.db.sync_rules.updateOne(
@@ -683,14 +701,11 @@ export class MongoSyncBucketStorage
   }
 
   async compact(options?: storage.CompactOptions) {
-
-
-
-
-
-      checkpoint: doc?.last_checkpoint ?? 0n,
-      lsn: doc?.last_checkpoint_lsn ?? null
-    };
+    const checkpoint = await this.getCheckpointInternal();
+    await new MongoCompactor(this.db, this.group_id, options).compact();
+    if (checkpoint != null && options?.compactParameterData) {
+      await new MongoParameterCompactor(this.db, this.group_id, checkpoint.checkpoint, options).compact();
+    }
   }
 
   /**
@@ -712,33 +727,13 @@ export class MongoSyncBucketStorage
         break;
       }
 
-      const
-
-          _id: this.group_id,
-          state: { $in: [storage.SyncRuleState.ACTIVE, storage.SyncRuleState.ERRORED] }
-        },
-        {
-          limit: 1,
-          projection: {
-            _id: 1,
-            state: 1,
-            last_checkpoint: 1,
-            last_checkpoint_lsn: 1
-          }
-        }
-      );
-
-      if (doc == null) {
-        // Sync rules not present or not active.
-        // Abort the connections - clients will have to retry later.
-        throw new ServiceError(ErrorCode.PSYNC_S2302, 'No active sync rules available');
-      } else if (doc.state != storage.SyncRuleState.ACTIVE && doc.state != storage.SyncRuleState.ERRORED) {
+      const op = await this.getCheckpointInternal();
+      if (op == null) {
         // Sync rules have changed - abort and restart.
         // We do a soft close of the stream here - no error
         break;
       }
 
-      const op = this.makeActiveCheckpoint(doc);
       // Check for LSN / checkpoint changes - ignore other metadata changes
       if (lastOp == null || op.lsn != lastOp.lsn || op.checkpoint != lastOp.checkpoint) {
         lastOp = op;
@@ -1005,3 +1000,25 @@ interface InternalCheckpointChanges extends CheckpointChanges {
   updatedWriteCheckpoints: Map<string, bigint>;
   invalidateWriteCheckpoints: boolean;
 }
+
+class MongoReplicationCheckpoint implements ReplicationCheckpoint {
+  constructor(
+    private storage: MongoSyncBucketStorage,
+    public readonly checkpoint: InternalOpId,
+    public readonly lsn: string | null,
+    public snapshotTime: mongo.Timestamp
+  ) {}
+
+  async getParameterSets(lookups: ParameterLookup[]): Promise<SqliteJsonRow[]> {
+    return this.storage.getParameterSets(this, lookups);
+  }
+}
+
+class EmptyReplicationCheckpoint implements ReplicationCheckpoint {
+  readonly checkpoint: InternalOpId = 0n;
+  readonly lsn: string | null = null;
+
+  async getParameterSets(lookups: ParameterLookup[]): Promise<SqliteJsonRow[]> {
+    return [];
+  }
+}
@@ -1,6 +1,6 @@
 import * as lib_mongo from '@powersync/lib-service-mongodb';
 import { mongo } from '@powersync/lib-service-mongodb';
-import { storage } from '@powersync/service-core';
+import { POWERSYNC_VERSION, storage } from '@powersync/service-core';
 
 import { MongoStorageConfig } from '../../types/types.js';
 import {
@@ -130,5 +130,11 @@ export class PowerSyncMongo
 }
 
 export function createPowerSyncMongo(config: MongoStorageConfig, options?: lib_mongo.MongoConnectionOptions) {
-  return new PowerSyncMongo(
+  return new PowerSyncMongo(
+    lib_mongo.createMongoClient(config, {
+      powersyncVersion: POWERSYNC_VERSION,
+      ...options
+    }),
+    { database: config.database }
+  );
 }
@@ -118,9 +118,15 @@ export interface SyncRuleDocument
   snapshot_done: boolean;
 
   /**
+   * This is now used for "resumeLsn".
+   *
    * If snapshot_done = false, this may be the lsn at which we started the snapshot.
    *
    * This can be used for resuming the snapshot after a restart.
+   *
+   * If snapshot_done is true, this is treated as the point to restart replication from.
+   *
+   * More specifically, we resume replication from max(snapshot_lsn, last_checkpoint_lsn).
    */
   snapshot_lsn: string | undefined;
 
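
The startBatch() hunk earlier computes resumeFromLsn as maxLsn(checkpoint_lsn, doc?.snapshot_lsn), matching this comment. maxLsn itself is imported from @powersync/service-core; purely as an illustration (not the service-core implementation), a helper along these lines, assuming LSN strings are encoded so that lexicographic comparison matches replication order:

```ts
// Illustrative sketch only - the real maxLsn lives in @powersync/service-core.
// Assumes LSN strings compare lexicographically in replication order.
function maxLsnSketch(a: string | null | undefined, b: string | null | undefined): string | null {
  if (a == null) {
    return b ?? null;
  }
  if (b == null) {
    return a;
  }
  return a > b ? a : b;
}

// e.g. resumeFromLsn = maxLsnSketch(last_checkpoint_lsn, snapshot_lsn)
```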
@@ -7,6 +7,7 @@ import { storage, utils } from '@powersync/service-core';
 
 import { PowerSyncMongo } from './db.js';
 import { BucketDataDocument } from './models.js';
+import { ServiceAssertionError } from '@powersync/lib-services-framework';
 
 export function idPrefixFilter<T>(prefix: Partial<T>, rest: (keyof T)[]): mongo.Condition<T> {
   let filter = {
@@ -104,23 +105,28 @@ export function replicaIdToSubkey(table: bson.ObjectId, id: storage.ReplicaId):
   }
 }
 
-/**
- * Helper function for creating a MongoDB client from consumers of this package
- */
-export const createMongoClient = (url: string, options?: mongo.MongoClientOptions) => {
-  return new mongo.MongoClient(url, options);
-};
-
 /**
  * Helper for unit tests
  */
 export const connectMongoForTests = (url: string, isCI: boolean) => {
   // Short timeout for tests, to fail fast when the server is not available.
   // Slightly longer timeouts for CI, to avoid arbitrary test failures
-  const client =
+  const client = new mongo.MongoClient(url, {
     connectTimeoutMS: isCI ? 15_000 : 5_000,
     socketTimeoutMS: isCI ? 15_000 : 5_000,
     serverSelectionTimeoutMS: isCI ? 15_000 : 2_500
   });
   return new PowerSyncMongo(client);
 };
+
+export function setSessionSnapshotTime(session: mongo.ClientSession, time: bson.Timestamp) {
+  // This is a workaround for the lack of direct support for snapshot reads in the MongoDB driver.
+  if (!session.snapshotEnabled) {
+    throw new ServiceAssertionError(`Session must be a snapshot session`);
+  }
+  if ((session as any).snapshotTime == null) {
+    (session as any).snapshotTime = time;
+  } else {
+    throw new ServiceAssertionError(`Session snapshotTime is already set`);
+  }
+}
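
A usage sketch for the new setSessionSnapshotTime helper (illustrative only, not part of the package diff; the collection, query, and import path are placeholders). The time must be set before the first read on the snapshot session, which is how getParameterSets() above uses it:

```ts
import { MongoClient, Timestamp } from 'mongodb';
import { setSessionSnapshotTime } from './util.js';

// Sketch: read data pinned to a previously captured cluster time.
async function readAtSnapshotTime(client: MongoClient, snapshotTime: Timestamp) {
  return client.withSession({ snapshot: true }, async (session) => {
    // Must happen before any read on this session; the helper throws otherwise.
    setSessionSnapshotTime(session, snapshotTime);
    return client
      .db('powersync')
      .collection('bucket_parameters')
      .find({}, { session, readConcern: 'snapshot' })
      .toArray();
  });
}
```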
@@ -3,3 +3,5 @@ import { describe } from 'vitest';
 import { INITIALIZED_MONGO_STORAGE_FACTORY } from './util.js';
 
 describe('Mongo Sync Bucket Storage Compact', () => register.registerCompactTests(INITIALIZED_MONGO_STORAGE_FACTORY));
+describe('Mongo Sync Parameter Storage Compact', () =>
+  register.registerParameterCompactTests(INITIALIZED_MONGO_STORAGE_FACTORY));