@powersync/service-module-mongodb-storage 0.0.0-dev-20250122110924 → 0.0.0-dev-20250227082606
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +101 -10
- package/dist/migrations/MongoMigrationAgent.js +3 -0
- package/dist/migrations/MongoMigrationAgent.js.map +1 -1
- package/dist/storage/MongoBucketStorage.d.ts +9 -20
- package/dist/storage/MongoBucketStorage.js +86 -199
- package/dist/storage/MongoBucketStorage.js.map +1 -1
- package/dist/storage/implementation/MongoBucketBatch.d.ts +3 -3
- package/dist/storage/implementation/MongoBucketBatch.js +37 -24
- package/dist/storage/implementation/MongoBucketBatch.js.map +1 -1
- package/dist/storage/implementation/MongoCompactor.d.ts +0 -6
- package/dist/storage/implementation/MongoCompactor.js +11 -4
- package/dist/storage/implementation/MongoCompactor.js.map +1 -1
- package/dist/storage/implementation/MongoIdSequence.js +1 -0
- package/dist/storage/implementation/MongoIdSequence.js.map +1 -1
- package/dist/storage/implementation/MongoPersistedSyncRules.js +4 -0
- package/dist/storage/implementation/MongoPersistedSyncRules.js.map +1 -1
- package/dist/storage/implementation/MongoPersistedSyncRulesContent.js +9 -1
- package/dist/storage/implementation/MongoPersistedSyncRulesContent.js.map +1 -1
- package/dist/storage/implementation/MongoStorageProvider.js +3 -1
- package/dist/storage/implementation/MongoStorageProvider.js.map +1 -1
- package/dist/storage/implementation/MongoSyncBucketStorage.d.ts +16 -3
- package/dist/storage/implementation/MongoSyncBucketStorage.js +225 -22
- package/dist/storage/implementation/MongoSyncBucketStorage.js.map +1 -1
- package/dist/storage/implementation/MongoSyncRulesLock.js +5 -1
- package/dist/storage/implementation/MongoSyncRulesLock.js.map +1 -1
- package/dist/storage/implementation/MongoWriteCheckpointAPI.js +3 -2
- package/dist/storage/implementation/MongoWriteCheckpointAPI.js.map +1 -1
- package/dist/storage/implementation/OperationBatch.js +10 -6
- package/dist/storage/implementation/OperationBatch.js.map +1 -1
- package/dist/storage/implementation/PersistedBatch.js +18 -18
- package/dist/storage/implementation/PersistedBatch.js.map +1 -1
- package/dist/storage/implementation/db.d.ts +1 -1
- package/dist/storage/implementation/db.js +15 -3
- package/dist/storage/implementation/db.js.map +1 -1
- package/dist/storage/implementation/models.d.ts +1 -0
- package/dist/storage/implementation/util.d.ts +0 -14
- package/dist/storage/implementation/util.js +3 -41
- package/dist/storage/implementation/util.js.map +1 -1
- package/package.json +8 -8
- package/src/storage/MongoBucketStorage.ts +88 -232
- package/src/storage/implementation/MongoBucketBatch.ts +12 -10
- package/src/storage/implementation/MongoCompactor.ts +2 -10
- package/src/storage/implementation/MongoStorageProvider.ts +3 -1
- package/src/storage/implementation/MongoSyncBucketStorage.ts +292 -37
- package/src/storage/implementation/MongoWriteCheckpointAPI.ts +1 -3
- package/src/storage/implementation/PersistedBatch.ts +4 -5
- package/src/storage/implementation/db.ts +3 -3
- package/src/storage/implementation/models.ts +5 -0
- package/src/storage/implementation/util.ts +0 -45
- package/test/src/__snapshots__/storage_sync.test.ts.snap +138 -0
- package/test/src/storage_compacting.test.ts +1 -7
- package/test/src/storage_sync.test.ts +1 -1
- package/tsconfig.tsbuildinfo +1 -1

package/src/storage/implementation/MongoBucketBatch.ts

```diff
@@ -3,16 +3,15 @@ import { SqlEventDescriptor, SqliteRow, SqlSyncRules } from '@powersync/service-
 import * as bson from 'bson';
 
 import {
+  BaseObserver,
   container,
-  DisposableObserver,
   ErrorCode,
   errors,
   logger,
   ReplicationAssertionError,
-  ServiceAssertionError,
   ServiceError
 } from '@powersync/lib-services-framework';
-import { SaveOperationTag, storage, utils } from '@powersync/service-core';
+import { deserializeBson, SaveOperationTag, storage, utils } from '@powersync/service-core';
 import * as timers from 'node:timers/promises';
 import { PowerSyncMongo } from './db.js';
 import { CurrentBucket, CurrentDataDocument, SourceKey, SyncRuleDocument } from './models.js';
@@ -50,7 +49,7 @@ export interface MongoBucketBatchOptions {
 }
 
 export class MongoBucketBatch
-  extends DisposableObserver<storage.BucketBatchStorageListener>
+  extends BaseObserver<storage.BucketBatchStorageListener>
   implements storage.BucketStorageBatch
 {
   private readonly client: mongo.MongoClient;
@@ -323,10 +322,7 @@
           existing_buckets = result.buckets;
           existing_lookups = result.lookups;
           if (this.storeCurrentData) {
-            const data = bson.deserialize(
-              (result.data as mongo.Binary).buffer,
-              storage.BSON_DESERIALIZE_OPTIONS
-            ) as SqliteRow;
+            const data = deserializeBson((result.data as mongo.Binary).buffer) as SqliteRow;
             after = storage.mergeToast(after!, data);
           }
         }
@@ -611,12 +607,14 @@
 
   async [Symbol.asyncDispose]() {
     await this.session.endSession();
-    super[Symbol.dispose]();
+    super.clearListeners();
   }
 
   private lastWaitingLogThottled = 0;
 
-  async commit(lsn: string): Promise<boolean> {
+  async commit(lsn: string, options?: storage.BucketBatchCommitOptions): Promise<boolean> {
+    const { createEmptyCheckpoints } = { ...storage.DEFAULT_BUCKET_BATCH_COMMIT_OPTIONS, ...options };
+
     await this.flush();
 
     if (this.last_checkpoint_lsn != null && lsn < this.last_checkpoint_lsn) {
@@ -654,6 +652,10 @@
       return false;
     }
 
+    if (!createEmptyCheckpoints && this.persisted_op == null) {
+      return false;
+    }
+
     const now = new Date();
     const update: Partial<SyncRuleDocument> = {
       last_checkpoint_lsn: lsn,
```
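
The `commit()` change above is the behavioural core of this release for MongoBucketBatch: callers can now opt out of writing a checkpoint when the batch persisted nothing. A minimal sketch of that merge-and-skip logic, using local stand-in types rather than the package's real `storage` exports (the names below are illustrative only):

```ts
// Illustrative stand-ins for the commit options shown in the diff above.
interface BucketBatchCommitOptions {
  /** When false, commit() becomes a no-op if no data operations were persisted. */
  createEmptyCheckpoints?: boolean;
}

const DEFAULT_BUCKET_BATCH_COMMIT_OPTIONS: Required<BucketBatchCommitOptions> = {
  createEmptyCheckpoints: true
};

// Mirrors the early return added to MongoBucketBatch.commit():
// merge defaults, then skip the checkpoint update when nothing was persisted.
function shouldWriteCheckpoint(persistedOp: bigint | null, options?: BucketBatchCommitOptions): boolean {
  const { createEmptyCheckpoints } = { ...DEFAULT_BUCKET_BATCH_COMMIT_OPTIONS, ...options };
  if (!createEmptyCheckpoints && persistedOp == null) {
    return false;
  }
  return true;
}

// A keepalive with no new operations only advances the checkpoint
// when empty checkpoints are allowed.
console.log(shouldWriteCheckpoint(null, { createEmptyCheckpoints: false })); // false
console.log(shouldWriteCheckpoint(1n)); // true
```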

package/src/storage/implementation/MongoCompactor.ts

```diff
@@ -5,7 +5,6 @@ import { storage, utils } from '@powersync/service-core';
 import { PowerSyncMongo } from './db.js';
 import { BucketDataDocument, BucketDataKey } from './models.js';
 import { cacheKey } from './OperationBatch.js';
-import { safeBulkWrite } from './util.js';
 
 interface CurrentBucketState {
   /** Bucket name */
@@ -33,14 +32,7 @@
 /**
  * Additional options, primarily for testing.
  */
-export interface MongoCompactOptions extends storage.CompactOptions {
-  /** Minimum of 2 */
-  clearBatchLimit?: number;
-  /** Minimum of 1 */
-  moveBatchLimit?: number;
-  /** Minimum of 1 */
-  moveBatchQueryLimit?: number;
-}
+export interface MongoCompactOptions extends storage.CompactOptions {}
 
 const DEFAULT_CLEAR_BATCH_LIMIT = 5000;
 const DEFAULT_MOVE_BATCH_LIMIT = 2000;
@@ -265,7 +257,7 @@ export class MongoCompactor {
   private async flush() {
     if (this.updates.length > 0) {
       logger.info(`Compacting ${this.updates.length} ops`);
-      await safeBulkWrite(this.db.bucket_data, this.updates, {
+      await this.db.bucket_data.bulkWrite(this.updates, {
         // Order is not important.
         // Since checksums are not affected, these operations can happen in any order,
         // and it's fine if the operations are partially applied.
```

package/src/storage/implementation/MongoStorageProvider.ts

```diff
@@ -22,7 +22,9 @@ export class MongoStorageProvider implements storage.BucketStorageProvider {
     }
 
     const decodedConfig = MongoStorageConfig.decode(storage as any);
-    const client = lib_mongo.db.createMongoClient(decodedConfig);
+    const client = lib_mongo.db.createMongoClient(decodedConfig, {
+      maxPoolSize: resolvedConfig.storage.max_pool_size ?? 8
+    });
 
     const database = new PowerSyncMongo(client, { database: resolvedConfig.storage.database });
     const factory = new MongoBucketStorage(database, {
```

package/src/storage/implementation/MongoSyncBucketStorage.ts

```diff
@@ -1,20 +1,44 @@
 import * as lib_mongo from '@powersync/lib-service-mongodb';
 import { mongo } from '@powersync/lib-service-mongodb';
-import { DisposableObserver, logger } from '@powersync/lib-services-framework';
-import { storage, utils } from '@powersync/service-core';
+import {
+  BaseObserver,
+  ErrorCode,
+  logger,
+  ServiceAssertionError,
+  ServiceError
+} from '@powersync/lib-services-framework';
+import {
+  BroadcastIterable,
+  CHECKPOINT_INVALIDATE_ALL,
+  CheckpointChanges,
+  GetCheckpointChangesOptions,
+  ReplicationCheckpoint,
+  SourceTable,
+  storage,
+  utils,
+  WatchWriteCheckpointOptions
+} from '@powersync/service-core';
 import { SqliteJsonRow, SqliteJsonValue, SqlSyncRules } from '@powersync/service-sync-rules';
 import * as bson from 'bson';
+import { wrapWithAbort } from 'ix/asynciterable/operators/withabort.js';
 import * as timers from 'timers/promises';
 import { MongoBucketStorage } from '../MongoBucketStorage.js';
 import { PowerSyncMongo } from './db.js';
-import { BucketDataDocument, BucketDataKey, SourceKey, SyncRuleDocument } from './models.js';
+import {
+  BucketDataDocument,
+  BucketDataKey,
+  SourceKey,
+  SourceTableDocument,
+  SyncRuleCheckpointState,
+  SyncRuleDocument
+} from './models.js';
 import { MongoBucketBatch } from './MongoBucketBatch.js';
 import { MongoCompactor } from './MongoCompactor.js';
 import { MongoWriteCheckpointAPI } from './MongoWriteCheckpointAPI.js';
 import { idPrefixFilter, mapOpEntry, readSingleBatch } from './util.js';
 
 export class MongoSyncBucketStorage
-  extends DisposableObserver<storage.SyncRulesBucketStorageListener>
+  extends BaseObserver<storage.SyncRulesBucketStorageListener>
   implements storage.SyncRulesBucketStorage
 {
   private readonly db: PowerSyncMongo;
@@ -147,17 +171,17 @@
     let result: storage.ResolveTableResult | null = null;
     await this.db.client.withSession(async (session) => {
       const col = this.db.source_tables;
-      let doc = await col.findOne(
-        {
-          group_id: group_id,
-          connection_id: connection_id,
-          relation_id: objectId,
-          schema_name: schema,
-          table_name: table,
-          replica_id_columns2: columns
-        },
-        { session }
-      );
+      let filter: Partial<SourceTableDocument> = {
+        group_id: group_id,
+        connection_id: connection_id,
+        schema_name: schema,
+        table_name: table,
+        replica_id_columns2: columns
+      };
+      if (objectId != null) {
+        filter.relation_id = objectId;
+      }
+      let doc = await col.findOne(filter, { session });
       if (doc == null) {
         doc = {
           _id: new bson.ObjectId(),
@@ -186,31 +210,40 @@
       sourceTable.syncData = options.sync_rules.tableSyncsData(sourceTable);
       sourceTable.syncParameters = options.sync_rules.tableSyncsParameters(sourceTable);
 
+      let dropTables: storage.SourceTable[] = [];
+      // Detect tables that are either renamed, or have different replica_id_columns
+      let truncateFilter = [{ schema_name: schema, table_name: table }] as any[];
+      if (objectId != null) {
+        // Only detect renames if the source uses relation ids.
+        truncateFilter.push({ relation_id: objectId });
+      }
       const truncate = await col
         .find(
           {
             group_id: group_id,
             connection_id: connection_id,
             _id: { $ne: doc._id },
-            $or: [{ schema_name: schema, table_name: table }, { relation_id: objectId }]
+            $or: truncateFilter
           },
           { session }
         )
         .toArray();
+      dropTables = truncate.map(
+        (doc) =>
+          new storage.SourceTable(
+            doc._id,
+            connection_tag,
+            doc.relation_id,
+            doc.schema_name,
+            doc.table_name,
+            doc.replica_id_columns2?.map((c) => ({ name: c.name, typeOid: c.type_oid, type: c.type })) ?? [],
+            doc.snapshot_done ?? true
+          )
+      );
+
       result = {
         table: sourceTable,
-        dropTables: truncate.map(
-          (doc) =>
-            new storage.SourceTable(
-              doc._id,
-              connection_tag,
-              doc.relation_id ?? 0,
-              doc.schema_name,
-              doc.table_name,
-              doc.replica_id_columns2?.map((c) => ({ name: c.name, typeOid: c.type_oid, type: c.type })) ?? [],
-              doc.snapshot_done ?? true
-            )
-        )
+        dropTables: dropTables
       };
     });
     return result!;
@@ -297,13 +330,7 @@
         // 1. We can calculate the document size accurately without serializing again.
         // 2. We can delay parsing the results until it's needed.
         // We manually use bson.deserialize below
-        raw: true,
-
-        // Since we're using raw: true and parsing ourselves later, we don't need bigint
-        // support here.
-        // Disabling due to https://jira.mongodb.org/browse/NODE-6165, and the fact that this
-        // is one of our most common queries.
-        useBigInt64: false
+        raw: true
       }
     ) as unknown as mongo.FindCursor<Buffer>;
 
@@ -324,7 +351,7 @@
 
     // Ordered by _id, meaning buckets are grouped together
     for (let rawData of data) {
-      const row = bson.deserialize(rawData, storage.BSON_DESERIALIZE_OPTIONS) as BucketDataDocument;
+      const row = bson.deserialize(rawData, storage.BSON_DESERIALIZE_INTERNAL_OPTIONS) as BucketDataDocument;
       const bucket = row._id.b;
 
       if (currentBatch == null || currentBatch.bucket != bucket || batchSize >= sizeLimit) {
@@ -577,7 +604,7 @@
     await this.db.sync_rules.updateMany(
       {
         _id: { $ne: this.group_id },
-        state: storage.SyncRuleState.ACTIVE
+        state: { $in: [storage.SyncRuleState.ACTIVE, storage.SyncRuleState.ERRORED] }
       },
       {
         $set: {
@@ -608,4 +635,232 @@
   async compact(options?: storage.CompactOptions) {
     return new MongoCompactor(this.db, this.group_id, options).compact();
   }
+
+  private makeActiveCheckpoint(doc: SyncRuleCheckpointState | null) {
+    return {
+      checkpoint: utils.timestampToOpId(doc?.last_checkpoint ?? 0n),
+      lsn: doc?.last_checkpoint_lsn ?? null
+    };
+  }
+
+  /**
+   * Instance-wide watch on the latest available checkpoint (op_id + lsn).
+   */
+  private async *watchActiveCheckpoint(signal: AbortSignal): AsyncIterable<ReplicationCheckpoint> {
+    // Use this form instead of (doc: SyncRuleCheckpointState | null = null),
+    // otherwise we get weird "doc: never" issues.
+    let doc = null as SyncRuleCheckpointState | null;
+    let clusterTime = null as mongo.Timestamp | null;
+    const syncRulesId = this.group_id;
+
+    await this.db.client.withSession(async (session) => {
+      doc = await this.db.sync_rules.findOne(
+        {
+          _id: syncRulesId,
+          state: { $in: [storage.SyncRuleState.ACTIVE, storage.SyncRuleState.ERRORED] }
+        },
+        {
+          session,
+          sort: { _id: -1 },
+          limit: 1,
+          projection: {
+            _id: 1,
+            state: 1,
+            last_checkpoint: 1,
+            last_checkpoint_lsn: 1
+          }
+        }
+      );
+      const time = session.clusterTime?.clusterTime ?? null;
+      clusterTime = time;
+    });
+    if (clusterTime == null) {
+      throw new ServiceError(ErrorCode.PSYNC_S2401, 'Could not get clusterTime');
+    }
+
+    if (signal.aborted) {
+      return;
+    }
+
+    if (doc == null) {
+      // Sync rules not present or not active.
+      // Abort the connections - clients will have to retry later.
+      // Should this error instead?
+      return;
+    }
+
+    yield this.makeActiveCheckpoint(doc);
+
+    // We only watch changes to the active sync rules.
+    // If it changes to inactive, we abort and restart with the new sync rules.
+
+    const pipeline = this.getChangeStreamPipeline();
+
+    const stream = this.db.sync_rules.watch(pipeline, {
+      // Start at the cluster time where we got the initial doc, to make sure
+      // we don't skip any updates.
+      // This may result in the first operation being a duplicate, but we filter
+      // it out anyway.
+      startAtOperationTime: clusterTime
+    });
+
+    signal.addEventListener(
+      'abort',
+      () => {
+        stream.close();
+      },
+      { once: true }
+    );
+
+    let lastOp: storage.ReplicationCheckpoint | null = null;
+    let lastDoc: SyncRuleCheckpointState | null = doc;
+
+    for await (const update of stream.stream()) {
+      if (signal.aborted) {
+        break;
+      }
+      if (update.operationType != 'insert' && update.operationType != 'update' && update.operationType != 'replace') {
+        continue;
+      }
+
+      const doc = await this.getOperationDoc(lastDoc, update as lib_mongo.mongo.ChangeStreamDocument<SyncRuleDocument>);
+      if (doc == null) {
+        // Irrelevant update
+        continue;
+      }
+      if (doc.state != storage.SyncRuleState.ACTIVE && doc.state != storage.SyncRuleState.ERRORED) {
+        // Sync rules have changed - abort and restart.
+        // Should this error instead?
+        break;
+      }
+
+      lastDoc = doc;
+
+      const op = this.makeActiveCheckpoint(doc);
+      // Check for LSN / checkpoint changes - ignore other metadata changes
+      if (lastOp == null || op.lsn != lastOp.lsn || op.checkpoint != lastOp.checkpoint) {
+        lastOp = op;
+        yield op;
+      }
+    }
+  }
+
+  // Nothing is done here until a subscriber starts to iterate
+  private readonly sharedIter = new BroadcastIterable((signal) => {
+    return this.watchActiveCheckpoint(signal);
+  });
+
+  /**
+   * User-specific watch on the latest checkpoint and/or write checkpoint.
+   */
+  async *watchWriteCheckpoint(options: WatchWriteCheckpointOptions): AsyncIterable<storage.StorageCheckpointUpdate> {
+    const { user_id, signal } = options;
+    let lastCheckpoint: utils.OpId | null = null;
+    let lastWriteCheckpoint: bigint | null = null;
+
+    const iter = wrapWithAbort(this.sharedIter, signal);
+    for await (const event of iter) {
+      const { checkpoint, lsn } = event;
+
+      // lsn changes are not important by itself.
+      // What is important is:
+      // 1. checkpoint (op_id) changes.
+      // 2. write checkpoint changes for the specific user
+
+      const lsnFilters: Record<string, string> = lsn ? { 1: lsn } : {};
+
+      const currentWriteCheckpoint = await this.lastWriteCheckpoint({
+        user_id,
+        heads: {
+          ...lsnFilters
+        }
+      });
+
+      if (currentWriteCheckpoint == lastWriteCheckpoint && checkpoint == lastCheckpoint) {
+        // No change - wait for next one
+        // In some cases, many LSNs may be produced in a short time.
+        // Add a delay to throttle the write checkpoint lookup a bit.
+        await timers.setTimeout(20 + 10 * Math.random());
+        continue;
+      }
+
+      const updates: CheckpointChanges =
+        lastCheckpoint == null
+          ? {
+              invalidateDataBuckets: true,
+              invalidateParameterBuckets: true,
+              updatedDataBuckets: [],
+              updatedParameterBucketDefinitions: []
+            }
+          : await this.getCheckpointChanges({
+              lastCheckpoint: lastCheckpoint,
+              nextCheckpoint: checkpoint
+            });
+
+      lastWriteCheckpoint = currentWriteCheckpoint;
+      lastCheckpoint = checkpoint;
+
+      yield {
+        base: event,
+        writeCheckpoint: currentWriteCheckpoint,
+        update: updates
+      };
+    }
+  }
+
+  private async getOperationDoc(
+    lastDoc: SyncRuleCheckpointState,
+    update: lib_mongo.mongo.ChangeStreamDocument<SyncRuleDocument>
+  ): Promise<SyncRuleCheckpointState | null> {
+    if (update.operationType == 'insert' || update.operationType == 'replace') {
+      return update.fullDocument;
+    } else if (update.operationType == 'update') {
+      const updatedFields = update.updateDescription.updatedFields ?? {};
+      if (lastDoc._id != update.documentKey._id) {
+        throw new ServiceAssertionError(`Sync rules id mismatch: ${lastDoc._id} != ${update.documentKey._id}`);
+      }
+
+      const mergedDoc: SyncRuleCheckpointState = {
+        _id: lastDoc._id,
+        last_checkpoint: updatedFields.last_checkpoint ?? lastDoc.last_checkpoint,
+        last_checkpoint_lsn: updatedFields.last_checkpoint_lsn ?? lastDoc.last_checkpoint_lsn,
+        state: updatedFields.state ?? lastDoc.state
+      };
+
+      return mergedDoc;
+    } else {
+      // Unknown event type
+      return null;
+    }
+  }
+
+  private getChangeStreamPipeline() {
+    const syncRulesId = this.group_id;
+    const pipeline: mongo.Document[] = [
+      {
+        $match: {
+          'documentKey._id': syncRulesId,
+          operationType: { $in: ['insert', 'update', 'replace'] }
+        }
+      },
+      {
+        $project: {
+          operationType: 1,
+          'documentKey._id': 1,
+          'updateDescription.updatedFields.state': 1,
+          'updateDescription.updatedFields.last_checkpoint': 1,
+          'updateDescription.updatedFields.last_checkpoint_lsn': 1,
+          'fullDocument._id': 1,
+          'fullDocument.state': 1,
+          'fullDocument.last_checkpoint': 1,
+          'fullDocument.last_checkpoint_lsn': 1
+        }
+      }
+    ];
+    return pipeline;
+  }
+
+  async getCheckpointChanges(options: GetCheckpointChangesOptions): Promise<CheckpointChanges> {
+    return CHECKPOINT_INVALIDATE_ALL;
+  }
 }
```
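
The new checkpoint watching above reads the latest sync-rules document inside a session, then opens a MongoDB change stream pinned with `startAtOperationTime` to that session's cluster time, so no update between the read and the watch is lost. A rough standalone sketch of that pattern using the `mongodb` driver directly; the database name, collection name, and filter below are placeholders, not the package's schema:

```ts
import { MongoClient, Timestamp, Document } from 'mongodb';

// Watch a single document for checkpoint-style updates without missing
// changes between the initial read and the start of the change stream.
async function* watchDocument(client: MongoClient, signal: AbortSignal): AsyncIterable<Document> {
  const collection = client.db('demo').collection('sync_rules');

  // Same assignment style as the diff, to avoid narrowing-to-null issues.
  let initial = null as Document | null;
  let clusterTime = null as Timestamp | null;
  await client.withSession(async (session) => {
    initial = await collection.findOne({}, { session, sort: { _id: -1 } });
    clusterTime = session.clusterTime?.clusterTime ?? null;
  });
  if (initial == null || clusterTime == null || signal.aborted) {
    return;
  }
  yield initial;

  const stream = collection.watch(
    [{ $match: { operationType: { $in: ['insert', 'update', 'replace'] } } }],
    // Resume from the read's cluster time; the first event may be a duplicate.
    { startAtOperationTime: clusterTime }
  );
  signal.addEventListener('abort', () => stream.close(), { once: true });

  for await (const change of stream.stream()) {
    if (signal.aborted) {
      break;
    }
    yield change as Document;
  }
}
```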

package/src/storage/implementation/MongoWriteCheckpointAPI.ts

```diff
@@ -1,7 +1,6 @@
 import * as framework from '@powersync/lib-services-framework';
 import { storage } from '@powersync/service-core';
 import { PowerSyncMongo } from './db.js';
-import { safeBulkWrite } from './util.js';
 
 export type MongoCheckpointAPIOptions = {
   db: PowerSyncMongo;
@@ -127,8 +126,7 @@ export async function batchCreateCustomWriteCheckpoints(
     return;
   }
 
-  await safeBulkWrite(
-    db.custom_write_checkpoints,
+  await db.custom_write_checkpoints.bulkWrite(
     checkpoints.map((checkpointOptions) => ({
       updateOne: {
         filter: { user_id: checkpointOptions.user_id, sync_rules_id: checkpointOptions.sync_rules_id },
```
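
batchCreateCustomWriteCheckpoints now writes through the driver's `bulkWrite` instead of the removed `safeBulkWrite` helper. For orientation, a small self-contained sketch of the updateOne-with-upsert batch shape used for per-user write checkpoints; the database, collection, and update fields here are illustrative assumptions, not the package's exact document layout:

```ts
import { MongoClient } from 'mongodb';

interface CustomWriteCheckpoint {
  user_id: string;
  sync_rules_id: number;
  checkpoint: number;
}

// Upsert one document per (user_id, sync_rules_id) pair in a single round trip.
async function saveCheckpoints(client: MongoClient, checkpoints: CustomWriteCheckpoint[]) {
  if (checkpoints.length == 0) {
    return;
  }
  await client
    .db('demo')
    .collection('custom_write_checkpoints')
    .bulkWrite(
      checkpoints.map((c) => ({
        updateOne: {
          filter: { user_id: c.user_id, sync_rules_id: c.sync_rules_id },
          update: { $set: { checkpoint: c.checkpoint } },
          upsert: true
        }
      }))
    );
}
```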

package/src/storage/implementation/PersistedBatch.ts

```diff
@@ -15,7 +15,7 @@ import {
   CurrentDataDocument,
   SourceKey
 } from './models.js';
-import { replicaIdToSubkey, safeBulkWrite } from './util.js';
+import { replicaIdToSubkey } from './util.js';
 
 /**
  * Maximum size of operations we write in a single transaction.
@@ -246,22 +246,21 @@ export class PersistedBatch {
 
   async flush(db: PowerSyncMongo, session: mongo.ClientSession) {
     if (this.bucketData.length > 0) {
-
-      await safeBulkWrite(db.bucket_data, this.bucketData, {
+      await db.bucket_data.bulkWrite(this.bucketData, {
         session,
         // inserts only - order doesn't matter
         ordered: false
       });
     }
     if (this.bucketParameters.length > 0) {
-      await safeBulkWrite(db.bucket_parameters, this.bucketParameters, {
+      await db.bucket_parameters.bulkWrite(this.bucketParameters, {
         session,
         // inserts only - order doesn't matter
        ordered: false
       });
     }
     if (this.currentData.length > 0) {
-      await safeBulkWrite(db.current_data, this.currentData, {
+      await db.current_data.bulkWrite(this.currentData, {
         session,
         // may update and delete data within the same batch - order matters
         ordered: true
```
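
PersistedBatch.flush() keeps the same ordering semantics as before: insert-only batches run unordered, while the current_data batch stays ordered because updates and deletes for the same key can appear in one batch. A small illustration of the flag's effect; the collection and document names are placeholders:

```ts
import { MongoClient } from 'mongodb';

async function flushExample(client: MongoClient) {
  const db = client.db('demo');

  // Insert-only batch: order does not matter, so let the server execute
  // operations in parallel and continue past individual failures.
  await db.collection('bucket_data').bulkWrite(
    [{ insertOne: { document: { op: 'PUT', op_id: 1 } } }, { insertOne: { document: { op: 'PUT', op_id: 2 } } }],
    { ordered: false }
  );

  // Mixed update/delete batch touching the same key: order matters, so keep
  // ordered execution (operations run sequentially, stopping at the first error).
  await db.collection('current_data').bulkWrite(
    [
      { replaceOne: { filter: { _id: 1 }, replacement: { _id: 1, data: 'x' }, upsert: true } },
      { deleteOne: { filter: { _id: 1 } } }
    ],
    { ordered: true }
  );
}
```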

package/src/storage/implementation/db.ts

```diff
@@ -41,7 +41,7 @@ export class PowerSyncMongo {
     this.client = client;
 
     const db = client.db(options?.database, {
-      ...storage.BSON_DESERIALIZE_OPTIONS
+      ...storage.BSON_DESERIALIZE_INTERNAL_OPTIONS
     });
     this.db = db;
 
@@ -82,6 +82,6 @@ export class PowerSyncMongo {
   }
 }
 
-export function createPowerSyncMongo(config: MongoStorageConfig) {
-  return new PowerSyncMongo(lib_mongo.createMongoClient(config), { database: config.database });
+export function createPowerSyncMongo(config: MongoStorageConfig, options?: lib_mongo.MongoConnectionOptions) {
+  return new PowerSyncMongo(lib_mongo.createMongoClient(config, options), { database: config.database });
 }
```
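
createPowerSyncMongo now forwards driver connection options, which is how the storage `max_pool_size` setting (defaulting to 8 in MongoStorageProvider above) reaches the MongoClient. A hedged usage sketch; the config literal below is illustrative and cast loosely rather than matching the real MongoStorageConfig shape:

```ts
import { createPowerSyncMongo } from './db.js';

// Illustrative config values; the real MongoStorageConfig is decoded from the
// service configuration and includes more fields.
const config = {
  type: 'mongodb',
  uri: 'mongodb://localhost:27017/powersync',
  database: 'powersync'
} as any;

// Forward a connection pool limit to the underlying MongoClient, mirroring
// the max_pool_size ?? 8 default applied by MongoStorageProvider.
const db = createPowerSyncMongo(config, { maxPoolSize: 8 });
```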

package/src/storage/implementation/models.ts

```diff
@@ -141,6 +141,11 @@ export interface SyncRuleDocument {
   content: string;
 }
 
+export type SyncRuleCheckpointState = Pick<
+  SyncRuleDocument,
+  'last_checkpoint' | 'last_checkpoint_lsn' | '_id' | 'state'
+>;
+
 export interface CustomWriteCheckpointDocument {
   _id: bson.ObjectId;
   user_id: string;
```
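
SyncRuleCheckpointState narrows the full sync-rules document to exactly the fields the checkpoint watcher projects from the change stream. A tiny self-contained illustration of the same Pick pattern, using local stand-in types rather than the package's models:

```ts
// Local stand-in for the fields the checkpoint watcher reads.
interface SyncRuleDocumentExample {
  _id: number;
  state: string;
  last_checkpoint: bigint | null;
  last_checkpoint_lsn: string | null;
  content: string;
}

type CheckpointState = Pick<SyncRuleDocumentExample, '_id' | 'state' | 'last_checkpoint' | 'last_checkpoint_lsn'>;

// Only the projected fields are needed to describe a checkpoint.
function describe(doc: CheckpointState): string {
  return `${doc.last_checkpoint ?? 0n} @ ${doc.last_checkpoint_lsn ?? '-'} (${doc.state})`;
}

console.log(describe({ _id: 1, state: 'ACTIVE', last_checkpoint: 10n, last_checkpoint_lsn: '0/1' }));
```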

package/src/storage/implementation/util.ts

```diff
@@ -124,48 +124,3 @@ export const connectMongoForTests = (url: string, isCI: boolean) => {
   });
   return new PowerSyncMongo(client);
 };
-
-/**
- * MongoDB bulkWrite internally splits the operations into batches
- * so that no batch exceeds 16MB. However, there are cases where
- * the batch size is very close to 16MB, where additional metadata
- * on the server pushes it over the limit, resulting in this error
- * from the server:
- *
- * > MongoBulkWriteError: BSONObj size: 16814023 (0x1008FC7) is invalid. Size must be between 0 and 16793600(16MB) First element: insert: "bucket_data"
- *
- * We work around the issue by doing our own batching, limiting the
- * batch size to 15MB. This does add additional overhead with
- * BSON.calculateObjectSize.
- */
-export async function safeBulkWrite<T extends mongo.Document>(
-  collection: mongo.Collection<T>,
-  operations: mongo.AnyBulkWriteOperation<T>[],
-  options: mongo.BulkWriteOptions
-) {
-  // Must be below 16MB.
-  // We could probably go a little closer, but 15MB is a safe threshold.
-  const BULK_WRITE_LIMIT = 15 * 1024 * 1024;
-
-  let batch: mongo.AnyBulkWriteOperation<T>[] = [];
-  let currentSize = 0;
-  // Estimated overhead per operation, should be smaller in reality.
-  const keySize = 8;
-  for (let op of operations) {
-    const bsonSize =
-      mongo.BSON.calculateObjectSize(op, {
-        checkKeys: false,
-        ignoreUndefined: true
-      } as any) + keySize;
-    if (batch.length > 0 && currentSize + bsonSize > BULK_WRITE_LIMIT) {
-      await collection.bulkWrite(batch, options);
-      currentSize = 0;
-      batch = [];
-    }
-    batch.push(op);
-    currentSize += bsonSize;
-  }
-  if (batch.length > 0) {
-    await collection.bulkWrite(batch, options);
-  }
-}
```