@powersync/service-module-mongodb-storage 0.12.9 → 0.12.11
- package/CHANGELOG.md +22 -0
- package/dist/migrations/db/migrations/1752661449910-connection-reporting.js +1 -21
- package/dist/migrations/db/migrations/1752661449910-connection-reporting.js.map +1 -1
- package/dist/migrations/db/migrations/1762790715147-connection-reporting2.d.ts +3 -0
- package/dist/migrations/db/migrations/1762790715147-connection-reporting2.js +36 -0
- package/dist/migrations/db/migrations/1762790715147-connection-reporting2.js.map +1 -0
- package/dist/storage/implementation/MongoChecksums.d.ts +7 -1
- package/dist/storage/implementation/MongoChecksums.js +15 -6
- package/dist/storage/implementation/MongoChecksums.js.map +1 -1
- package/dist/storage/implementation/MongoCompactor.d.ts +1 -0
- package/dist/storage/implementation/MongoCompactor.js +40 -10
- package/dist/storage/implementation/MongoCompactor.js.map +1 -1
- package/dist/storage/implementation/MongoSyncRulesLock.js +8 -1
- package/dist/storage/implementation/MongoSyncRulesLock.js.map +1 -1
- package/dist/storage/implementation/models.d.ts +4 -0
- package/package.json +3 -3
- package/src/migrations/db/migrations/1752661449910-connection-reporting.ts +1 -35
- package/src/migrations/db/migrations/1762790715147-connection-reporting2.ts +58 -0
- package/src/storage/implementation/MongoChecksums.ts +16 -6
- package/src/storage/implementation/MongoCompactor.ts +45 -11
- package/src/storage/implementation/MongoSyncRulesLock.ts +13 -4
- package/src/storage/implementation/models.ts +5 -0
- package/test/src/storage_compacting.test.ts +1 -0
- package/tsconfig.tsbuildinfo +1 -1

package/src/storage/implementation/MongoCompactor.ts

@@ -61,6 +61,7 @@ export interface MongoCompactOptions extends storage.CompactOptions {}
 const DEFAULT_CLEAR_BATCH_LIMIT = 5000;
 const DEFAULT_MOVE_BATCH_LIMIT = 2000;
 const DEFAULT_MOVE_BATCH_QUERY_LIMIT = 10_000;
+const DEFAULT_MIN_BUCKET_CHANGES = 10;
 
 /** This default is primarily for tests. */
 const DEFAULT_MEMORY_LIMIT_MB = 64;
@@ -73,6 +74,7 @@ export class MongoCompactor {
   private moveBatchLimit: number;
   private moveBatchQueryLimit: number;
   private clearBatchLimit: number;
+  private minBucketChanges: number;
   private maxOpId: bigint;
   private buckets: string[] | undefined;
   private signal?: AbortSignal;
@@ -88,6 +90,7 @@ export class MongoCompactor {
     this.moveBatchLimit = options?.moveBatchLimit ?? DEFAULT_MOVE_BATCH_LIMIT;
     this.moveBatchQueryLimit = options?.moveBatchQueryLimit ?? DEFAULT_MOVE_BATCH_QUERY_LIMIT;
     this.clearBatchLimit = options?.clearBatchLimit ?? DEFAULT_CLEAR_BATCH_LIMIT;
+    this.minBucketChanges = options?.minBucketChanges ?? DEFAULT_MIN_BUCKET_CHANGES;
     this.maxOpId = options?.maxOpId ?? 0n;
     this.buckets = options?.compactBuckets;
     this.signal = options?.signal;
@@ -113,14 +116,26 @@ export class MongoCompactor {
 
   private async compactDirtyBuckets() {
     while (!this.signal?.aborted) {
-      // Process all buckets with
-
+      // Process all buckets with 10 or more changes since last time.
+      // We exclude the last 100 compacted buckets, to avoid repeatedly re-compacting the same buckets over and over
+      // if they are modified while compacting.
+      const TRACK_RECENTLY_COMPACTED_NUMBER = 100;
+
+      let recentlyCompacted: string[] = [];
+      const buckets = await this.dirtyBucketBatch({
+        minBucketChanges: this.minBucketChanges,
+        exclude: recentlyCompacted
+      });
       if (buckets.length == 0) {
         // All done
         break;
       }
-      for (let bucket of buckets) {
+      for (let { bucket } of buckets) {
         await this.compactSingleBucket(bucket);
+        recentlyCompacted.push(bucket);
+      }
+      if (recentlyCompacted.length > TRACK_RECENTLY_COMPACTED_NUMBER) {
+        recentlyCompacted = recentlyCompacted.slice(-TRACK_RECENTLY_COMPACTED_NUMBER);
       }
     }
   }
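
The compactor now skips buckets with fewer than `minBucketChanges` changes and remembers the last 100 compacted buckets so it does not immediately re-compact them. The threshold is configurable through `MongoCompactOptions`. A minimal usage sketch, assuming the compact entry point accepts these options directly (the `bucketStorage.compact(...)` call shape below is illustrative, not taken from this diff):

const controller = new AbortController();

// Only compact buckets that accumulated at least 20 changes since the last
// compact; omitting the option falls back to DEFAULT_MIN_BUCKET_CHANGES (10).
await bucketStorage.compact({
  minBucketChanges: 20,
  signal: controller.signal
});
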
@@ -482,10 +497,20 @@ export class MongoCompactor {
         break;
       }
       const start = Date.now();
-      logger.info(`Calculating checksums for batch of ${buckets.length} buckets
+      logger.info(`Calculating checksums for batch of ${buckets.length} buckets`);
 
-
-
+      // Filter batch by estimated bucket size, to reduce possibility of timeouts
+      let checkBuckets: typeof buckets = [];
+      let totalCountEstimate = 0;
+      for (let bucket of buckets) {
+        checkBuckets.push(bucket);
+        totalCountEstimate += bucket.estimatedCount;
+        if (totalCountEstimate > 50_000) {
+          break;
+        }
+      }
+      await this.updateChecksumsBatch(checkBuckets.map((b) => b.bucket));
+      logger.info(`Updated checksums for batch of ${checkBuckets.length} buckets in ${Date.now() - start}ms`);
       count += buckets.length;
     }
     return { buckets: count };
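
Checksum batches are now capped by estimated bucket size rather than only by the number of buckets returned from `dirtyBucketBatch`. The same rule as a small standalone sketch (the function and interface names are illustrative; the 50_000 cap is the value used in the diff):

interface DirtyBucket {
  bucket: string;
  estimatedCount: number;
}

// Take buckets in priority order until the running estimate first exceeds the
// cap; the bucket that crosses the cap is still included, so at least one
// bucket is always processed per batch.
function capBatchByEstimate(buckets: DirtyBucket[], cap = 50_000): DirtyBucket[] {
  const batch: DirtyBucket[] = [];
  let total = 0;
  for (const bucket of buckets) {
    batch.push(bucket);
    total += bucket.estimatedCount;
    if (total > cap) {
      break;
    }
  }
  return batch;
}
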
@@ -497,7 +522,10 @@
    * This cannot be used to iterate on its own - the client is expected to process these buckets and
    * set estimate_since_compact.count: 0 when done, before fetching the next batch.
    */
-  private async dirtyBucketBatch(options: {
+  private async dirtyBucketBatch(options: {
+    minBucketChanges: number;
+    exclude?: string[];
+  }): Promise<{ bucket: string; estimatedCount: number }[]> {
     if (options.minBucketChanges <= 0) {
       throw new ReplicationAssertionError('minBucketChanges must be >= 1');
     }
@@ -506,22 +534,28 @@
       .find(
         {
           '_id.g': this.group_id,
-          'estimate_since_compact.count': { $gte: options.minBucketChanges }
+          'estimate_since_compact.count': { $gte: options.minBucketChanges },
+          '_id.b': { $nin: options.exclude ?? [] }
         },
         {
           projection: {
-            _id: 1
+            _id: 1,
+            estimate_since_compact: 1,
+            compacted_state: 1
           },
           sort: {
             'estimate_since_compact.count': -1
           },
-          limit:
+          limit: 200,
           maxTimeMS: MONGO_OPERATION_TIMEOUT_MS
         }
       )
       .toArray();
 
-    return dirtyBuckets.map((bucket) =>
+    return dirtyBuckets.map((bucket) => ({
+      bucket: bucket._id.b,
+      estimatedCount: bucket.estimate_since_compact!.count + (bucket.compacted_state?.count ?? 0)
+    }));
   }
 
   private async updateChecksumsBatch(buckets: string[]) {
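
The doc comment above spells out the iteration contract: after processing a batch, the caller must reset `estimate_since_compact.count` to 0 for those buckets, otherwise they keep matching the `$gte` filter and `dirtyBucketBatch` would return them again. A hedged sketch of that reset for a single bucket, reusing the field names from the query above (the package's actual bookkeeping may differ, e.g. it may also update `compacted_state`; the number type for the group id is an assumption):

import { Collection, Document } from 'mongodb';

async function resetCompactEstimate(
  bucketState: Collection<Document>,
  groupId: number,
  bucket: string
): Promise<void> {
  // Clear the change counter so this bucket no longer qualifies as "dirty"
  // until new changes accumulate.
  await bucketState.updateOne(
    { '_id.g': groupId, '_id.b': bucket },
    { $set: { 'estimate_since_compact.count': 0 } }
  );
}
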
package/src/storage/implementation/MongoSyncRulesLock.ts

@@ -33,10 +33,19 @@ export class MongoSyncRulesLock implements storage.ReplicationLock {
     );
 
     if (doc == null) {
-
-
-
-
+      // Query the existing lock to get the expiration time (best effort - it may have been released in the meantime).
+      const heldLock = await db.sync_rules.findOne({ _id: sync_rules.id }, { projection: { lock: 1 } });
+      if (heldLock?.lock?.expires_at) {
+        throw new ServiceError(
+          ErrorCode.PSYNC_S1003,
+          `Sync rules: ${sync_rules.id} have been locked by another process for replication, expiring at ${heldLock.lock.expires_at.toISOString()}.`
+        );
+      } else {
+        throw new ServiceError(
+          ErrorCode.PSYNC_S1003,
+          `Sync rules: ${sync_rules.id} have been locked by another process for replication.`
+        );
+      }
     }
     return new MongoSyncRulesLock(db, sync_rules.id, lockId);
   }
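
When the lock is already held, the error message now includes the current holder's expiry time when it can be read back. A caller that wants to retry could wait roughly until that time; sketched here as a plain helper (not part of the package, and how the expiry would be extracted from the ServiceError is left out):

// Milliseconds to wait before retrying lock acquisition, with a small margin
// past the reported expiry.
function retryDelayMs(expiresAt: Date, marginMs = 1_000, now: Date = new Date()): number {
  return Math.max(0, expiresAt.getTime() - now.getTime()) + marginMs;
}

// Hypothetical usage: setTimeout(() => acquireLockAgain(), retryDelayMs(reportedExpiry));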