@powersync/service-module-mongodb-storage 0.9.5 → 0.10.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +31 -0
- package/dist/migrations/db/migrations/1749720702136-checkpoint-events.d.ts +3 -0
- package/dist/migrations/db/migrations/1749720702136-checkpoint-events.js +34 -0
- package/dist/migrations/db/migrations/1749720702136-checkpoint-events.js.map +1 -0
- package/dist/storage/MongoBucketStorage.js +5 -0
- package/dist/storage/MongoBucketStorage.js.map +1 -1
- package/dist/storage/implementation/MongoBucketBatch.d.ts +9 -3
- package/dist/storage/implementation/MongoBucketBatch.js +116 -36
- package/dist/storage/implementation/MongoBucketBatch.js.map +1 -1
- package/dist/storage/implementation/MongoCompactor.js +2 -2
- package/dist/storage/implementation/MongoCompactor.js.map +1 -1
- package/dist/storage/implementation/MongoPersistedSyncRulesContent.d.ts +1 -0
- package/dist/storage/implementation/MongoPersistedSyncRulesContent.js +2 -0
- package/dist/storage/implementation/MongoPersistedSyncRulesContent.js.map +1 -1
- package/dist/storage/implementation/MongoStorageProvider.js +23 -1
- package/dist/storage/implementation/MongoStorageProvider.js.map +1 -1
- package/dist/storage/implementation/MongoSyncBucketStorage.d.ts +14 -5
- package/dist/storage/implementation/MongoSyncBucketStorage.js +165 -160
- package/dist/storage/implementation/MongoSyncBucketStorage.js.map +1 -1
- package/dist/storage/implementation/MongoTestStorageFactoryGenerator.js +2 -0
- package/dist/storage/implementation/MongoTestStorageFactoryGenerator.js.map +1 -1
- package/dist/storage/implementation/MongoWriteCheckpointAPI.d.ts +9 -15
- package/dist/storage/implementation/MongoWriteCheckpointAPI.js +55 -191
- package/dist/storage/implementation/MongoWriteCheckpointAPI.js.map +1 -1
- package/dist/storage/implementation/PersistedBatch.d.ts +6 -2
- package/dist/storage/implementation/PersistedBatch.js +40 -8
- package/dist/storage/implementation/PersistedBatch.js.map +1 -1
- package/dist/storage/implementation/db.d.ts +12 -1
- package/dist/storage/implementation/db.js +39 -0
- package/dist/storage/implementation/db.js.map +1 -1
- package/dist/storage/implementation/models.d.ts +30 -2
- package/package.json +6 -6
- package/src/migrations/db/migrations/1749720702136-checkpoint-events.ts +50 -0
- package/src/storage/MongoBucketStorage.ts +5 -0
- package/src/storage/implementation/MongoBucketBatch.ts +159 -48
- package/src/storage/implementation/MongoCompactor.ts +2 -2
- package/src/storage/implementation/MongoPersistedSyncRulesContent.ts +2 -0
- package/src/storage/implementation/MongoStorageProvider.ts +27 -1
- package/src/storage/implementation/MongoSyncBucketStorage.ts +191 -201
- package/src/storage/implementation/MongoTestStorageFactoryGenerator.ts +3 -0
- package/src/storage/implementation/MongoWriteCheckpointAPI.ts +66 -255
- package/src/storage/implementation/PersistedBatch.ts +51 -12
- package/src/storage/implementation/db.ts +42 -0
- package/src/storage/implementation/models.ts +33 -2
- package/test/src/storage_sync.test.ts +7 -0
- package/tsconfig.tsbuildinfo +1 -1
package/src/storage/implementation/MongoSyncBucketStorage.ts

@@ -4,6 +4,7 @@ import {
   BaseObserver,
   ErrorCode,
   logger,
+  ReplicationAbortedError,
   ServiceAssertionError,
   ServiceError
 } from '@powersync/lib-services-framework';
@@ -15,13 +16,11 @@ import {
   GetCheckpointChangesOptions,
   InternalOpId,
   internalToExternalOpId,
-  mergeAsyncIterables,
   ProtocolOpId,
   ReplicationCheckpoint,
   storage,
   utils,
-  WatchWriteCheckpointOptions,
-  WriteCheckpointResult
+  WatchWriteCheckpointOptions
 } from '@powersync/service-core';
 import { JSONBig } from '@powersync/service-jsonbig';
 import { ParameterLookup, SqliteJsonRow, SqlSyncRules } from '@powersync/service-sync-rules';
@@ -36,8 +35,7 @@ import {
   BucketStateDocument,
   SourceKey,
   SourceTableDocument,
-  SyncRuleCheckpointState,
-  SyncRuleDocument
+  SyncRuleCheckpointState
 } from './models.js';
 import { MongoBucketBatch } from './MongoBucketBatch.js';
 import { MongoCompactor } from './MongoCompactor.js';
@@ -56,7 +54,7 @@ export class MongoSyncBucketStorage
   });

   private parsedSyncRulesCache: { parsed: SqlSyncRules; options: storage.ParseSyncRulesOptions } | undefined;
-  private writeCheckpointAPI:
+  private writeCheckpointAPI: MongoWriteCheckpointAPI;

   constructor(
     public readonly factory: MongoBucketStorage,
@@ -82,12 +80,6 @@ export class MongoSyncBucketStorage
     this.writeCheckpointAPI.setWriteCheckpointMode(mode);
   }

-  batchCreateCustomWriteCheckpoints(checkpoints: storage.BatchedCustomWriteCheckpointOptions[]): Promise<void> {
-    return this.writeCheckpointAPI.batchCreateCustomWriteCheckpoints(
-      checkpoints.map((checkpoint) => ({ ...checkpoint, sync_rules_id: this.group_id }))
-    );
-  }
-
   createManagedWriteCheckpoint(checkpoint: storage.ManagedWriteCheckpointOptions): Promise<bigint> {
     return this.writeCheckpointAPI.createManagedWriteCheckpoint(checkpoint);
   }
@@ -116,9 +108,15 @@ export class MongoSyncBucketStorage
     const doc = await this.db.sync_rules.findOne(
       { _id: this.group_id },
       {
-        projection: { last_checkpoint: 1, last_checkpoint_lsn: 1 }
+        projection: { last_checkpoint: 1, last_checkpoint_lsn: 1, snapshot_done: 1 }
       }
     );
+    if (!doc?.snapshot_done) {
+      return {
+        checkpoint: 0n,
+        lsn: null
+      };
+    }
     return {
       checkpoint: doc?.last_checkpoint ?? 0n,
       lsn: doc?.last_checkpoint_lsn ?? null
@@ -138,6 +136,7 @@ export class MongoSyncBucketStorage
     const checkpoint_lsn = doc?.last_checkpoint_lsn ?? null;

     await using batch = new MongoBucketBatch({
+      logger: options.logger,
       db: this.db,
       syncRules: this.sync_rules.parsed(options).sync_rules,
       groupId: this.group_id,
@@ -146,7 +145,8 @@ export class MongoSyncBucketStorage
       noCheckpointBeforeLsn: doc?.no_checkpoint_before ?? options.zeroLSN,
       keepaliveOp: doc?.keepalive_op ? BigInt(doc.keepalive_op) : null,
       storeCurrentData: options.storeCurrentData,
-      skipExistingRows: options.skipExistingRows ?? false
+      skipExistingRows: options.skipExistingRows ?? false,
+      markRecordUnavailable: options.markRecordUnavailable
     });
     this.iterateListeners((cb) => cb.batchStarted?.(batch));

@@ -193,7 +193,8 @@ export class MongoSyncBucketStorage
         table_name: table,
         replica_id_columns: null,
         replica_id_columns2: columns,
-        snapshot_done: false
+        snapshot_done: false,
+        snapshot_status: undefined
       };

       await col.insertOne(doc, { session });
@@ -210,6 +211,14 @@ export class MongoSyncBucketStorage
       sourceTable.syncEvent = options.sync_rules.tableTriggersEvent(sourceTable);
       sourceTable.syncData = options.sync_rules.tableSyncsData(sourceTable);
       sourceTable.syncParameters = options.sync_rules.tableSyncsParameters(sourceTable);
+      sourceTable.snapshotStatus =
+        doc.snapshot_status == null
+          ? undefined
+          : {
+              lastKey: doc.snapshot_status.last_key?.buffer ?? null,
+              totalEstimatedCount: doc.snapshot_status.total_estimated_count,
+              replicatedCount: doc.snapshot_status.replicated_count
+            };

       let dropTables: storage.SourceTable[] = [];
       // Detect tables that are either renamed, or have different replica_id_columns
@@ -464,7 +473,10 @@ export class MongoSyncBucketStorage
         {
           $group: {
             _id: '$_id.b',
-            checksum_total: { $sum: '$checksum' },
+            // Historically, checksum may be stored as 'int' or 'double'.
+            // More recently, this should be a 'long'.
+            // $toLong ensures that we always sum it as a long, avoiding inaccuracies in the calculations.
+            checksum_total: { $sum: { $toLong: '$checksum' } },
             count: { $sum: 1 },
             has_clear_op: {
               $max: {
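
The `$toLong` coercion above is the substance of this hunk: checksums written by older service versions may be stored as BSON `int` or `double`, and summing doubles silently loses precision once the running total passes 2^53. A standalone sketch of the same aggregation pattern with the Node.js MongoDB driver; the database, collection, and grouping key here are illustrative, not the package's actual schema:

```ts
import { MongoClient } from 'mongodb';

// Sketch: sum a numeric field that may be stored as int, double, or long.
// $toLong converts each value to a 64-bit long before summing, so the
// accumulator never goes through floating-point arithmetic.
async function bucketChecksums(client: MongoClient) {
  const ops = client.db('demo').collection('bucket_data');
  return ops
    .aggregate([
      {
        $group: {
          _id: '$_id.b',
          checksum_total: { $sum: { $toLong: '$checksum' } },
          count: { $sum: 1 }
        }
      }
    ])
    .toArray();
}
```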
@@ -496,7 +508,7 @@ export class MongoSyncBucketStorage
   async terminate(options?: storage.TerminateOptions) {
     // Default is to clear the storage except when explicitly requested not to.
     if (!options || options?.clearStorage) {
-      await this.clear();
+      await this.clear(options);
     }
     await this.db.sync_rules.updateOne(
       {
@@ -510,6 +522,7 @@ export class MongoSyncBucketStorage
         }
       }
     );
+    await this.db.notifyCheckpoint();
   }

   async getStatus(): Promise<storage.SyncRuleStatus> {
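
`notifyCheckpoint()` is a new helper on the db wrapper (`db.ts` gains 42 lines in this release) and is now called after every checkpoint-related write. Judging by the capped `checkpoint_events` collection introduced further down, it presumably inserts a small marker document that wakes up tailing readers. A sketch under that assumption; the document body is illustrative and never read:

```ts
import { Db } from 'mongodb';

// Assumption: checkpoint listeners tail the capped checkpoint_events
// collection, so notifying them is a single small insert. Only the fact
// that an insert happened matters, not the contents.
async function notifyCheckpoint(db: Db): Promise<void> {
  await db.collection('checkpoint_events').insertOne({ ts: new Date() });
}
```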
@@ -521,7 +534,8 @@ export class MongoSyncBucketStorage
         projection: {
           snapshot_done: 1,
           last_checkpoint_lsn: 1,
-          state: 1
+          state: 1,
+          snapshot_lsn: 1
         }
       }
     );
@@ -531,13 +545,17 @@ export class MongoSyncBucketStorage

     return {
       snapshot_done: doc.snapshot_done,
+      snapshot_lsn: doc.snapshot_lsn ?? null,
       active: doc.state == 'ACTIVE',
       checkpoint_lsn: doc.last_checkpoint_lsn
     };
   }

-  async clear(): Promise<void> {
+  async clear(options?: storage.ClearStorageOptions): Promise<void> {
     while (true) {
+      if (options?.signal?.aborted) {
+        throw new ReplicationAbortedError('Aborted clearing data');
+      }
       try {
         await this.clearIteration();

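
`clear()` now accepts `storage.ClearStorageOptions` and checks the abort signal at the top of every retry iteration, so a shutdown no longer has to wait for a potentially long clear to finish. Reduced to a standalone sketch; the error class and the `MaxTimeMSExpired` retry condition are stand-ins for what the surrounding (not fully shown) code appears to do:

```ts
// Sketch: an abortable retry loop around a batched delete step.
// clearIteration() is assumed to delete one bounded batch and throw a
// MaxTimeMSExpired-style server error when it runs out of time mid-way.
async function clearAll(clearIteration: () => Promise<void>, signal?: AbortSignal): Promise<void> {
  while (true) {
    if (signal?.aborted) {
      throw new Error('Aborted clearing data'); // stand-in for ReplicationAbortedError
    }
    try {
      await clearIteration();
      return;
    } catch (e: any) {
      if (e?.codeName === 'MaxTimeMSExpired') {
        continue; // partial progress was made; loop for the next batch
      }
      throw e;
    }
  }
}
```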
@@ -572,6 +590,9 @@ export class MongoSyncBucketStorage
           last_checkpoint_lsn: null,
           last_checkpoint: null,
           no_checkpoint_before: null
+        },
+        $unset: {
+          snapshot_lsn: 1
         }
       },
       { maxTimeMS: lib_mongo.db.MONGO_CLEAR_OPERATION_TIMEOUT_MS }
@@ -640,6 +661,7 @@ export class MongoSyncBucketStorage
           },
           { session }
         );
+        await this.db.notifyCheckpoint();
       }
     });
   });
@@ -657,6 +679,7 @@ export class MongoSyncBucketStorage
         }
       }
     );
+    await this.db.notifyCheckpoint();
   }

   async compact(options?: storage.CompactOptions) {
@@ -674,21 +697,27 @@ export class MongoSyncBucketStorage
    * Instance-wide watch on the latest available checkpoint (op_id + lsn).
    */
   private async *watchActiveCheckpoint(signal: AbortSignal): AsyncIterable<ReplicationCheckpoint> {
-
-    // otherwise we get weird "doc: never" issues.
-    let doc = null as SyncRuleCheckpointState | null;
-    let clusterTime = null as mongo.Timestamp | null;
-    const syncRulesId = this.group_id;
+    const stream = this.checkpointChangesStream(signal);

-
-
+    if (signal.aborted) {
+      return;
+    }
+
+    // We only watch changes to the active sync rules.
+    // If it changes to inactive, we abort and restart with the new sync rules.
+    let lastOp: storage.ReplicationCheckpoint | null = null;
+
+    for await (const _ of stream) {
+      if (signal.aborted) {
+        break;
+      }
+
+      const doc = await this.db.sync_rules.findOne(
         {
-          _id: syncRulesId,
+          _id: this.group_id,
           state: { $in: [storage.SyncRuleState.ACTIVE, storage.SyncRuleState.ERRORED] }
         },
         {
-          session,
-          sort: { _id: -1 },
           limit: 1,
           projection: {
             _id: 1,
@@ -698,70 +727,17 @@ export class MongoSyncBucketStorage
           }
         }
       );
-      const time = session.clusterTime?.clusterTime ?? null;
-      clusterTime = time;
-    });
-    if (clusterTime == null) {
-      throw new ServiceError(ErrorCode.PSYNC_S2401, 'Could not get clusterTime');
-    }
-
-    if (signal.aborted) {
-      return;
-    }
-
-    if (doc == null) {
-      // Sync rules not present or not active.
-      // Abort the connections - clients will have to retry later.
-      throw new ServiceError(ErrorCode.PSYNC_S2302, 'No active sync rules available');
-    }
-
-    yield this.makeActiveCheckpoint(doc);
-
-    // We only watch changes to the active sync rules.
-    // If it changes to inactive, we abort and restart with the new sync rules.
-
-    const pipeline = this.getChangeStreamPipeline();
-
-    const stream = this.db.sync_rules.watch(pipeline, {
-      // Start at the cluster time where we got the initial doc, to make sure
-      // we don't skip any updates.
-      // This may result in the first operation being a duplicate, but we filter
-      // it out anyway.
-      startAtOperationTime: clusterTime
-    });
-
-    signal.addEventListener(
-      'abort',
-      () => {
-        stream.close();
-      },
-      { once: true }
-    );
-
-    let lastOp: storage.ReplicationCheckpoint | null = null;
-    let lastDoc: SyncRuleCheckpointState | null = doc;

-    for await (const update of stream.stream()) {
-      if (signal.aborted) {
-        break;
-      }
-      if (update.operationType != 'insert' && update.operationType != 'update' && update.operationType != 'replace') {
-        continue;
-      }
-
-      const doc = await this.getOperationDoc(lastDoc, update as lib_mongo.mongo.ChangeStreamDocument<SyncRuleDocument>);
       if (doc == null) {
-        //
-        continue;
-      }
-      if (doc.state != storage.SyncRuleState.ACTIVE && doc.state != storage.SyncRuleState.ERRORED) {
+        // Sync rules not present or not active.
+        // Abort the connections - clients will have to retry later.
+        throw new ServiceError(ErrorCode.PSYNC_S2302, 'No active sync rules available');
+      } else if (doc.state != storage.SyncRuleState.ACTIVE && doc.state != storage.SyncRuleState.ERRORED) {
         // Sync rules have changed - abort and restart.
        // We do a soft close of the stream here - no error
         break;
       }

-      lastDoc = doc;
-
       const op = this.makeActiveCheckpoint(doc);
       // Check for LSN / checkpoint changes - ignore other metadata changes
       if (lastOp == null || op.lsn != lastOp.lsn || op.checkpoint != lastOp.checkpoint) {
@@ -780,61 +756,32 @@ export class MongoSyncBucketStorage
    * User-specific watch on the latest checkpoint and/or write checkpoint.
    */
   async *watchCheckpointChanges(options: WatchWriteCheckpointOptions): AsyncIterable<storage.StorageCheckpointUpdate> {
-
-    let lastCheckpoint: utils.InternalOpId | null = null;
-    let lastWriteCheckpoint: bigint | null = null;
-    let lastWriteCheckpointDoc: WriteCheckpointResult | null = null;
-    let nextWriteCheckpoint: bigint | null = null;
-    let lastCheckpointEvent: ReplicationCheckpoint | null = null;
-    let receivedWriteCheckpoint = false;
-
-    const writeCheckpointIter = this.writeCheckpointAPI.watchUserWriteCheckpoint({
-      user_id: options.user_id,
-      signal,
-      sync_rules_id: this.group_id
-    });
-    const iter = mergeAsyncIterables<ReplicationCheckpoint | storage.WriteCheckpointResult>(
-      [this.sharedIter, writeCheckpointIter],
-      signal
-    );
+    let lastCheckpoint: ReplicationCheckpoint | null = null;

-
-
-        lastCheckpointEvent = event;
-      } else {
-        lastWriteCheckpointDoc = event;
-        receivedWriteCheckpoint = true;
-      }
-
-      if (lastCheckpointEvent == null || !receivedWriteCheckpoint) {
-        // We need to wait until we received at least on checkpoint, and one write checkpoint.
-        continue;
-      }
+    const iter = this.sharedIter[Symbol.asyncIterator](options.signal);
+    let writeCheckpoint: bigint | null = null;

+    for await (const nextCheckpoint of iter) {
       // lsn changes are not important by itself.
       // What is important is:
       // 1. checkpoint (op_id) changes.
       // 2. write checkpoint changes for the specific user

-
+      if (nextCheckpoint.lsn != null) {
+        writeCheckpoint ??= await this.writeCheckpointAPI.lastWriteCheckpoint({
+          sync_rules_id: this.group_id,
+          user_id: options.user_id,
+          heads: {
+            '1': nextCheckpoint.lsn
+          }
+        });
+      }

       if (
-
-
+        lastCheckpoint != null &&
+        lastCheckpoint.checkpoint == nextCheckpoint.checkpoint &&
+        lastCheckpoint.lsn == nextCheckpoint.lsn
       ) {
-        const writeCheckpoint = lastWriteCheckpointDoc.id;
-        if (nextWriteCheckpoint == null || (writeCheckpoint != null && writeCheckpoint > nextWriteCheckpoint)) {
-          nextWriteCheckpoint = writeCheckpoint;
-        }
-        // We used the doc - clear it
-        lastWriteCheckpointDoc = null;
-      }
-
-      const { checkpoint } = lastCheckpointEvent;
-
-      const currentWriteCheckpoint = nextWriteCheckpoint;
-
-      if (currentWriteCheckpoint == lastWriteCheckpoint && checkpoint == lastCheckpoint) {
         // No change - wait for next one
         // In some cases, many LSNs may be produced in a short time.
         // Add a delay to throttle the write checkpoint lookup a bit.
@@ -842,75 +789,106 @@ export class MongoSyncBucketStorage
         continue;
       }

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+      if (lastCheckpoint == null) {
+        yield {
+          base: nextCheckpoint,
+          writeCheckpoint,
+          update: CHECKPOINT_INVALIDATE_ALL
+        };
+      } else {
+        const updates = await this.getCheckpointChanges({
+          lastCheckpoint,
+          nextCheckpoint
+        });
+
+        let updatedWriteCheckpoint = updates.updatedWriteCheckpoints.get(options.user_id) ?? null;
+        if (updates.invalidateWriteCheckpoints) {
+          updatedWriteCheckpoint ??= await this.writeCheckpointAPI.lastWriteCheckpoint({
+            sync_rules_id: this.group_id,
+            user_id: options.user_id,
+            heads: {
+              '1': nextCheckpoint.lsn!
+            }
+          });
+        }
+        if (updatedWriteCheckpoint != null && (writeCheckpoint == null || updatedWriteCheckpoint > writeCheckpoint)) {
+          writeCheckpoint = updatedWriteCheckpoint;
+        }

-
-
-
-
-
-
-
-
-
-
+        yield {
+          base: nextCheckpoint,
+          writeCheckpoint,
+          update: {
+            updatedDataBuckets: updates.updatedDataBuckets,
+            invalidateDataBuckets: updates.invalidateDataBuckets,
+            updatedParameterLookups: updates.updatedParameterLookups,
+            invalidateParameterBuckets: updates.invalidateParameterBuckets
+          }
+        };
       }

-
-      _id: lastDoc._id,
-      last_checkpoint: updatedFields.last_checkpoint ?? lastDoc.last_checkpoint,
-      last_checkpoint_lsn: updatedFields.last_checkpoint_lsn ?? lastDoc.last_checkpoint_lsn,
-      state: updatedFields.state ?? lastDoc.state
-    };
-
-    return mergedDoc;
-    } else {
-      // Unknown event type
-      return null;
+      lastCheckpoint = nextCheckpoint;
     }
   }

-
-
-
-
-
-
-
+  /**
+   * This watches the checkpoint_events capped collection for new documents inserted,
+   * and yields whenever one or more documents are inserted.
+   *
+   * The actual checkpoint must be queried on the sync_rules collection after this.
+   */
+  private async *checkpointChangesStream(signal: AbortSignal): AsyncGenerator<void> {
+    if (signal.aborted) {
+      return;
+    }
+
+    const query = () => {
+      return this.db.checkpoint_events.find(
+        {},
+        { tailable: true, awaitData: true, maxAwaitTimeMS: 10_000, batchSize: 1000 }
+      );
+    };
+
+    let cursor = query();
+
+    signal.addEventListener('abort', () => {
+      cursor.close().catch(() => {});
+    });
+
+    // Yield once on start, regardless of whether there are documents in the cursor.
+    // This is to ensure that the first iteration of the generator yields immediately.
+    yield;
+
+    try {
+      while (!signal.aborted) {
+        const doc = await cursor.tryNext().catch((e) => {
+          if (lib_mongo.isMongoServerError(e) && e.codeName === 'CappedPositionLost') {
+            // Cursor position lost, potentially due to a high rate of notifications
+            cursor = query();
+            // Treat as an event found, before querying the new cursor again
+            return {};
+          } else {
+            return Promise.reject(e);
+          }
+        });
+        if (cursor.closed) {
+          return;
        }

-
-
-
-
-
-      'updateDescription.updatedFields.state': 1,
-      'updateDescription.updatedFields.last_checkpoint': 1,
-      'updateDescription.updatedFields.last_checkpoint_lsn': 1,
-      'fullDocument._id': 1,
-      'fullDocument.state': 1,
-      'fullDocument.last_checkpoint': 1,
-      'fullDocument.last_checkpoint_lsn': 1
+        // Skip buffered documents, if any. We don't care about the contents,
+        // we only want to know when new documents are inserted.
+        cursor.readBufferedDocuments();
+        if (doc != null) {
+          yield;
        }
      }
-
-
+    } catch (e) {
+      if (signal.aborted) {
+        return;
+      }
+      throw e;
+    } finally {
+      await cursor.close();
+    }
   }

   private async getDataBucketChanges(
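
This hunk is the centerpiece of the release: `watchActiveCheckpoint` no longer drives a MongoDB change stream on `sync_rules` (removed above), but a tailable `awaitData` cursor on the new capped `checkpoint_events` collection, which also works on deployments without change-stream support and costs one cheap query instead of an oplog watch per storage instance. The same technique as a self-contained sketch; connection details and names other than `checkpoint_events` are illustrative:

```ts
import { MongoClient } from 'mongodb';

// Sketch: yield once per batch of inserts into a capped collection.
// Requires checkpoint_events to have been created with { capped: true }.
async function* checkpointEvents(client: MongoClient, signal: AbortSignal): AsyncGenerator<void> {
  const events = client.db('powersync_demo').collection('checkpoint_events');
  const cursor = events.find({}, { tailable: true, awaitData: true, maxAwaitTimeMS: 10_000 });
  signal.addEventListener('abort', () => void cursor.close().catch(() => {}));
  try {
    // Yield immediately so the consumer reads an initial state on startup.
    yield;
    while (!signal.aborted) {
      // tryNext() blocks server-side up to maxAwaitTimeMS, then resolves null.
      const doc = await cursor.tryNext();
      if (cursor.closed) return;
      if (doc != null) {
        // Drain anything else already buffered - one wakeup is enough.
        cursor.readBufferedDocuments();
        yield;
      }
    }
  } finally {
    await cursor.close();
  }
}
```

The `CappedPositionLost` handler in the actual code covers one case this sketch omits: if inserts outpace the reader and the capped collection wraps past the cursor's position, the cursor is simply re-created.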
@@ -922,7 +900,7 @@ export class MongoSyncBucketStorage
       {
         // We have an index on (_id.g, last_op).
         '_id.g': this.group_id,
-        last_op: { $gt: options.lastCheckpoint }
+        last_op: { $gt: options.lastCheckpoint.checkpoint }
       },
       {
         projection: {
@@ -951,7 +929,7 @@ export class MongoSyncBucketStorage
     const parameterUpdates = await this.db.bucket_parameters
       .find(
         {
-          _id: { $gt: options.lastCheckpoint, $lte: options.nextCheckpoint },
+          _id: { $gt: options.lastCheckpoint.checkpoint, $lte: options.nextCheckpoint.checkpoint },
           'key.g': this.group_id
         },
         {
@@ -979,7 +957,11 @@ export class MongoSyncBucketStorage
   // TODO (later):
   // We can optimize this by implementing it like ChecksumCache: We can use partial cache results to do
   // more efficient lookups in some cases.
-  private checkpointChangesCache = new LRUCache<string, CheckpointChanges, { options: GetCheckpointChangesOptions }>({
+  private checkpointChangesCache = new LRUCache<
+    string,
+    InternalCheckpointChanges,
+    { options: GetCheckpointChangesOptions }
+  >({
     // Limit to 50 cache entries, or 10MB, whichever comes first.
     // Some rough calculations:
     // If we process 10 checkpoints per second, and a connection may be 2 seconds behind, we could have
@@ -987,31 +969,39 @@ export class MongoSyncBucketStorage
     // That is a worst-case scenario, so we don't actually store that many. In real life, the cache keys
     // would likely be clustered around a few values, rather than spread over all 400 potential values.
     max: 50,
-    maxSize:
-    sizeCalculation: (value: CheckpointChanges) => {
+    maxSize: 12 * 1024 * 1024,
+    sizeCalculation: (value: InternalCheckpointChanges) => {
      // Estimate of memory usage
      const paramSize = [...value.updatedParameterLookups].reduce<number>((a, b) => a + b.length, 0);
      const bucketSize = [...value.updatedDataBuckets].reduce<number>((a, b) => a + b.length, 0);
-      return 100 + paramSize + bucketSize;
+      const writeCheckpointSize = value.updatedWriteCheckpoints.size * 30; // estiamte for user_id + bigint
+      return 100 + paramSize + bucketSize + writeCheckpointSize;
     },
     fetchMethod: async (_key, _staleValue, options) => {
       return this.getCheckpointChangesInternal(options.context.options);
     }
   });

-  async getCheckpointChanges(options: GetCheckpointChangesOptions): Promise<CheckpointChanges> {
-    const key = `${options.lastCheckpoint}_${options.nextCheckpoint}`;
+  async getCheckpointChanges(options: GetCheckpointChangesOptions): Promise<InternalCheckpointChanges> {
+    const key = `${options.lastCheckpoint.checkpoint}_${options.lastCheckpoint.lsn}__${options.nextCheckpoint.checkpoint}_${options.nextCheckpoint.lsn}`;
     const result = await this.checkpointChangesCache.fetch(key, { context: { options } });
     return result!;
   }

-  private async getCheckpointChangesInternal(options: GetCheckpointChangesOptions): Promise<CheckpointChanges> {
+  private async getCheckpointChangesInternal(options: GetCheckpointChangesOptions): Promise<InternalCheckpointChanges> {
     const dataUpdates = await this.getDataBucketChanges(options);
     const parameterUpdates = await this.getParameterBucketChanges(options);
+    const writeCheckpointUpdates = await this.writeCheckpointAPI.getWriteCheckpointChanges(options);

     return {
       ...dataUpdates,
-      ...parameterUpdates
+      ...parameterUpdates,
+      ...writeCheckpointUpdates
     };
   }
 }
+
+interface InternalCheckpointChanges extends CheckpointChanges {
+  updatedWriteCheckpoints: Map<string, bigint>;
+  invalidateWriteCheckpoints: boolean;
+}
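
The reworked cache keeps using `lru-cache`'s async `fetch()` path: `fetchMethod` deduplicates concurrent lookups for the same checkpoint-range key, and `sizeCalculation` bounds memory rather than entry count alone. A minimal sketch of the pattern; the key shape and value type are illustrative:

```ts
import { LRUCache } from 'lru-cache';

interface Changes {
  updatedDataBuckets: Set<string>;
}

declare function computeChanges(from: bigint, to: bigint): Promise<Changes>;

// Sketch: an async, size-bounded, request-deduplicating cache.
const changesCache = new LRUCache<string, Changes, { from: bigint; to: bigint }>({
  max: 50,
  maxSize: 12 * 1024 * 1024,
  // Rough per-entry byte estimate, like the sizeCalculation in the diff.
  sizeCalculation: (value) => 100 + [...value.updatedDataBuckets].reduce((a, b) => a + b.length, 0),
  // Called once per missing key; concurrent fetch() calls share the result.
  fetchMethod: async (_key, _staleValue, { context }) => computeChanges(context.from, context.to)
});

async function getChanges(from: bigint, to: bigint): Promise<Changes> {
  const key = `${from}_${to}`;
  return (await changesCache.fetch(key, { context: { from, to } }))!;
}
```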
package/src/storage/implementation/MongoTestStorageFactoryGenerator.ts

@@ -16,6 +16,9 @@ export const MongoTestStorageFactoryGenerator = (factoryOptions: MongoTestStorageFactoryGeneratorOptions) => {
       await db.db.createCollection('bucket_parameters');
     }

+    // Full migrations are not currently run for tests, so we manually create this
+    await db.createCheckpointEventsCollection();
+
     if (!options?.doNotClear) {
       await db.clear();
     }