@powersync/service-module-mongodb-storage 0.9.5 → 0.10.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +23 -0
- package/dist/migrations/db/migrations/1749720702136-checkpoint-events.d.ts +3 -0
- package/dist/migrations/db/migrations/1749720702136-checkpoint-events.js +34 -0
- package/dist/migrations/db/migrations/1749720702136-checkpoint-events.js.map +1 -0
- package/dist/storage/MongoBucketStorage.js +5 -0
- package/dist/storage/MongoBucketStorage.js.map +1 -1
- package/dist/storage/implementation/MongoBucketBatch.d.ts +9 -3
- package/dist/storage/implementation/MongoBucketBatch.js +116 -36
- package/dist/storage/implementation/MongoBucketBatch.js.map +1 -1
- package/dist/storage/implementation/MongoPersistedSyncRulesContent.d.ts +1 -0
- package/dist/storage/implementation/MongoPersistedSyncRulesContent.js +2 -0
- package/dist/storage/implementation/MongoPersistedSyncRulesContent.js.map +1 -1
- package/dist/storage/implementation/MongoStorageProvider.js +23 -1
- package/dist/storage/implementation/MongoStorageProvider.js.map +1 -1
- package/dist/storage/implementation/MongoSyncBucketStorage.d.ts +14 -5
- package/dist/storage/implementation/MongoSyncBucketStorage.js +161 -159
- package/dist/storage/implementation/MongoSyncBucketStorage.js.map +1 -1
- package/dist/storage/implementation/MongoTestStorageFactoryGenerator.js +2 -0
- package/dist/storage/implementation/MongoTestStorageFactoryGenerator.js.map +1 -1
- package/dist/storage/implementation/MongoWriteCheckpointAPI.d.ts +9 -15
- package/dist/storage/implementation/MongoWriteCheckpointAPI.js +55 -191
- package/dist/storage/implementation/MongoWriteCheckpointAPI.js.map +1 -1
- package/dist/storage/implementation/PersistedBatch.d.ts +6 -2
- package/dist/storage/implementation/PersistedBatch.js +38 -6
- package/dist/storage/implementation/PersistedBatch.js.map +1 -1
- package/dist/storage/implementation/db.d.ts +12 -1
- package/dist/storage/implementation/db.js +39 -0
- package/dist/storage/implementation/db.js.map +1 -1
- package/dist/storage/implementation/models.d.ts +29 -1
- package/package.json +6 -6
- package/src/migrations/db/migrations/1749720702136-checkpoint-events.ts +50 -0
- package/src/storage/MongoBucketStorage.ts +5 -0
- package/src/storage/implementation/MongoBucketBatch.ts +159 -48
- package/src/storage/implementation/MongoPersistedSyncRulesContent.ts +2 -0
- package/src/storage/implementation/MongoStorageProvider.ts +27 -1
- package/src/storage/implementation/MongoSyncBucketStorage.ts +187 -200
- package/src/storage/implementation/MongoTestStorageFactoryGenerator.ts +3 -0
- package/src/storage/implementation/MongoWriteCheckpointAPI.ts +66 -255
- package/src/storage/implementation/PersistedBatch.ts +49 -10
- package/src/storage/implementation/db.ts +42 -0
- package/src/storage/implementation/models.ts +32 -1
- package/tsconfig.tsbuildinfo +1 -1
package/src/storage/implementation/MongoSyncBucketStorage.ts

@@ -4,6 +4,7 @@ import {
   BaseObserver,
   ErrorCode,
   logger,
+  ReplicationAbortedError,
   ServiceAssertionError,
   ServiceError
 } from '@powersync/lib-services-framework';
@@ -15,13 +16,11 @@ import {
   GetCheckpointChangesOptions,
   InternalOpId,
   internalToExternalOpId,
-  mergeAsyncIterables,
   ProtocolOpId,
   ReplicationCheckpoint,
   storage,
   utils,
-  WatchWriteCheckpointOptions,
-  WriteCheckpointResult
+  WatchWriteCheckpointOptions
 } from '@powersync/service-core';
 import { JSONBig } from '@powersync/service-jsonbig';
 import { ParameterLookup, SqliteJsonRow, SqlSyncRules } from '@powersync/service-sync-rules';
@@ -36,8 +35,7 @@ import {
   BucketStateDocument,
   SourceKey,
   SourceTableDocument,
-  SyncRuleCheckpointState,
-  SyncRuleDocument
+  SyncRuleCheckpointState
 } from './models.js';
 import { MongoBucketBatch } from './MongoBucketBatch.js';
 import { MongoCompactor } from './MongoCompactor.js';
@@ -56,7 +54,7 @@ export class MongoSyncBucketStorage
   });

   private parsedSyncRulesCache: { parsed: SqlSyncRules; options: storage.ParseSyncRulesOptions } | undefined;
-  private writeCheckpointAPI:
+  private writeCheckpointAPI: MongoWriteCheckpointAPI;

   constructor(
     public readonly factory: MongoBucketStorage,
@@ -82,12 +80,6 @@ export class MongoSyncBucketStorage
     this.writeCheckpointAPI.setWriteCheckpointMode(mode);
   }

-  batchCreateCustomWriteCheckpoints(checkpoints: storage.BatchedCustomWriteCheckpointOptions[]): Promise<void> {
-    return this.writeCheckpointAPI.batchCreateCustomWriteCheckpoints(
-      checkpoints.map((checkpoint) => ({ ...checkpoint, sync_rules_id: this.group_id }))
-    );
-  }
-
   createManagedWriteCheckpoint(checkpoint: storage.ManagedWriteCheckpointOptions): Promise<bigint> {
     return this.writeCheckpointAPI.createManagedWriteCheckpoint(checkpoint);
   }
@@ -116,9 +108,15 @@ export class MongoSyncBucketStorage
     const doc = await this.db.sync_rules.findOne(
       { _id: this.group_id },
       {
-        projection: { last_checkpoint: 1, last_checkpoint_lsn: 1 }
+        projection: { last_checkpoint: 1, last_checkpoint_lsn: 1, snapshot_done: 1 }
       }
     );
+    if (!doc?.snapshot_done) {
+      return {
+        checkpoint: 0n,
+        lsn: null
+      };
+    }
     return {
       checkpoint: doc?.last_checkpoint ?? 0n,
       lsn: doc?.last_checkpoint_lsn ?? null
@@ -138,6 +136,7 @@ export class MongoSyncBucketStorage
     const checkpoint_lsn = doc?.last_checkpoint_lsn ?? null;

     await using batch = new MongoBucketBatch({
+      logger: options.logger,
       db: this.db,
       syncRules: this.sync_rules.parsed(options).sync_rules,
       groupId: this.group_id,
@@ -146,7 +145,8 @@ export class MongoSyncBucketStorage
       noCheckpointBeforeLsn: doc?.no_checkpoint_before ?? options.zeroLSN,
       keepaliveOp: doc?.keepalive_op ? BigInt(doc.keepalive_op) : null,
       storeCurrentData: options.storeCurrentData,
-      skipExistingRows: options.skipExistingRows ?? false
+      skipExistingRows: options.skipExistingRows ?? false,
+      markRecordUnavailable: options.markRecordUnavailable
     });
     this.iterateListeners((cb) => cb.batchStarted?.(batch));

@@ -193,7 +193,8 @@ export class MongoSyncBucketStorage
       table_name: table,
       replica_id_columns: null,
       replica_id_columns2: columns,
-      snapshot_done: false
+      snapshot_done: false,
+      snapshot_status: undefined
     };

     await col.insertOne(doc, { session });
@@ -210,6 +211,14 @@ export class MongoSyncBucketStorage
     sourceTable.syncEvent = options.sync_rules.tableTriggersEvent(sourceTable);
     sourceTable.syncData = options.sync_rules.tableSyncsData(sourceTable);
     sourceTable.syncParameters = options.sync_rules.tableSyncsParameters(sourceTable);
+    sourceTable.snapshotStatus =
+      doc.snapshot_status == null
+        ? undefined
+        : {
+            lastKey: doc.snapshot_status.last_key?.buffer ?? null,
+            totalEstimatedCount: doc.snapshot_status.total_estimated_count,
+            replicatedCount: doc.snapshot_status.replicated_count
+          };

     let dropTables: storage.SourceTable[] = [];
     // Detect tables that are either renamed, or have different replica_id_columns
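Note: the mapping above is the only place in this diff that reads the new `snapshot_status` subdocument, so its persisted shape can be inferred from the fields it accesses. The sketch below is an inference for readability only; the authoritative definition is in `package/src/storage/implementation/models.ts` (also changed in this release, but not shown here), and the `bson.Binary` type for `last_key` is an assumption based on the `.buffer` access.

```ts
import * as bson from 'bson';

// Inferred shape of the snapshot_status subdocument, based on the fields read above.
// The real definition lives in models.ts; field types here are assumptions.
interface SourceTableSnapshotStatus {
  last_key: bson.Binary | null; // last replicated key, used to resume an interrupted snapshot
  total_estimated_count: number; // estimated total number of rows in the source table
  replicated_count: number; // rows replicated so far
}
```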
@@ -496,7 +505,7 @@ export class MongoSyncBucketStorage
   async terminate(options?: storage.TerminateOptions) {
     // Default is to clear the storage except when explicitly requested not to.
     if (!options || options?.clearStorage) {
-      await this.clear();
+      await this.clear(options);
     }
     await this.db.sync_rules.updateOne(
       {
@@ -510,6 +519,7 @@ export class MongoSyncBucketStorage
         }
       }
     );
+    await this.db.notifyCheckpoint();
   }

   async getStatus(): Promise<storage.SyncRuleStatus> {
@@ -521,7 +531,8 @@ export class MongoSyncBucketStorage
         projection: {
           snapshot_done: 1,
           last_checkpoint_lsn: 1,
-          state: 1
+          state: 1,
+          snapshot_lsn: 1
         }
       }
     );
@@ -531,13 +542,17 @@ export class MongoSyncBucketStorage

     return {
       snapshot_done: doc.snapshot_done,
+      snapshot_lsn: doc.snapshot_lsn ?? null,
       active: doc.state == 'ACTIVE',
       checkpoint_lsn: doc.last_checkpoint_lsn
     };
   }

-  async clear(): Promise<void> {
+  async clear(options?: storage.ClearStorageOptions): Promise<void> {
     while (true) {
+      if (options?.signal?.aborted) {
+        throw new ReplicationAbortedError('Aborted clearing data');
+      }
       try {
         await this.clearIteration();

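Note: `clear()` now takes an optional `storage.ClearStorageOptions` and checks `options?.signal?.aborted` before each iteration, so a long-running clear can be cancelled (it throws `ReplicationAbortedError`, newly imported at the top of the file). A minimal usage sketch, assuming only the `signal` field shown in the diff and a `bucketStorage` instance of this class:

```ts
// Sketch: cancelling a long-running clear via an AbortSignal.
const controller = new AbortController();
const timeout = setTimeout(() => controller.abort(), 30_000); // give up after 30s

try {
  await bucketStorage.clear({ signal: controller.signal });
} finally {
  clearTimeout(timeout);
}
```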
@@ -572,6 +587,9 @@ export class MongoSyncBucketStorage
           last_checkpoint_lsn: null,
           last_checkpoint: null,
           no_checkpoint_before: null
+        },
+        $unset: {
+          snapshot_lsn: 1
         }
       },
       { maxTimeMS: lib_mongo.db.MONGO_CLEAR_OPERATION_TIMEOUT_MS }
@@ -640,6 +658,7 @@ export class MongoSyncBucketStorage
           },
           { session }
         );
+        await this.db.notifyCheckpoint();
       }
     });
   });
@@ -657,6 +676,7 @@ export class MongoSyncBucketStorage
         }
       }
     );
+    await this.db.notifyCheckpoint();
   }

   async compact(options?: storage.CompactOptions) {
@@ -674,21 +694,27 @@ export class MongoSyncBucketStorage
    * Instance-wide watch on the latest available checkpoint (op_id + lsn).
    */
   private async *watchActiveCheckpoint(signal: AbortSignal): AsyncIterable<ReplicationCheckpoint> {
-
-    // otherwise we get weird "doc: never" issues.
-    let doc = null as SyncRuleCheckpointState | null;
-    let clusterTime = null as mongo.Timestamp | null;
-    const syncRulesId = this.group_id;
+    const stream = this.checkpointChangesStream(signal);

-
-
+    if (signal.aborted) {
+      return;
+    }
+
+    // We only watch changes to the active sync rules.
+    // If it changes to inactive, we abort and restart with the new sync rules.
+    let lastOp: storage.ReplicationCheckpoint | null = null;
+
+    for await (const _ of stream) {
+      if (signal.aborted) {
+        break;
+      }
+
+      const doc = await this.db.sync_rules.findOne(
         {
-          _id:
+          _id: this.group_id,
           state: { $in: [storage.SyncRuleState.ACTIVE, storage.SyncRuleState.ERRORED] }
         },
         {
-          session,
-          sort: { _id: -1 },
           limit: 1,
           projection: {
             _id: 1,
@@ -698,70 +724,17 @@ export class MongoSyncBucketStorage
           }
         }
       );
-      const time = session.clusterTime?.clusterTime ?? null;
-      clusterTime = time;
-    });
-    if (clusterTime == null) {
-      throw new ServiceError(ErrorCode.PSYNC_S2401, 'Could not get clusterTime');
-    }
-
-    if (signal.aborted) {
-      return;
-    }
-
-    if (doc == null) {
-      // Sync rules not present or not active.
-      // Abort the connections - clients will have to retry later.
-      throw new ServiceError(ErrorCode.PSYNC_S2302, 'No active sync rules available');
-    }
-
-    yield this.makeActiveCheckpoint(doc);
-
-    // We only watch changes to the active sync rules.
-    // If it changes to inactive, we abort and restart with the new sync rules.
-
-    const pipeline = this.getChangeStreamPipeline();
-
-    const stream = this.db.sync_rules.watch(pipeline, {
-      // Start at the cluster time where we got the initial doc, to make sure
-      // we don't skip any updates.
-      // This may result in the first operation being a duplicate, but we filter
-      // it out anyway.
-      startAtOperationTime: clusterTime
-    });
-
-    signal.addEventListener(
-      'abort',
-      () => {
-        stream.close();
-      },
-      { once: true }
-    );
-
-    let lastOp: storage.ReplicationCheckpoint | null = null;
-    let lastDoc: SyncRuleCheckpointState | null = doc;

-    for await (const update of stream.stream()) {
-      if (signal.aborted) {
-        break;
-      }
-      if (update.operationType != 'insert' && update.operationType != 'update' && update.operationType != 'replace') {
-        continue;
-      }
-
-      const doc = await this.getOperationDoc(lastDoc, update as lib_mongo.mongo.ChangeStreamDocument<SyncRuleDocument>);
       if (doc == null) {
-        //
-
-
-        if (doc.state != storage.SyncRuleState.ACTIVE && doc.state != storage.SyncRuleState.ERRORED) {
+        // Sync rules not present or not active.
+        // Abort the connections - clients will have to retry later.
+        throw new ServiceError(ErrorCode.PSYNC_S2302, 'No active sync rules available');
+      } else if (doc.state != storage.SyncRuleState.ACTIVE && doc.state != storage.SyncRuleState.ERRORED) {
         // Sync rules have changed - abort and restart.
         // We do a soft close of the stream here - no error
         break;
       }

-      lastDoc = doc;
-
       const op = this.makeActiveCheckpoint(doc);
       // Check for LSN / checkpoint changes - ignore other metadata changes
       if (lastOp == null || op.lsn != lastOp.lsn || op.checkpoint != lastOp.checkpoint) {
@@ -780,61 +753,32 @@ export class MongoSyncBucketStorage
    * User-specific watch on the latest checkpoint and/or write checkpoint.
    */
   async *watchCheckpointChanges(options: WatchWriteCheckpointOptions): AsyncIterable<storage.StorageCheckpointUpdate> {
-
-    let lastCheckpoint: utils.InternalOpId | null = null;
-    let lastWriteCheckpoint: bigint | null = null;
-    let lastWriteCheckpointDoc: WriteCheckpointResult | null = null;
-    let nextWriteCheckpoint: bigint | null = null;
-    let lastCheckpointEvent: ReplicationCheckpoint | null = null;
-    let receivedWriteCheckpoint = false;
-
-    const writeCheckpointIter = this.writeCheckpointAPI.watchUserWriteCheckpoint({
-      user_id: options.user_id,
-      signal,
-      sync_rules_id: this.group_id
-    });
-    const iter = mergeAsyncIterables<ReplicationCheckpoint | storage.WriteCheckpointResult>(
-      [this.sharedIter, writeCheckpointIter],
-      signal
-    );
+    let lastCheckpoint: ReplicationCheckpoint | null = null;

-
-
-        lastCheckpointEvent = event;
-      } else {
-        lastWriteCheckpointDoc = event;
-        receivedWriteCheckpoint = true;
-      }
-
-      if (lastCheckpointEvent == null || !receivedWriteCheckpoint) {
-        // We need to wait until we received at least on checkpoint, and one write checkpoint.
-        continue;
-      }
+    const iter = this.sharedIter[Symbol.asyncIterator](options.signal);
+    let writeCheckpoint: bigint | null = null;

+    for await (const nextCheckpoint of iter) {
       // lsn changes are not important by itself.
       // What is important is:
       // 1. checkpoint (op_id) changes.
       // 2. write checkpoint changes for the specific user

-
+      if (nextCheckpoint.lsn != null) {
+        writeCheckpoint ??= await this.writeCheckpointAPI.lastWriteCheckpoint({
+          sync_rules_id: this.group_id,
+          user_id: options.user_id,
+          heads: {
+            '1': nextCheckpoint.lsn
+          }
+        });
+      }

       if (
-
-
+        lastCheckpoint != null &&
+        lastCheckpoint.checkpoint == nextCheckpoint.checkpoint &&
+        lastCheckpoint.lsn == nextCheckpoint.lsn
       ) {
-        const writeCheckpoint = lastWriteCheckpointDoc.id;
-        if (nextWriteCheckpoint == null || (writeCheckpoint != null && writeCheckpoint > nextWriteCheckpoint)) {
-          nextWriteCheckpoint = writeCheckpoint;
-        }
-        // We used the doc - clear it
-        lastWriteCheckpointDoc = null;
-      }
-
-      const { checkpoint } = lastCheckpointEvent;
-
-      const currentWriteCheckpoint = nextWriteCheckpoint;
-
-      if (currentWriteCheckpoint == lastWriteCheckpoint && checkpoint == lastCheckpoint) {
         // No change - wait for next one
         // In some cases, many LSNs may be produced in a short time.
         // Add a delay to throttle the write checkpoint lookup a bit.
@@ -842,75 +786,106 @@ export class MongoSyncBucketStorage
         continue;
       }

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+      if (lastCheckpoint == null) {
+        yield {
+          base: nextCheckpoint,
+          writeCheckpoint,
+          update: CHECKPOINT_INVALIDATE_ALL
+        };
+      } else {
+        const updates = await this.getCheckpointChanges({
+          lastCheckpoint,
+          nextCheckpoint
+        });
+
+        let updatedWriteCheckpoint = updates.updatedWriteCheckpoints.get(options.user_id) ?? null;
+        if (updates.invalidateWriteCheckpoints) {
+          updatedWriteCheckpoint ??= await this.writeCheckpointAPI.lastWriteCheckpoint({
+            sync_rules_id: this.group_id,
+            user_id: options.user_id,
+            heads: {
+              '1': nextCheckpoint.lsn!
+            }
+          });
+        }
+        if (updatedWriteCheckpoint != null && (writeCheckpoint == null || updatedWriteCheckpoint > writeCheckpoint)) {
+          writeCheckpoint = updatedWriteCheckpoint;
+        }

-
-
-
-
-
-
-
-
-
-
+        yield {
+          base: nextCheckpoint,
+          writeCheckpoint,
+          update: {
+            updatedDataBuckets: updates.updatedDataBuckets,
+            invalidateDataBuckets: updates.invalidateDataBuckets,
+            updatedParameterLookups: updates.updatedParameterLookups,
+            invalidateParameterBuckets: updates.invalidateParameterBuckets
+          }
+        };
       }

-
-        _id: lastDoc._id,
-        last_checkpoint: updatedFields.last_checkpoint ?? lastDoc.last_checkpoint,
-        last_checkpoint_lsn: updatedFields.last_checkpoint_lsn ?? lastDoc.last_checkpoint_lsn,
-        state: updatedFields.state ?? lastDoc.state
-      };
-
-      return mergedDoc;
-    } else {
-      // Unknown event type
-      return null;
+      lastCheckpoint = nextCheckpoint;
     }
   }

-
-
-
-
-
-
-
+  /**
+   * This watches the checkpoint_events capped collection for new documents inserted,
+   * and yields whenever one or more documents are inserted.
+   *
+   * The actual checkpoint must be queried on the sync_rules collection after this.
+   */
+  private async *checkpointChangesStream(signal: AbortSignal): AsyncGenerator<void> {
+    if (signal.aborted) {
+      return;
+    }
+
+    const query = () => {
+      return this.db.checkpoint_events.find(
+        {},
+        { tailable: true, awaitData: true, maxAwaitTimeMS: 10_000, batchSize: 1000 }
+      );
+    };
+
+    let cursor = query();
+
+    signal.addEventListener('abort', () => {
+      cursor.close().catch(() => {});
+    });
+
+    // Yield once on start, regardless of whether there are documents in the cursor.
+    // This is to ensure that the first iteration of the generator yields immediately.
+    yield;
+
+    try {
+      while (!signal.aborted) {
+        const doc = await cursor.tryNext().catch((e) => {
+          if (lib_mongo.isMongoServerError(e) && e.codeName === 'CappedPositionLost') {
+            // Cursor position lost, potentially due to a high rate of notifications
+            cursor = query();
+            // Treat as an event found, before querying the new cursor again
+            return {};
+          } else {
+            return Promise.reject(e);
+          }
+        });
+        if (cursor.closed) {
+          return;
         }
-
-
-
-
-
-        'updateDescription.updatedFields.state': 1,
-        'updateDescription.updatedFields.last_checkpoint': 1,
-        'updateDescription.updatedFields.last_checkpoint_lsn': 1,
-        'fullDocument._id': 1,
-        'fullDocument.state': 1,
-        'fullDocument.last_checkpoint': 1,
-        'fullDocument.last_checkpoint_lsn': 1
+        // Skip buffered documents, if any. We don't care about the contents,
+        // we only want to know when new documents are inserted.
+        cursor.readBufferedDocuments();
+        if (doc != null) {
+          yield;
         }
       }
-
-
+    } catch (e) {
+      if (signal.aborted) {
+        return;
+      }
+      throw e;
+    } finally {
+      await cursor.close();
+    }
   }

   private async getDataBucketChanges(
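Note: the hunk above replaces the previous change-stream watcher with `checkpointChangesStream`, a tailable `awaitData` cursor over the new `checkpoint_events` capped collection (created by the `1749720702136-checkpoint-events` migration); writers signal checkpoints by calling `db.notifyCheckpoint()`, added to `db.ts` in this release. The standalone sketch below shows the same wake-up pattern with the MongoDB Node.js driver; the database and collection names are assumptions, and the `CappedPositionLost` retry handled in the real code is omitted for brevity.

```ts
import { MongoClient } from 'mongodb';

// Sketch: wake-up notifications via a tailable cursor on a capped collection.
// Names are illustrative; the real implementation also re-creates the cursor on
// CappedPositionLost and drains buffered documents with readBufferedDocuments().
async function* watchCheckpointEvents(client: MongoClient, signal: AbortSignal): AsyncGenerator<void> {
  const events = client.db('powersync_storage').collection('checkpoint_events');
  const cursor = events.find({}, { tailable: true, awaitData: true, maxAwaitTimeMS: 10_000 });
  signal.addEventListener('abort', () => void cursor.close().catch(() => {}));

  yield; // emit once immediately so the consumer reads the current checkpoint state

  try {
    while (!signal.aborted && !cursor.closed) {
      // tryNext() resolves with null when maxAwaitTimeMS elapses without new documents.
      const doc = await cursor.tryNext();
      if (doc != null) {
        yield; // one or more events were inserted; the consumer re-queries sync_rules
      }
    }
  } finally {
    await cursor.close();
  }
}
```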
@@ -922,7 +897,7 @@ export class MongoSyncBucketStorage
       {
         // We have an index on (_id.g, last_op).
         '_id.g': this.group_id,
-        last_op: { $gt:
+        last_op: { $gt: options.lastCheckpoint.checkpoint }
       },
       {
         projection: {
@@ -951,7 +926,7 @@ export class MongoSyncBucketStorage
     const parameterUpdates = await this.db.bucket_parameters
       .find(
         {
-          _id: { $gt:
+          _id: { $gt: options.lastCheckpoint.checkpoint, $lte: options.nextCheckpoint.checkpoint },
           'key.g': this.group_id
         },
         {
@@ -979,7 +954,11 @@ export class MongoSyncBucketStorage
   // TODO (later):
   // We can optimize this by implementing it like ChecksumCache: We can use partial cache results to do
   // more efficient lookups in some cases.
-  private checkpointChangesCache = new LRUCache<
+  private checkpointChangesCache = new LRUCache<
+    string,
+    InternalCheckpointChanges,
+    { options: GetCheckpointChangesOptions }
+  >({
     // Limit to 50 cache entries, or 10MB, whichever comes first.
     // Some rough calculations:
     // If we process 10 checkpoints per second, and a connection may be 2 seconds behind, we could have
@@ -987,31 +966,39 @@ export class MongoSyncBucketStorage
     // That is a worst-case scenario, so we don't actually store that many. In real life, the cache keys
     // would likely be clustered around a few values, rather than spread over all 400 potential values.
     max: 50,
-    maxSize:
-    sizeCalculation: (value:
+    maxSize: 12 * 1024 * 1024,
+    sizeCalculation: (value: InternalCheckpointChanges) => {
       // Estimate of memory usage
       const paramSize = [...value.updatedParameterLookups].reduce<number>((a, b) => a + b.length, 0);
       const bucketSize = [...value.updatedDataBuckets].reduce<number>((a, b) => a + b.length, 0);
-
+      const writeCheckpointSize = value.updatedWriteCheckpoints.size * 30; // estiamte for user_id + bigint
+      return 100 + paramSize + bucketSize + writeCheckpointSize;
     },
     fetchMethod: async (_key, _staleValue, options) => {
       return this.getCheckpointChangesInternal(options.context.options);
     }
   });

-  async getCheckpointChanges(options: GetCheckpointChangesOptions): Promise<
-    const key = `${options.lastCheckpoint}_${options.nextCheckpoint}`;
+  async getCheckpointChanges(options: GetCheckpointChangesOptions): Promise<InternalCheckpointChanges> {
+    const key = `${options.lastCheckpoint.checkpoint}_${options.lastCheckpoint.lsn}__${options.nextCheckpoint.checkpoint}_${options.nextCheckpoint.lsn}`;
     const result = await this.checkpointChangesCache.fetch(key, { context: { options } });
     return result!;
   }

-  private async getCheckpointChangesInternal(options: GetCheckpointChangesOptions): Promise<
+  private async getCheckpointChangesInternal(options: GetCheckpointChangesOptions): Promise<InternalCheckpointChanges> {
     const dataUpdates = await this.getDataBucketChanges(options);
     const parameterUpdates = await this.getParameterBucketChanges(options);
+    const writeCheckpointUpdates = await this.writeCheckpointAPI.getWriteCheckpointChanges(options);

     return {
       ...dataUpdates,
-      ...parameterUpdates
+      ...parameterUpdates,
+      ...writeCheckpointUpdates
     };
   }
 }
+
+interface InternalCheckpointChanges extends CheckpointChanges {
+  updatedWriteCheckpoints: Map<string, bigint>;
+  invalidateWriteCheckpoints: boolean;
+}
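Note: `checkpointChangesCache` now carries explicit `lru-cache` type parameters, `LRUCache<string, InternalCheckpointChanges, { options: GetCheckpointChangesOptions }>`, where the third parameter types the context passed to `fetch()`. Below is a self-contained sketch of the same fetch-with-context pattern, with simplified, hypothetical types standing in for the package's own:

```ts
import { LRUCache } from 'lru-cache';

// Simplified stand-ins for InternalCheckpointChanges / GetCheckpointChangesOptions.
interface Changes { buckets: string[] }
interface FetchContext { from: bigint; to: bigint }

const cache = new LRUCache<string, Changes, FetchContext>({
  max: 50,
  maxSize: 12 * 1024 * 1024,
  sizeCalculation: (value) => 100 + value.buckets.reduce((a, b) => a + b.length, 0),
  fetchMethod: async (_key, _staleValue, { context }) => {
    // The expensive lookup is keyed by the checkpoint range carried in the context.
    return computeChanges(context.from, context.to);
  }
});

async function getChanges(from: bigint, to: bigint): Promise<Changes> {
  const key = `${from}_${to}`;
  return (await cache.fetch(key, { context: { from, to } }))!;
}

async function computeChanges(from: bigint, to: bigint): Promise<Changes> {
  return { buckets: [] }; // placeholder for the real query
}
```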
package/src/storage/implementation/MongoTestStorageFactoryGenerator.ts

@@ -16,6 +16,9 @@ export const MongoTestStorageFactoryGenerator = (factoryOptions: MongoTestStorag
       await db.db.createCollection('bucket_parameters');
     }

+    // Full migrations are not currently run for tests, so we manually create this
+    await db.createCheckpointEventsCollection();
+
     if (!options?.doNotClear) {
       await db.clear();
     }