@powersync/service-module-mongodb-storage 0.7.2 → 0.8.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +22 -0
- package/dist/migrations/db/migrations/1741697235857-bucket-state-index.d.ts +3 -0
- package/dist/migrations/db/migrations/1741697235857-bucket-state-index.js +28 -0
- package/dist/migrations/db/migrations/1741697235857-bucket-state-index.js.map +1 -0
- package/dist/storage/implementation/MongoCompactor.js +3 -1
- package/dist/storage/implementation/MongoCompactor.js.map +1 -1
- package/dist/storage/implementation/MongoSyncBucketStorage.d.ts +7 -4
- package/dist/storage/implementation/MongoSyncBucketStorage.js +128 -31
- package/dist/storage/implementation/MongoSyncBucketStorage.js.map +1 -1
- package/dist/storage/implementation/MongoWriteCheckpointAPI.d.ts +15 -2
- package/dist/storage/implementation/MongoWriteCheckpointAPI.js +193 -17
- package/dist/storage/implementation/MongoWriteCheckpointAPI.js.map +1 -1
- package/dist/storage/implementation/PersistedBatch.d.ts +8 -0
- package/dist/storage/implementation/PersistedBatch.js +44 -0
- package/dist/storage/implementation/PersistedBatch.js.map +1 -1
- package/dist/storage/implementation/db.d.ts +2 -1
- package/dist/storage/implementation/db.js +4 -0
- package/dist/storage/implementation/db.js.map +1 -1
- package/dist/storage/implementation/models.d.ts +17 -0
- package/package.json +4 -4
- package/src/migrations/db/migrations/1741697235857-bucket-state-index.ts +40 -0
- package/src/storage/implementation/MongoCompactor.ts +3 -1
- package/src/storage/implementation/MongoSyncBucketStorage.ts +164 -35
- package/src/storage/implementation/MongoWriteCheckpointAPI.ts +262 -25
- package/src/storage/implementation/PersistedBatch.ts +52 -0
- package/src/storage/implementation/db.ts +5 -0
- package/src/storage/implementation/models.ts +18 -0
- package/test/src/__snapshots__/storage_sync.test.ts.snap +171 -0
- package/tsconfig.tsbuildinfo +1 -1
package/src/storage/implementation/MongoSyncBucketStorage.ts:

@@ -11,25 +11,29 @@ import {
   BroadcastIterable,
   CHECKPOINT_INVALIDATE_ALL,
   CheckpointChanges,
+  deserializeParameterLookup,
   GetCheckpointChangesOptions,
   InternalOpId,
   internalToExternalOpId,
+  mergeAsyncIterables,
   ProtocolOpId,
   ReplicationCheckpoint,
-  SourceTable,
   storage,
   utils,
-  WatchWriteCheckpointOptions
+  WatchWriteCheckpointOptions,
+  WriteCheckpointResult
 } from '@powersync/service-core';
-import {
+import { JSONBig } from '@powersync/service-jsonbig';
+import { ParameterLookup, SqliteJsonRow, SqlSyncRules } from '@powersync/service-sync-rules';
 import * as bson from 'bson';
-import {
+import { LRUCache } from 'lru-cache';
 import * as timers from 'timers/promises';
 import { MongoBucketStorage } from '../MongoBucketStorage.js';
 import { PowerSyncMongo } from './db.js';
 import {
   BucketDataDocument,
   BucketDataKey,
+  BucketStateDocument,
   SourceKey,
   SourceTableDocument,
   SyncRuleCheckpointState,
@@ -65,7 +69,8 @@ export class MongoSyncBucketStorage
     this.db = factory.db;
     this.writeCheckpointAPI = new MongoWriteCheckpointAPI({
       db: this.db,
-      mode: writeCheckpointMode
+      mode: writeCheckpointMode,
+      sync_rules_id: group_id
     });
   }

@@ -83,13 +88,6 @@ export class MongoSyncBucketStorage
     );
   }

-  createCustomWriteCheckpoint(checkpoint: storage.BatchedCustomWriteCheckpointOptions): Promise<bigint> {
-    return this.writeCheckpointAPI.createCustomWriteCheckpoint({
-      ...checkpoint,
-      sync_rules_id: this.group_id
-    });
-  }
-
   createManagedWriteCheckpoint(checkpoint: storage.ManagedWriteCheckpointOptions): Promise<bigint> {
     return this.writeCheckpointAPI.createManagedWriteCheckpoint(checkpoint);
   }
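With the per-checkpoint wrapper removed, custom write checkpoints appear to flow only through the batch helper that remains in this package (`batchCreateCustomWriteCheckpoints`, shown near the end of this diff). A minimal sketch of that path, assuming `db` is an existing `PowerSyncMongo` instance; the field names are taken from the removed wrapper above:

```ts
import { batchCreateCustomWriteCheckpoints } from './MongoWriteCheckpointAPI.js';

// Hypothetical call site - `db` is assumed to be a connected PowerSyncMongo.
await batchCreateCustomWriteCheckpoints(db, [
  { user_id: 'user-1', checkpoint: 10n, sync_rules_id: 1 }
]);
```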
@@ -154,7 +152,7 @@ export class MongoSyncBucketStorage

     await callback(batch);
     await batch.flush();
-    if (batch.last_flushed_op) {
+    if (batch.last_flushed_op != null) {
       return { flushed_op: batch.last_flushed_op };
     } else {
       return null;
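The explicit `!= null` check matters because `last_flushed_op` is a `bigint`: `0n` is falsy, so the old truthiness check would have reported no flush for a legitimate op id of zero. A quick illustration (not from the package):

```ts
const lastFlushedOp: bigint | null = 0n;

if (lastFlushedOp) {
  // never reached for 0n - the old check silently dropped op id 0
}
if (lastFlushedOp != null) {
  // reached for 0n - only a missing value means "nothing was flushed"
}
```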
@@ -252,7 +250,7 @@ export class MongoSyncBucketStorage
     return result!;
   }

-  async getParameterSets(checkpoint: utils.InternalOpId, lookups:
+  async getParameterSets(checkpoint: utils.InternalOpId, lookups: ParameterLookup[]): Promise<SqliteJsonRow[]> {
     const lookupFilter = lookups.map((lookup) => {
       return storage.serializeLookup(lookup);
     });
@@ -585,6 +583,13 @@ export class MongoSyncBucketStorage
       { maxTimeMS: lib_mongo.db.MONGO_CLEAR_OPERATION_TIMEOUT_MS }
     );

+    await this.db.bucket_state.deleteMany(
+      {
+        _id: idPrefixFilter<BucketStateDocument['_id']>({ g: this.group_id }, ['b'])
+      },
+      { maxTimeMS: lib_mongo.db.MONGO_CLEAR_OPERATION_TIMEOUT_MS }
+    );
+
     await this.db.source_tables.deleteMany(
       {
         group_id: this.group_id
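`idPrefixFilter` is internal to the package, but its intent is recoverable from the call: match every `bucket_state` document whose composite `_id` starts with `{ g: group_id }`, leaving the `b` (bucket) component unconstrained. A rough sketch of the underlying MongoDB range trick, under the assumption that this is roughly how the helper works:

```ts
import { MaxKey, MinKey } from 'bson';

// For composite _id documents shaped like { g: number; b: string }, BSON
// compares embedded documents field by field, so a MinKey/MaxKey range over
// the unconstrained suffix selects every _id with the given prefix.
function idPrefixFilterSketch(groupId: number) {
  return {
    $gte: { g: groupId, b: new MinKey() },
    $lt: { g: groupId, b: new MaxKey() }
  };
}

// e.g. await db.bucket_state.deleteMany({ _id: idPrefixFilterSketch(groupId) });
```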
@@ -694,8 +699,7 @@ export class MongoSyncBucketStorage
       if (doc == null) {
         // Sync rules not present or not active.
         // Abort the connections - clients will have to retry later.
-
-        return;
+        throw new ServiceError(ErrorCode.PSYNC_S2302, 'No active sync rules available');
       }

       yield this.makeActiveCheckpoint(doc);
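This changes the failure mode from a silently completed iterator to an explicit error, so consumers can tell "no active sync rules, retry later" apart from a normal end of stream. A hedged sketch of a consumer-side handler (the consumer code below is assumed, not part of this package):

```ts
// `bucketStorage` and `options` stand in for an instance of this storage
// class and its watch options; only the control flow is the point here.
async function consumeCheckpoints(bucketStorage: any, options: any) {
  try {
    for await (const update of bucketStorage.watchCheckpointChanges(options)) {
      // process checkpoint / write checkpoint updates...
    }
  } catch (e) {
    // PSYNC_S2302: no active sync rules - back off and retry the connection
  }
}
```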
@@ -739,7 +743,7 @@ export class MongoSyncBucketStorage
       }
       if (doc.state != storage.SyncRuleState.ACTIVE && doc.state != storage.SyncRuleState.ERRORED) {
         // Sync rules have changed - abort and restart.
-        //
+        // We do a soft close of the stream here - no error
         break;
       }

@@ -762,28 +766,60 @@ export class MongoSyncBucketStorage
   /**
    * User-specific watch on the latest checkpoint and/or write checkpoint.
    */
-  async *
-    const {
+  async *watchCheckpointChanges(options: WatchWriteCheckpointOptions): AsyncIterable<storage.StorageCheckpointUpdate> {
+    const { signal } = options;
     let lastCheckpoint: utils.InternalOpId | null = null;
     let lastWriteCheckpoint: bigint | null = null;
+    let lastWriteCheckpointDoc: WriteCheckpointResult | null = null;
+    let nextWriteCheckpoint: bigint | null = null;
+    let lastCheckpointEvent: ReplicationCheckpoint | null = null;
+    let receivedWriteCheckpoint = false;
+
+    const writeCheckpointIter = this.writeCheckpointAPI.watchUserWriteCheckpoint({
+      user_id: options.user_id,
+      signal,
+      sync_rules_id: this.group_id
+    });
+    const iter = mergeAsyncIterables<ReplicationCheckpoint | storage.WriteCheckpointResult>(
+      [this.sharedIter, writeCheckpointIter],
+      signal
+    );

-    const iter = wrapWithAbort(this.sharedIter, signal);
     for await (const event of iter) {
-
+      if ('checkpoint' in event) {
+        lastCheckpointEvent = event;
+      } else {
+        lastWriteCheckpointDoc = event;
+        receivedWriteCheckpoint = true;
+      }
+
+      if (lastCheckpointEvent == null || !receivedWriteCheckpoint) {
+        // We need to wait until we have received at least one checkpoint, and one write checkpoint.
+        continue;
+      }

       // lsn changes are not important by themselves.
       // What is important is:
       // 1. checkpoint (op_id) changes.
       // 2. write checkpoint changes for the specific user

-      const
+      const lsn = lastCheckpointEvent?.lsn;

-
-
-
-
+      if (
+        lastWriteCheckpointDoc != null &&
+        (lastWriteCheckpointDoc.lsn == null || (lsn != null && lsn >= lastWriteCheckpointDoc.lsn))
+      ) {
+        const writeCheckpoint = lastWriteCheckpointDoc.id;
+        if (nextWriteCheckpoint == null || (writeCheckpoint != null && writeCheckpoint > nextWriteCheckpoint)) {
+          nextWriteCheckpoint = writeCheckpoint;
         }
-
+        // We used the doc - clear it
+        lastWriteCheckpointDoc = null;
+      }
+
+      const { checkpoint } = lastCheckpointEvent;
+
+      const currentWriteCheckpoint = nextWriteCheckpoint;

       if (currentWriteCheckpoint == lastWriteCheckpoint && checkpoint == lastCheckpoint) {
         // No change - wait for next one
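The rewritten loop consumes a single merged stream of two event shapes and tells them apart structurally with `'checkpoint' in event`. A minimal sketch of that merge-and-discriminate pattern, with hypothetical event types standing in for the package's `ReplicationCheckpoint` and `WriteCheckpointResult`:

```ts
type CheckpointEvent = { checkpoint: bigint; lsn: string | null };
type WriteCheckpointEvent = { id: bigint | null; lsn: string | null };

async function* consume(
  merged: AsyncIterable<CheckpointEvent | WriteCheckpointEvent>
): AsyncGenerator<{ checkpoint: bigint; writeCheckpoint: bigint | null }> {
  let lastCheckpoint: CheckpointEvent | null = null;
  let lastWrite: WriteCheckpointEvent | null = null;

  for await (const event of merged) {
    // Structural discrimination: only checkpoint events carry a `checkpoint` field.
    if ('checkpoint' in event) {
      lastCheckpoint = event;
    } else {
      lastWrite = event;
    }
    // Emit only after both sides have produced at least one value.
    if (lastCheckpoint == null || lastWrite == null) {
      continue;
    }
    yield { checkpoint: lastCheckpoint.checkpoint, writeCheckpoint: lastWrite.id };
  }
}
```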
@@ -795,12 +831,7 @@ export class MongoSyncBucketStorage

       const updates: CheckpointChanges =
         lastCheckpoint == null
-          ? {
-              invalidateDataBuckets: true,
-              invalidateParameterBuckets: true,
-              updatedDataBuckets: [],
-              updatedParameterBucketDefinitions: []
-            }
+          ? CHECKPOINT_INVALIDATE_ALL
           : await this.getCheckpointChanges({
               lastCheckpoint: lastCheckpoint,
               nextCheckpoint: checkpoint
@@ -810,7 +841,7 @@ export class MongoSyncBucketStorage
       lastCheckpoint = checkpoint;

       yield {
-        base:
+        base: lastCheckpointEvent,
         writeCheckpoint: currentWriteCheckpoint,
         update: updates
       };
@@ -869,7 +900,105 @@ export class MongoSyncBucketStorage
     return pipeline;
   }

+  private async getDataBucketChanges(
+    options: GetCheckpointChangesOptions
+  ): Promise<Pick<CheckpointChanges, 'updatedDataBuckets' | 'invalidateDataBuckets'>> {
+    const limit = 1000;
+    const bucketStateUpdates = await this.db.bucket_state
+      .find(
+        {
+          // We have an index on (_id.g, last_op).
+          '_id.g': this.group_id,
+          last_op: { $gt: BigInt(options.lastCheckpoint) }
+        },
+        {
+          projection: {
+            '_id.b': 1
+          },
+          limit: limit + 1,
+          batchSize: limit + 1,
+          singleBatch: true
+        }
+      )
+      .toArray();
+
+    const buckets = bucketStateUpdates.map((doc) => doc._id.b);
+    const invalidateDataBuckets = buckets.length > limit;
+
+    return {
+      invalidateDataBuckets: invalidateDataBuckets,
+      updatedDataBuckets: invalidateDataBuckets ? new Set<string>() : new Set(buckets)
+    };
+  }
+
+  private async getParameterBucketChanges(
+    options: GetCheckpointChangesOptions
+  ): Promise<Pick<CheckpointChanges, 'updatedParameterLookups' | 'invalidateParameterBuckets'>> {
+    const limit = 1000;
+    const parameterUpdates = await this.db.bucket_parameters
+      .find(
+        {
+          _id: { $gt: BigInt(options.lastCheckpoint), $lte: BigInt(options.nextCheckpoint) },
+          'key.g': this.group_id
+        },
+        {
+          projection: {
+            lookup: 1
+          },
+          limit: limit + 1,
+          batchSize: limit + 1,
+          singleBatch: true
+        }
+      )
+      .toArray();
+    const invalidateParameterUpdates = parameterUpdates.length > limit;
+
+    return {
+      invalidateParameterBuckets: invalidateParameterUpdates,
+      updatedParameterLookups: invalidateParameterUpdates
+        ? new Set<string>()
+        : new Set<string>(parameterUpdates.map((p) => JSONBig.stringify(deserializeParameterLookup(p.lookup))))
+    };
+  }
+
+  // If we processed all connections together for each checkpoint, we could do a single lookup for all connections.
+  // In practice, specific connections may fall behind. So instead, we just cache the results of each specific lookup.
+  // TODO (later):
+  // We can optimize this by implementing it like ChecksumCache: We can use partial cache results to do
+  // more efficient lookups in some cases.
+  private checkpointChangesCache = new LRUCache<string, CheckpointChanges, { options: GetCheckpointChangesOptions }>({
+    // Limit to 50 cache entries, or 10MB, whichever comes first.
+    // Some rough calculations:
+    // If we process 10 checkpoints per second, and a connection may be 2 seconds behind, we could have
+    // up to 20 relevant checkpoints. That gives us 20*20 = 400 potentially-relevant cache entries.
+    // That is a worst-case scenario, so we don't actually store that many. In real life, the cache keys
+    // would likely be clustered around a few values, rather than spread over all 400 potential values.
+    max: 50,
+    maxSize: 10 * 1024 * 1024,
+    sizeCalculation: (value: CheckpointChanges) => {
+      // Estimate of memory usage
+      const paramSize = [...value.updatedParameterLookups].reduce<number>((a, b) => a + b.length, 0);
+      const bucketSize = [...value.updatedDataBuckets].reduce<number>((a, b) => a + b.length, 0);
+      return 100 + paramSize + bucketSize;
+    },
+    fetchMethod: async (_key, _staleValue, options) => {
+      return this.getCheckpointChangesInternal(options.context.options);
+    }
+  });
+
   async getCheckpointChanges(options: GetCheckpointChangesOptions): Promise<CheckpointChanges> {
-
+    const key = `${options.lastCheckpoint}_${options.nextCheckpoint}`;
+    const result = await this.checkpointChangesCache.fetch(key, { context: { options } });
+    return result!;
+  }
+
+  private async getCheckpointChangesInternal(options: GetCheckpointChangesOptions): Promise<CheckpointChanges> {
+    const dataUpdates = await this.getDataBucketChanges(options);
+    const parameterUpdates = await this.getParameterBucketChanges(options);
+
+    return {
+      ...dataUpdates,
+      ...parameterUpdates
+    };
   }
 }
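The cache above is a read-through memoization of `(lastCheckpoint, nextCheckpoint)` range lookups: `fetch()` calls for the same key share one in-flight `fetchMethod` invocation, so each range is computed at most once even with many concurrent connections. A standalone sketch of the same lru-cache pattern, with a simplified value type:

```ts
import { LRUCache } from 'lru-cache';

type Changes = { updated: Set<string> };

// Hypothetical range computation standing in for getCheckpointChangesInternal.
async function computeChanges(from: bigint, to: bigint): Promise<Changes> {
  return { updated: new Set([`range_${from}_${to}`]) };
}

const cache = new LRUCache<string, Changes, { from: bigint; to: bigint }>({
  max: 50,
  maxSize: 10 * 1024 * 1024,
  // Rough memory estimate: fixed overhead plus total string length.
  sizeCalculation: (value) => 100 + [...value.updated].reduce((a, b) => a + b.length, 0),
  // Runs on cache miss; concurrent fetch() calls for the same key await the
  // same promise, which is what makes this safe under connection fan-out.
  fetchMethod: async (_key, _stale, { context }) => computeChanges(context.from, context.to)
});

const result = await cache.fetch('1_2', { context: { from: 1n, to: 2n } });
```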
package/src/storage/implementation/MongoWriteCheckpointAPI.ts:

@@ -1,19 +1,30 @@
+import { mongo } from '@powersync/lib-service-mongodb';
 import * as framework from '@powersync/lib-services-framework';
-import {
+import {
+  Demultiplexer,
+  DemultiplexerValue,
+  storage,
+  WatchUserWriteCheckpointOptions,
+  WriteCheckpointResult
+} from '@powersync/service-core';
 import { PowerSyncMongo } from './db.js';
+import { CustomWriteCheckpointDocument, WriteCheckpointDocument } from './models.js';

 export type MongoCheckpointAPIOptions = {
   db: PowerSyncMongo;
   mode: storage.WriteCheckpointMode;
+  sync_rules_id: number;
 };

 export class MongoWriteCheckpointAPI implements storage.WriteCheckpointAPI {
   readonly db: PowerSyncMongo;
   private _mode: storage.WriteCheckpointMode;
+  private sync_rules_id: number;

   constructor(options: MongoCheckpointAPIOptions) {
     this.db = options.db;
     this._mode = options.mode;
+    this.sync_rules_id = options.sync_rules_id;
   }

   get writeCheckpointMode() {
@@ -28,29 +39,6 @@ export class MongoWriteCheckpointAPI implements storage.WriteCheckpointAPI {
     return batchCreateCustomWriteCheckpoints(this.db, checkpoints);
   }

-  async createCustomWriteCheckpoint(options: storage.CustomWriteCheckpointOptions): Promise<bigint> {
-    if (this.writeCheckpointMode !== storage.WriteCheckpointMode.CUSTOM) {
-      throw new framework.errors.ValidationError(
-        `Creating a custom Write Checkpoint when the current Write Checkpoint mode is set to "${this.writeCheckpointMode}"`
-      );
-    }
-
-    const { checkpoint, user_id, sync_rules_id } = options;
-    const doc = await this.db.custom_write_checkpoints.findOneAndUpdate(
-      {
-        user_id: user_id,
-        sync_rules_id
-      },
-      {
-        $set: {
-          checkpoint
-        }
-      },
-      { upsert: true, returnDocument: 'after' }
-    );
-    return doc!.checkpoint;
-  }
-
   async createManagedWriteCheckpoint(checkpoint: storage.ManagedWriteCheckpointOptions): Promise<bigint> {
     if (this.writeCheckpointMode !== storage.WriteCheckpointMode.MANAGED) {
       throw new framework.errors.ValidationError(
@@ -93,6 +81,231 @@ export class MongoWriteCheckpointAPI implements storage.WriteCheckpointAPI {
     }
   }

+  watchUserWriteCheckpoint(options: WatchUserWriteCheckpointOptions): AsyncIterable<storage.WriteCheckpointResult> {
+    switch (this.writeCheckpointMode) {
+      case storage.WriteCheckpointMode.CUSTOM:
+        return this.watchCustomWriteCheckpoint(options);
+      case storage.WriteCheckpointMode.MANAGED:
+        return this.watchManagedWriteCheckpoint(options);
+      default:
+        throw new Error('Invalid write checkpoint mode');
+    }
+  }
+
+  private sharedManagedIter = new Demultiplexer<WriteCheckpointResult>((signal) => {
+    const clusterTimePromise = this.getClusterTime();
+
+    return {
+      iterator: this.watchAllManagedWriteCheckpoints(clusterTimePromise, signal),
+      getFirstValue: async (user_id: string) => {
+        // Potential race conditions we cater for:
+
+        // Case 1: changestream is behind.
+        // We get a doc now, then the same or older doc again later.
+        // No problem!
+
+        // Case 2: Query is behind. I.e. doc has been created, and emitted on the changestream, but the query doesn't see it yet.
+        // Not possible luckily, but can we make sure?
+
+        // Case 3: changestream delays opening. A doc is created after our query here, but before the changestream is opened.
+        // Awaiting clusterTimePromise should be sufficient here, but as a sanity check we also confirm that our query
+        // timestamp is > the startClusterTime.
+
+        const changeStreamStart = await clusterTimePromise;
+
+        let doc = null as WriteCheckpointDocument | null;
+        let clusterTime = null as mongo.Timestamp | null;
+
+        await this.db.client.withSession(async (session) => {
+          doc = await this.db.write_checkpoints.findOne(
+            {
+              user_id: user_id
+            },
+            {
+              session
+            }
+          );
+          const time = session.clusterTime?.clusterTime ?? null;
+          clusterTime = time;
+        });
+        if (clusterTime == null) {
+          throw new framework.ServiceAssertionError('Could not get clusterTime for write checkpoint');
+        }
+
+        if (clusterTime.lessThan(changeStreamStart)) {
+          throw new framework.ServiceAssertionError(
+            'clusterTime for write checkpoint is older than changestream start'
+          );
+        }
+
+        if (doc == null) {
+          return {
+            id: null,
+            lsn: null
+          };
+        }
+
+        return {
+          id: doc.client_id,
+          lsn: doc.lsns['1']
+        };
+      }
+    };
+  });
+
+  private async *watchAllManagedWriteCheckpoints(
+    clusterTimePromise: Promise<mongo.BSON.Timestamp>,
+    signal: AbortSignal
+  ): AsyncGenerator<DemultiplexerValue<WriteCheckpointResult>> {
+    const clusterTime = await clusterTimePromise;
+
+    const stream = this.db.write_checkpoints.watch(
+      [{ $match: { operationType: { $in: ['insert', 'update', 'replace'] } } }],
+      {
+        fullDocument: 'updateLookup',
+        startAtOperationTime: clusterTime
+      }
+    );
+
+    signal.onabort = () => {
+      stream.close();
+    };
+
+    if (signal.aborted) {
+      stream.close();
+      return;
+    }
+
+    for await (let event of stream) {
+      if (!('fullDocument' in event) || event.fullDocument == null) {
+        continue;
+      }
+
+      const user_id = event.fullDocument.user_id;
+      yield {
+        key: user_id,
+        value: {
+          id: event.fullDocument.client_id,
+          lsn: event.fullDocument.lsns['1']
+        }
+      };
+    }
+  }
+
+  watchManagedWriteCheckpoint(options: WatchUserWriteCheckpointOptions): AsyncIterable<storage.WriteCheckpointResult> {
+    const stream = this.sharedManagedIter.subscribe(options.user_id, options.signal);
+    return this.orderedStream(stream);
+  }
+
+  private sharedCustomIter = new Demultiplexer<WriteCheckpointResult>((signal) => {
+    const clusterTimePromise = this.getClusterTime();
+
+    return {
+      iterator: this.watchAllCustomWriteCheckpoints(clusterTimePromise, signal),
+      getFirstValue: async (user_id: string) => {
+        // We cater for the same potential race conditions as for managed write checkpoints.
+
+        const changeStreamStart = await clusterTimePromise;
+
+        let doc = null as CustomWriteCheckpointDocument | null;
+        let clusterTime = null as mongo.Timestamp | null;
+
+        await this.db.client.withSession(async (session) => {
+          doc = await this.db.custom_write_checkpoints.findOne(
+            {
+              user_id: user_id,
+              sync_rules_id: this.sync_rules_id
+            },
+            {
+              session
+            }
+          );
+          const time = session.clusterTime?.clusterTime ?? null;
+          clusterTime = time;
+        });
+        if (clusterTime == null) {
+          throw new framework.ServiceAssertionError('Could not get clusterTime for write checkpoint');
+        }
+
+        if (clusterTime.lessThan(changeStreamStart)) {
+          throw new framework.ServiceAssertionError(
+            'clusterTime for write checkpoint is older than changestream start'
+          );
+        }
+
+        if (doc == null) {
+          // No write checkpoint, but we still need to return a result
+          return {
+            id: null,
+            lsn: null
+          };
+        }
+
+        return {
+          id: doc.checkpoint,
+          // custom write checkpoints are not tied to an LSN
+          lsn: null
+        };
+      }
+    };
+  });
+
+  private async *watchAllCustomWriteCheckpoints(
+    clusterTimePromise: Promise<mongo.BSON.Timestamp>,
+    signal: AbortSignal
+  ): AsyncGenerator<DemultiplexerValue<WriteCheckpointResult>> {
+    const clusterTime = await clusterTimePromise;
+
+    const stream = this.db.custom_write_checkpoints.watch(
+      [
+        {
+          $match: {
+            'fullDocument.sync_rules_id': this.sync_rules_id,
+            operationType: { $in: ['insert', 'update', 'replace'] }
+          }
+        }
+      ],
+      {
+        fullDocument: 'updateLookup',
+        startAtOperationTime: clusterTime
+      }
+    );
+
+    signal.onabort = () => {
+      stream.close();
+    };
+
+    if (signal.aborted) {
+      stream.close();
+      return;
+    }
+
+    for await (let event of stream) {
+      if (!('fullDocument' in event) || event.fullDocument == null) {
+        continue;
+      }
+
+      const user_id = event.fullDocument.user_id;
+      yield {
+        key: user_id,
+        value: {
+          id: event.fullDocument.checkpoint,
+          // Custom write checkpoints are not tied to a specific LSN
+          lsn: null
+        }
+      };
+    }
+  }
+
+  watchCustomWriteCheckpoint(options: WatchUserWriteCheckpointOptions): AsyncIterable<storage.WriteCheckpointResult> {
+    if (options.sync_rules_id != this.sync_rules_id) {
+      throw new framework.ServiceAssertionError('sync_rules_id does not match');
+    }
+
+    const stream = this.sharedCustomIter.subscribe(options.user_id, options.signal);
+    return this.orderedStream(stream);
+  }
+
   protected async lastCustomWriteCheckpoint(filters: storage.CustomWriteCheckpointFilters) {
     const { user_id, sync_rules_id } = filters;
     const lastWriteCheckpoint = await this.db.custom_write_checkpoints.findOne({
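The key structural idea in this hunk: one MongoDB change stream per collection is shared by all users, and a `Demultiplexer` fans keyed events out to per-user subscribers, with `getFirstValue` supplying the initial snapshot before stream events arrive. A conceptual sketch of that fan-out (simplified; the real `Demultiplexer` lives in `@powersync/service-core` and also handles subscriber lifecycle and stream teardown):

```ts
type Keyed<T> = { key: string; value: T };

// Hypothetical per-key subscription over a shared keyed source.
async function* subscribeSketch<T>(
  source: AsyncIterable<Keyed<T>>,
  getFirstValue: (key: string) => Promise<T>,
  key: string
): AsyncGenerator<T> {
  // Initial snapshot for this subscriber's key. In the code above this is a
  // session-scoped query whose cluster time is validated against the change
  // stream's startAtOperationTime to close the query/stream race.
  yield await getFirstValue(key);

  // Then forward only the events for this subscriber's key.
  for await (const event of source) {
    if (event.key === key) {
      yield event.value;
    }
  }
}
```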
@@ -116,13 +329,37 @@ export class MongoWriteCheckpointAPI implements storage.WriteCheckpointAPI {
     });
     return lastWriteCheckpoint?.client_id ?? null;
   }
+
+  private async getClusterTime(): Promise<mongo.Timestamp> {
+    const hello = await this.db.db.command({ hello: 1 });
+    // Note: This is not valid on sharded clusters.
+    const startClusterTime = hello.lastWrite?.majorityOpTime?.ts as mongo.Timestamp;
+    return startClusterTime;
+  }
+
+  /**
+   * Makes a write checkpoint stream an ordered one - any out-of-order events are discarded.
+   */
+  private async *orderedStream(stream: AsyncIterable<storage.WriteCheckpointResult>) {
+    let lastId = -1n;
+
+    for await (let event of stream) {
+      // Guard against out-of-order events
+      if (lastId == -1n || (event.id != null && event.id > lastId)) {
+        yield event;
+        if (event.id != null) {
+          lastId = event.id;
+        }
+      }
+    }
+  }
 }

 export async function batchCreateCustomWriteCheckpoints(
   db: PowerSyncMongo,
   checkpoints: storage.CustomWriteCheckpointOptions[]
 ): Promise<void> {
-  if (
+  if (checkpoints.length == 0) {
     return;
   }

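`orderedStream` makes the merged snapshot-plus-changestream output monotonic: the first event always passes, and after that only events with a strictly larger id are yielded. An illustrative driver (the values below are made up):

```ts
// A source that emits one stale event out of order.
async function* events(): AsyncGenerator<{ id: bigint | null; lsn: string | null }> {
  yield { id: 1n, lsn: null };
  yield { id: 3n, lsn: null };
  yield { id: 2n, lsn: null }; // stale - discarded by orderedStream
  yield { id: 4n, lsn: null };
}

// Feeding events() through orderedStream would yield ids 1n, 3n, 4n:
// consumers only ever see the write checkpoint move forward.
```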