@powersync/service-module-mongodb-storage 0.0.0-dev-20250108073049
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +19 -0
- package/LICENSE +67 -0
- package/README.md +3 -0
- package/dist/index.d.ts +5 -0
- package/dist/index.js +6 -0
- package/dist/index.js.map +1 -0
- package/dist/migrations/MongoMigrationAgent.d.ts +12 -0
- package/dist/migrations/MongoMigrationAgent.js +25 -0
- package/dist/migrations/MongoMigrationAgent.js.map +1 -0
- package/dist/migrations/db/migrations/1684951997326-init.d.ts +3 -0
- package/dist/migrations/db/migrations/1684951997326-init.js +30 -0
- package/dist/migrations/db/migrations/1684951997326-init.js.map +1 -0
- package/dist/migrations/db/migrations/1688556755264-initial-sync-rules.d.ts +2 -0
- package/dist/migrations/db/migrations/1688556755264-initial-sync-rules.js +5 -0
- package/dist/migrations/db/migrations/1688556755264-initial-sync-rules.js.map +1 -0
- package/dist/migrations/db/migrations/1702295701188-sync-rule-state.d.ts +3 -0
- package/dist/migrations/db/migrations/1702295701188-sync-rule-state.js +54 -0
- package/dist/migrations/db/migrations/1702295701188-sync-rule-state.js.map +1 -0
- package/dist/migrations/db/migrations/1711543888062-write-checkpoint-index.d.ts +3 -0
- package/dist/migrations/db/migrations/1711543888062-write-checkpoint-index.js +26 -0
- package/dist/migrations/db/migrations/1711543888062-write-checkpoint-index.js.map +1 -0
- package/dist/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.d.ts +3 -0
- package/dist/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.js +28 -0
- package/dist/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.js.map +1 -0
- package/dist/migrations/mongo-migration-store.d.ts +7 -0
- package/dist/migrations/mongo-migration-store.js +49 -0
- package/dist/migrations/mongo-migration-store.js.map +1 -0
- package/dist/module/MongoStorageModule.d.ts +10 -0
- package/dist/module/MongoStorageModule.js +31 -0
- package/dist/module/MongoStorageModule.js.map +1 -0
- package/dist/storage/MongoBucketStorage.d.ts +48 -0
- package/dist/storage/MongoBucketStorage.js +426 -0
- package/dist/storage/MongoBucketStorage.js.map +1 -0
- package/dist/storage/implementation/MongoBucketBatch.d.ts +72 -0
- package/dist/storage/implementation/MongoBucketBatch.js +681 -0
- package/dist/storage/implementation/MongoBucketBatch.js.map +1 -0
- package/dist/storage/implementation/MongoCompactor.d.ts +40 -0
- package/dist/storage/implementation/MongoCompactor.js +300 -0
- package/dist/storage/implementation/MongoCompactor.js.map +1 -0
- package/dist/storage/implementation/MongoIdSequence.d.ts +12 -0
- package/dist/storage/implementation/MongoIdSequence.js +21 -0
- package/dist/storage/implementation/MongoIdSequence.js.map +1 -0
- package/dist/storage/implementation/MongoPersistedSyncRules.d.ts +9 -0
- package/dist/storage/implementation/MongoPersistedSyncRules.js +9 -0
- package/dist/storage/implementation/MongoPersistedSyncRules.js.map +1 -0
- package/dist/storage/implementation/MongoPersistedSyncRulesContent.d.ts +20 -0
- package/dist/storage/implementation/MongoPersistedSyncRulesContent.js +26 -0
- package/dist/storage/implementation/MongoPersistedSyncRulesContent.js.map +1 -0
- package/dist/storage/implementation/MongoStorageProvider.d.ts +5 -0
- package/dist/storage/implementation/MongoStorageProvider.js +33 -0
- package/dist/storage/implementation/MongoStorageProvider.js.map +1 -0
- package/dist/storage/implementation/MongoSyncBucketStorage.d.ts +36 -0
- package/dist/storage/implementation/MongoSyncBucketStorage.js +529 -0
- package/dist/storage/implementation/MongoSyncBucketStorage.js.map +1 -0
- package/dist/storage/implementation/MongoSyncRulesLock.d.ts +16 -0
- package/dist/storage/implementation/MongoSyncRulesLock.js +65 -0
- package/dist/storage/implementation/MongoSyncRulesLock.js.map +1 -0
- package/dist/storage/implementation/MongoTestStorageFactoryGenerator.d.ts +7 -0
- package/dist/storage/implementation/MongoTestStorageFactoryGenerator.js +16 -0
- package/dist/storage/implementation/MongoTestStorageFactoryGenerator.js.map +1 -0
- package/dist/storage/implementation/MongoWriteCheckpointAPI.d.ts +20 -0
- package/dist/storage/implementation/MongoWriteCheckpointAPI.js +104 -0
- package/dist/storage/implementation/MongoWriteCheckpointAPI.js.map +1 -0
- package/dist/storage/implementation/OperationBatch.d.ts +34 -0
- package/dist/storage/implementation/OperationBatch.js +119 -0
- package/dist/storage/implementation/OperationBatch.js.map +1 -0
- package/dist/storage/implementation/PersistedBatch.d.ts +46 -0
- package/dist/storage/implementation/PersistedBatch.js +223 -0
- package/dist/storage/implementation/PersistedBatch.js.map +1 -0
- package/dist/storage/implementation/db.d.ts +36 -0
- package/dist/storage/implementation/db.js +47 -0
- package/dist/storage/implementation/db.js.map +1 -0
- package/dist/storage/implementation/models.d.ts +139 -0
- package/dist/storage/implementation/models.js +2 -0
- package/dist/storage/implementation/models.js.map +1 -0
- package/dist/storage/implementation/util.d.ts +46 -0
- package/dist/storage/implementation/util.js +155 -0
- package/dist/storage/implementation/util.js.map +1 -0
- package/dist/storage/storage-index.d.ts +14 -0
- package/dist/storage/storage-index.js +15 -0
- package/dist/storage/storage-index.js.map +1 -0
- package/dist/types/types.d.ts +18 -0
- package/dist/types/types.js +9 -0
- package/dist/types/types.js.map +1 -0
- package/package.json +48 -0
- package/src/index.ts +7 -0
- package/src/migrations/MongoMigrationAgent.ts +39 -0
- package/src/migrations/db/migrations/1684951997326-init.ts +39 -0
- package/src/migrations/db/migrations/1688556755264-initial-sync-rules.ts +5 -0
- package/src/migrations/db/migrations/1702295701188-sync-rule-state.ts +105 -0
- package/src/migrations/db/migrations/1711543888062-write-checkpoint-index.ts +38 -0
- package/src/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.ts +40 -0
- package/src/migrations/mongo-migration-store.ts +62 -0
- package/src/module/MongoStorageModule.ts +37 -0
- package/src/storage/MongoBucketStorage.ts +531 -0
- package/src/storage/implementation/MongoBucketBatch.ts +896 -0
- package/src/storage/implementation/MongoCompactor.ts +381 -0
- package/src/storage/implementation/MongoIdSequence.ts +24 -0
- package/src/storage/implementation/MongoPersistedSyncRules.ts +16 -0
- package/src/storage/implementation/MongoPersistedSyncRulesContent.ts +49 -0
- package/src/storage/implementation/MongoStorageProvider.ts +39 -0
- package/src/storage/implementation/MongoSyncBucketStorage.ts +612 -0
- package/src/storage/implementation/MongoSyncRulesLock.ts +88 -0
- package/src/storage/implementation/MongoTestStorageFactoryGenerator.ts +25 -0
- package/src/storage/implementation/MongoWriteCheckpointAPI.ts +146 -0
- package/src/storage/implementation/OperationBatch.ts +129 -0
- package/src/storage/implementation/PersistedBatch.ts +283 -0
- package/src/storage/implementation/db.ts +87 -0
- package/src/storage/implementation/models.ts +161 -0
- package/src/storage/implementation/util.ts +169 -0
- package/src/storage/storage-index.ts +14 -0
- package/src/types/types.ts +18 -0
- package/test/src/__snapshots__/storage_sync.test.ts.snap +332 -0
- package/test/src/env.ts +6 -0
- package/test/src/setup.ts +9 -0
- package/test/src/storage.test.ts +7 -0
- package/test/src/storage_compacting.test.ts +6 -0
- package/test/src/storage_sync.test.ts +113 -0
- package/test/src/util.ts +8 -0
- package/test/tsconfig.json +31 -0
- package/tsconfig.json +31 -0
- package/tsconfig.tsbuildinfo +1 -0
- package/vitest.config.ts +15 -0
package/src/storage/implementation/MongoSyncBucketStorage.ts
@@ -0,0 +1,612 @@
+import { SqliteJsonRow, SqliteJsonValue, SqlSyncRules } from '@powersync/service-sync-rules';
+import * as bson from 'bson';
+import * as mongo from 'mongodb';
+
+import * as lib_mongo from '@powersync/lib-service-mongodb';
+import { DisposableObserver, logger } from '@powersync/lib-services-framework';
+import { storage, utils } from '@powersync/service-core';
+import * as timers from 'timers/promises';
+import { MongoBucketStorage } from '../MongoBucketStorage.js';
+import { PowerSyncMongo } from './db.js';
+import { BucketDataDocument, BucketDataKey, SourceKey } from './models.js';
+import { MongoBucketBatch } from './MongoBucketBatch.js';
+import { MongoCompactor } from './MongoCompactor.js';
+import { MongoWriteCheckpointAPI } from './MongoWriteCheckpointAPI.js';
+import { idPrefixFilter, mapOpEntry, readSingleBatch } from './util.js';
+
+export class MongoSyncBucketStorage
+  extends DisposableObserver<storage.SyncRulesBucketStorageListener>
+  implements storage.SyncRulesBucketStorage
+{
+  private readonly db: PowerSyncMongo;
+  private checksumCache = new storage.ChecksumCache({
+    fetchChecksums: (batch) => {
+      return this.getChecksumsInternal(batch);
+    }
+  });
+
+  private parsedSyncRulesCache: { parsed: SqlSyncRules; options: storage.ParseSyncRulesOptions } | undefined;
+  private writeCheckpointAPI: storage.WriteCheckpointAPI;
+
+  constructor(
+    public readonly factory: MongoBucketStorage,
+    public readonly group_id: number,
+    private readonly sync_rules: storage.PersistedSyncRulesContent,
+    public readonly slot_name: string,
+    writeCheckpointMode: storage.WriteCheckpointMode = storage.WriteCheckpointMode.MANAGED
+  ) {
+    super();
+    this.db = factory.db;
+    this.writeCheckpointAPI = new MongoWriteCheckpointAPI({
+      db: this.db,
+      mode: writeCheckpointMode
+    });
+  }
+
+  get writeCheckpointMode() {
+    return this.writeCheckpointAPI.writeCheckpointMode;
+  }
+
+  setWriteCheckpointMode(mode: storage.WriteCheckpointMode): void {
+    this.writeCheckpointAPI.setWriteCheckpointMode(mode);
+  }
+
+  batchCreateCustomWriteCheckpoints(checkpoints: storage.BatchedCustomWriteCheckpointOptions[]): Promise<void> {
+    return this.writeCheckpointAPI.batchCreateCustomWriteCheckpoints(
+      checkpoints.map((checkpoint) => ({ ...checkpoint, sync_rules_id: this.group_id }))
+    );
+  }
+
+  createCustomWriteCheckpoint(checkpoint: storage.BatchedCustomWriteCheckpointOptions): Promise<bigint> {
+    return this.writeCheckpointAPI.createCustomWriteCheckpoint({
+      ...checkpoint,
+      sync_rules_id: this.group_id
+    });
+  }
+
+  createManagedWriteCheckpoint(checkpoint: storage.ManagedWriteCheckpointOptions): Promise<bigint> {
+    return this.writeCheckpointAPI.createManagedWriteCheckpoint(checkpoint);
+  }
+
+  lastWriteCheckpoint(filters: storage.SyncStorageLastWriteCheckpointFilters): Promise<bigint | null> {
+    return this.writeCheckpointAPI.lastWriteCheckpoint({
+      ...filters,
+      sync_rules_id: this.group_id
+    });
+  }
+
+  getParsedSyncRules(options: storage.ParseSyncRulesOptions): SqlSyncRules {
+    const { parsed, options: cachedOptions } = this.parsedSyncRulesCache ?? {};
+    /**
+     * Check if the cached sync rules, if present, had the same options.
+     * Parse sync rules if the options are different or if there is no cached value.
+     */
+    if (!parsed || options.defaultSchema != cachedOptions?.defaultSchema) {
+      this.parsedSyncRulesCache = { parsed: this.sync_rules.parsed(options).sync_rules, options };
+    }
+
+    return this.parsedSyncRulesCache!.parsed;
+  }
+
+  async getCheckpoint(): Promise<storage.ReplicationCheckpoint> {
+    const doc = await this.db.sync_rules.findOne(
+      { _id: this.group_id },
+      {
+        projection: { last_checkpoint: 1, last_checkpoint_lsn: 1 }
+      }
+    );
+    return {
+      checkpoint: utils.timestampToOpId(doc?.last_checkpoint ?? 0n),
+      lsn: doc?.last_checkpoint_lsn ?? null
+    };
+  }
+
+  async startBatch(
+    options: storage.StartBatchOptions,
+    callback: (batch: storage.BucketStorageBatch) => Promise<void>
+  ): Promise<storage.FlushedResult | null> {
+    const doc = await this.db.sync_rules.findOne(
+      {
+        _id: this.group_id
+      },
+      { projection: { last_checkpoint_lsn: 1, no_checkpoint_before: 1, keepalive_op: 1 } }
+    );
+    const checkpoint_lsn = doc?.last_checkpoint_lsn ?? null;
+
+    await using batch = new MongoBucketBatch({
+      db: this.db,
+      syncRules: this.sync_rules.parsed(options).sync_rules,
+      groupId: this.group_id,
+      slotName: this.slot_name,
+      lastCheckpointLsn: checkpoint_lsn,
+      noCheckpointBeforeLsn: doc?.no_checkpoint_before ?? options.zeroLSN,
+      keepaliveOp: doc?.keepalive_op ?? null,
+      storeCurrentData: options.storeCurrentData,
+      skipExistingRows: options.skipExistingRows ?? false
+    });
+    this.iterateListeners((cb) => cb.batchStarted?.(batch));
+
+    await callback(batch);
+    await batch.flush();
+    if (batch.last_flushed_op) {
+      return { flushed_op: String(batch.last_flushed_op) };
+    } else {
+      return null;
+    }
+  }
+
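The `startBatch` method above wraps replication writes in a `MongoBucketBatch` that is flushed and disposed automatically via `await using`. A minimal usage sketch, assuming a `bucketStorage: MongoSyncBucketStorage` instance; the literal option values and the `defaultSchema` field are illustrative assumptions, not taken from this diff:

// Hypothetical caller; only option names referenced by startBatch above are used.
const result = await bucketStorage.startBatch(
  {
    zeroLSN: '0/00000000', // fallback when no_checkpoint_before is unset
    defaultSchema: 'public', // assumed field used when parsing sync rules
    storeCurrentData: true,
    skipExistingRows: false
  },
  async (batch) => {
    // `batch` is a storage.BucketStorageBatch; replication rows would be saved here
    // before startBatch flushes the batch itself.
  }
);
console.log(result?.flushed_op ?? 'no operations flushed');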
+  async resolveTable(options: storage.ResolveTableOptions): Promise<storage.ResolveTableResult> {
+    const { group_id, connection_id, connection_tag, entity_descriptor } = options;
+
+    const { schema, name: table, objectId, replicationColumns } = entity_descriptor;
+
+    const columns = replicationColumns.map((column) => ({
+      name: column.name,
+      type: column.type,
+      type_oid: column.typeId
+    }));
+    let result: storage.ResolveTableResult | null = null;
+    await this.db.client.withSession(async (session) => {
+      const col = this.db.source_tables;
+      let doc = await col.findOne(
+        {
+          group_id: group_id,
+          connection_id: connection_id,
+          relation_id: objectId,
+          schema_name: schema,
+          table_name: table,
+          replica_id_columns2: columns
+        },
+        { session }
+      );
+      if (doc == null) {
+        doc = {
+          _id: new bson.ObjectId(),
+          group_id: group_id,
+          connection_id: connection_id,
+          relation_id: objectId,
+          schema_name: schema,
+          table_name: table,
+          replica_id_columns: null,
+          replica_id_columns2: columns,
+          snapshot_done: false
+        };
+
+        await col.insertOne(doc, { session });
+      }
+      const sourceTable = new storage.SourceTable(
+        doc._id,
+        connection_tag,
+        objectId,
+        schema,
+        table,
+        replicationColumns,
+        doc.snapshot_done ?? true
+      );
+      sourceTable.syncEvent = options.sync_rules.tableTriggersEvent(sourceTable);
+      sourceTable.syncData = options.sync_rules.tableSyncsData(sourceTable);
+      sourceTable.syncParameters = options.sync_rules.tableSyncsParameters(sourceTable);
+
+      const truncate = await col
+        .find(
+          {
+            group_id: group_id,
+            connection_id: connection_id,
+            _id: { $ne: doc._id },
+            $or: [{ relation_id: objectId }, { schema_name: schema, table_name: table }]
+          },
+          { session }
+        )
+        .toArray();
+      result = {
+        table: sourceTable,
+        dropTables: truncate.map(
+          (doc) =>
+            new storage.SourceTable(
+              doc._id,
+              connection_tag,
+              doc.relation_id ?? 0,
+              doc.schema_name,
+              doc.table_name,
+              doc.replica_id_columns2?.map((c) => ({ name: c.name, typeOid: c.type_oid, type: c.type })) ?? [],
+              doc.snapshot_done ?? true
+            )
+        )
+      };
+    });
+    return result!;
+  }
+
+  async getParameterSets(checkpoint: utils.OpId, lookups: SqliteJsonValue[][]): Promise<SqliteJsonRow[]> {
+    const lookupFilter = lookups.map((lookup) => {
+      return storage.serializeLookup(lookup);
+    });
+    const rows = await this.db.bucket_parameters
+      .aggregate([
+        {
+          $match: {
+            'key.g': this.group_id,
+            lookup: { $in: lookupFilter },
+            _id: { $lte: BigInt(checkpoint) }
+          }
+        },
+        {
+          $sort: {
+            _id: -1
+          }
+        },
+        {
+          $group: {
+            _id: { key: '$key', lookup: '$lookup' },
+            bucket_parameters: {
+              $first: '$bucket_parameters'
+            }
+          }
+        }
+      ])
+      .toArray();
+    const groupedParameters = rows.map((row) => {
+      return row.bucket_parameters;
+    });
+    return groupedParameters.flat();
+  }
+
+  async *getBucketDataBatch(
+    checkpoint: utils.OpId,
+    dataBuckets: Map<string, string>,
+    options?: storage.BucketDataBatchOptions
+  ): AsyncIterable<storage.SyncBucketDataBatch> {
+    if (dataBuckets.size == 0) {
+      return;
+    }
+    let filters: mongo.Filter<BucketDataDocument>[] = [];
+
+    const end = checkpoint ? BigInt(checkpoint) : new bson.MaxKey();
+    for (let [name, start] of dataBuckets.entries()) {
+      filters.push({
+        _id: {
+          $gt: {
+            g: this.group_id,
+            b: name,
+            o: BigInt(start)
+          },
+          $lte: {
+            g: this.group_id,
+            b: name,
+            o: end as any
+          }
+        }
+      });
+    }
+
+    const limit = options?.limit ?? storage.DEFAULT_DOCUMENT_BATCH_LIMIT;
+    const sizeLimit = options?.chunkLimitBytes ?? storage.DEFAULT_DOCUMENT_CHUNK_LIMIT_BYTES;
+
+    const cursor = this.db.bucket_data.find(
+      {
+        $or: filters
+      },
+      {
+        session: undefined,
+        sort: { _id: 1 },
+        limit: limit,
+        // Increase batch size above the default 101, so that we can fill an entire batch in
+        // one go.
+        batchSize: limit,
+        // Raw mode returns an array of Buffer instead of parsed documents.
+        // We use it so that:
+        // 1. We can calculate the document size accurately without serializing again.
+        // 2. We can delay parsing the results until it's needed.
+        // We manually use bson.deserialize below.
+        raw: true,
+
+        // Since we're using raw: true and parsing ourselves later, we don't need bigint
+        // support here.
+        // Disabling due to https://jira.mongodb.org/browse/NODE-6165, and the fact that this
+        // is one of our most common queries.
+        useBigInt64: false
+      }
+    ) as unknown as mongo.FindCursor<Buffer>;
+
+    // We want to limit results to a single batch to avoid high memory usage.
+    // This approach uses MongoDB's batch limits to limit the data here, which limits
+    // to the lower of the batch count and size limits.
+    // This is similar to using `singleBatch: true` in the find options, but allows
+    // detecting "hasMore".
+    let { data, hasMore } = await readSingleBatch(cursor);
+    if (data.length == limit) {
+      // Limit reached - could have more data, despite the cursor being drained.
+      hasMore = true;
+    }
+
+    let batchSize = 0;
+    let currentBatch: utils.SyncBucketData | null = null;
+    let targetOp: bigint | null = null;
+
+    // Ordered by _id, meaning buckets are grouped together
+    for (let rawData of data) {
+      const row = bson.deserialize(rawData, storage.BSON_DESERIALIZE_OPTIONS) as BucketDataDocument;
+      const bucket = row._id.b;
+
+      if (currentBatch == null || currentBatch.bucket != bucket || batchSize >= sizeLimit) {
+        let start: string | undefined = undefined;
+        if (currentBatch != null) {
+          if (currentBatch.bucket == bucket) {
+            currentBatch.has_more = true;
+          }
+
+          const yieldBatch = currentBatch;
+          start = currentBatch.after;
+          currentBatch = null;
+          batchSize = 0;
+          yield { batch: yieldBatch, targetOp: targetOp };
+          targetOp = null;
+        }
+
+        start ??= dataBuckets.get(bucket);
+        if (start == null) {
+          throw new Error(`data for unexpected bucket: ${bucket}`);
+        }
+        currentBatch = {
+          bucket,
+          after: start,
+          has_more: hasMore,
+          data: [],
+          next_after: start
+        };
+        targetOp = null;
+      }
+
+      const entry = mapOpEntry(row);
+
+      if (row.target_op != null) {
+        // MOVE, CLEAR
+        if (targetOp == null || row.target_op > targetOp) {
+          targetOp = row.target_op;
+        }
+      }
+
+      currentBatch.data.push(entry);
+      currentBatch.next_after = entry.op_id;
+
+      batchSize += rawData.byteLength;
+    }
+
+    if (currentBatch != null) {
+      const yieldBatch = currentBatch;
+      currentBatch = null;
+      yield { batch: yieldBatch, targetOp: targetOp };
+      targetOp = null;
+    }
+  }
+
|
+
async getChecksums(checkpoint: utils.OpId, buckets: string[]): Promise<utils.ChecksumMap> {
|
|
384
|
+
return this.checksumCache.getChecksumMap(checkpoint, buckets);
|
|
385
|
+
}
|
|
386
|
+
|
|
387
|
+
private async getChecksumsInternal(batch: storage.FetchPartialBucketChecksum[]): Promise<storage.PartialChecksumMap> {
|
|
388
|
+
if (batch.length == 0) {
|
|
389
|
+
return new Map();
|
|
390
|
+
}
|
|
391
|
+
|
|
392
|
+
const filters: any[] = [];
|
|
393
|
+
for (let request of batch) {
|
|
394
|
+
filters.push({
|
|
395
|
+
_id: {
|
|
396
|
+
$gt: {
|
|
397
|
+
g: this.group_id,
|
|
398
|
+
b: request.bucket,
|
|
399
|
+
o: request.start ? BigInt(request.start) : new bson.MinKey()
|
|
400
|
+
},
|
|
401
|
+
$lte: {
|
|
402
|
+
g: this.group_id,
|
|
403
|
+
b: request.bucket,
|
|
404
|
+
o: BigInt(request.end)
|
|
405
|
+
}
|
|
406
|
+
}
|
|
407
|
+
});
|
|
408
|
+
}
|
|
409
|
+
|
|
410
|
+
const aggregate = await this.db.bucket_data
|
|
411
|
+
.aggregate(
|
|
412
|
+
[
|
|
413
|
+
{
|
|
414
|
+
$match: {
|
|
415
|
+
$or: filters
|
|
416
|
+
}
|
|
417
|
+
},
|
|
418
|
+
{
|
|
419
|
+
$group: {
|
|
420
|
+
_id: '$_id.b',
|
|
421
|
+
checksum_total: { $sum: '$checksum' },
|
|
422
|
+
count: { $sum: 1 },
|
|
423
|
+
has_clear_op: {
|
|
424
|
+
$max: {
|
|
425
|
+
$cond: [{ $eq: ['$op', 'CLEAR'] }, 1, 0]
|
|
426
|
+
}
|
|
427
|
+
}
|
|
428
|
+
}
|
|
429
|
+
}
|
|
430
|
+
],
|
|
431
|
+
{ session: undefined, readConcern: 'snapshot' }
|
|
432
|
+
)
|
|
433
|
+
.toArray();
|
|
434
|
+
|
|
435
|
+
return new Map<string, storage.PartialChecksum>(
|
|
436
|
+
aggregate.map((doc) => {
|
|
437
|
+
return [
|
|
438
|
+
doc._id,
|
|
439
|
+
{
|
|
440
|
+
bucket: doc._id,
|
|
441
|
+
partialCount: doc.count,
|
|
442
|
+
partialChecksum: Number(BigInt(doc.checksum_total) & 0xffffffffn) & 0xffffffff,
|
|
443
|
+
isFullChecksum: doc.has_clear_op == 1
|
|
444
|
+
} satisfies storage.PartialChecksum
|
|
445
|
+
];
|
|
446
|
+
})
|
|
447
|
+
);
|
|
448
|
+
}
|
|
449
|
+
|
|
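In `getChecksumsInternal` above, per-bucket checksums are summed server-side and then folded back to 32 bits; `has_clear_op` flags the result as a full rather than partial checksum when a CLEAR operation falls inside the range. A worked sketch of the folding arithmetic, using an assumed aggregate total:

// Illustrative only: a summed checksum that has grown past 32 bits.
const checksumTotal = 0x123456789n; // example $sum result from the aggregation
const partial = Number(checksumTotal & 0xffffffffn) & 0xffffffff;
// => 0x23456789: only the low 32 bits survive (interpreted as a signed 32-bit integer),
// which matches additive checksums computed mod 2^32.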
+  async terminate(options?: storage.TerminateOptions) {
+    // Default is to clear the storage except when explicitly requested not to.
+    if (!options || options?.clearStorage) {
+      await this.clear();
+    }
+    await this.db.sync_rules.updateOne(
+      {
+        _id: this.group_id
+      },
+      {
+        $set: {
+          state: storage.SyncRuleState.TERMINATED,
+          persisted_lsn: null,
+          snapshot_done: false
+        }
+      }
+    );
+  }
+
+  async getStatus(): Promise<storage.SyncRuleStatus> {
+    const doc = await this.db.sync_rules.findOne(
+      {
+        _id: this.group_id
+      },
+      {
+        projection: {
+          snapshot_done: 1,
+          last_checkpoint_lsn: 1,
+          state: 1
+        }
+      }
+    );
+    if (doc == null) {
+      throw new Error('Cannot find sync rules status');
+    }
+
+    return {
+      snapshot_done: doc.snapshot_done,
+      active: doc.state == 'ACTIVE',
+      checkpoint_lsn: doc.last_checkpoint_lsn
+    };
+  }
+
+  async clear(): Promise<void> {
+    while (true) {
+      try {
+        await this.clearIteration();
+
+        logger.info(`${this.slot_name} Done clearing data`);
+        return;
+      } catch (e: unknown) {
+        if (e instanceof mongo.MongoServerError && e.codeName == 'MaxTimeMSExpired') {
+          logger.info(
+            `${this.slot_name} Cleared batch of data in ${lib_mongo.db.MONGO_CLEAR_OPERATION_TIMEOUT_MS}ms, continuing...`
+          );
+          await timers.setTimeout(lib_mongo.db.MONGO_CLEAR_OPERATION_TIMEOUT_MS / 5);
+          continue;
+        } else {
+          throw e;
+        }
+      }
+    }
+  }
+
+  private async clearIteration(): Promise<void> {
+    // Individual operations here may time out with the maxTimeMS option.
+    // It is expected to still make progress, and continue on the next try.
+
+    await this.db.sync_rules.updateOne(
+      {
+        _id: this.group_id
+      },
+      {
+        $set: {
+          snapshot_done: false,
+          persisted_lsn: null,
+          last_checkpoint_lsn: null,
+          last_checkpoint: null,
+          no_checkpoint_before: null
+        }
+      },
+      { maxTimeMS: lib_mongo.db.MONGO_CLEAR_OPERATION_TIMEOUT_MS }
+    );
+    await this.db.bucket_data.deleteMany(
+      {
+        _id: idPrefixFilter<BucketDataKey>({ g: this.group_id }, ['b', 'o'])
+      },
+      { maxTimeMS: lib_mongo.db.MONGO_CLEAR_OPERATION_TIMEOUT_MS }
+    );
+    await this.db.bucket_parameters.deleteMany(
+      {
+        key: idPrefixFilter<SourceKey>({ g: this.group_id }, ['t', 'k'])
+      },
+      { maxTimeMS: lib_mongo.db.MONGO_CLEAR_OPERATION_TIMEOUT_MS }
+    );
+
+    await this.db.current_data.deleteMany(
+      {
+        _id: idPrefixFilter<SourceKey>({ g: this.group_id }, ['t', 'k'])
+      },
+      { maxTimeMS: lib_mongo.db.MONGO_CLEAR_OPERATION_TIMEOUT_MS }
+    );
+
+    await this.db.source_tables.deleteMany(
+      {
+        group_id: this.group_id
+      },
+      { maxTimeMS: lib_mongo.db.MONGO_CLEAR_OPERATION_TIMEOUT_MS }
+    );
+  }
+
+  async autoActivate(): Promise<void> {
+    await this.db.client.withSession(async (session) => {
+      await session.withTransaction(async () => {
+        const doc = await this.db.sync_rules.findOne({ _id: this.group_id }, { session });
+        if (doc && doc.state == 'PROCESSING') {
+          await this.db.sync_rules.updateOne(
+            {
+              _id: this.group_id
+            },
+            {
+              $set: {
+                state: storage.SyncRuleState.ACTIVE
+              }
+            },
+            { session }
+          );
+
+          await this.db.sync_rules.updateMany(
+            {
+              _id: { $ne: this.group_id },
+              state: storage.SyncRuleState.ACTIVE
+            },
+            {
+              $set: {
+                state: storage.SyncRuleState.STOP
+              }
+            },
+            { session }
+          );
+        }
+      });
+    });
+  }
+
+  async reportError(e: any): Promise<void> {
+    const message = String(e.message ?? 'Replication failure');
+    await this.db.sync_rules.updateOne(
+      {
+        _id: this.group_id
+      },
+      {
+        $set: {
+          last_fatal_error: message
+        }
+      }
+    );
+  }
+
+  async compact(options?: storage.CompactOptions) {
+    return new MongoCompactor(this.db, this.group_id, options).compact();
+  }
+}
package/src/storage/implementation/MongoSyncRulesLock.ts
@@ -0,0 +1,88 @@
+import crypto from 'crypto';
+
+import { logger } from '@powersync/lib-services-framework';
+import { storage } from '@powersync/service-core';
+import { PowerSyncMongo } from './db.js';
+
+/**
+ * Manages a lock on a sync rules document, so that only one process
+ * replicates those sync rules at a time.
+ */
+export class MongoSyncRulesLock implements storage.ReplicationLock {
+  private readonly refreshInterval: NodeJS.Timeout;
+
+  static async createLock(
+    db: PowerSyncMongo,
+    sync_rules: storage.PersistedSyncRulesContent
+  ): Promise<MongoSyncRulesLock> {
+    const lockId = crypto.randomBytes(8).toString('hex');
+    const doc = await db.sync_rules.findOneAndUpdate(
+      { _id: sync_rules.id, $or: [{ lock: null }, { 'lock.expires_at': { $lt: new Date() } }] },
+      {
+        $set: {
+          lock: {
+            id: lockId,
+            expires_at: new Date(Date.now() + 60 * 1000)
+          }
+        }
+      },
+      {
+        projection: { lock: 1 },
+        returnDocument: 'before'
+      }
+    );
+
+    if (doc == null) {
+      throw new Error(`Sync rules: ${sync_rules.id} have been locked by another process for replication.`);
+    }
+    return new MongoSyncRulesLock(db, sync_rules.id, lockId);
+  }
+
+  constructor(
+    private db: PowerSyncMongo,
+    public sync_rules_id: number,
+    private lock_id: string
+  ) {
+    this.refreshInterval = setInterval(async () => {
+      try {
+        await this.refresh();
+      } catch (e) {
+        logger.error('Failed to refresh lock', e);
+        clearInterval(this.refreshInterval);
+      }
+    }, 30_130);
+  }
+
+  async release(): Promise<void> {
+    clearInterval(this.refreshInterval);
+    const result = await this.db.sync_rules.updateOne(
+      {
+        _id: this.sync_rules_id,
+        'lock.id': this.lock_id
+      },
+      {
+        $unset: { lock: 1 }
+      }
+    );
+    if (result.modifiedCount == 0) {
+      // Log and ignore
+      logger.warn(`Lock already released: ${this.sync_rules_id}/${this.lock_id}`);
+    }
+  }
+
+  private async refresh(): Promise<void> {
+    const result = await this.db.sync_rules.findOneAndUpdate(
+      {
+        _id: this.sync_rules_id,
+        'lock.id': this.lock_id
+      },
+      {
+        $set: { 'lock.expires_at': new Date(Date.now() + 60 * 1000) }
+      },
+      { returnDocument: 'after' }
+    );
+    if (result == null) {
+      throw new Error(`Lock not held anymore: ${this.sync_rules_id}/${this.lock_id}`);
+    }
+  }
+}
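`MongoSyncRulesLock` implements a lease: `createLock` claims the sync rules document only when no lock is present or the previous lease has expired, the constructor refreshes the lease roughly every 30 seconds, and `release` clears it. A minimal usage sketch, assuming a connected `PowerSyncMongo` instance, persisted sync rules, and a hypothetical `replicate()` function:

// Hypothetical usage; `db`, `syncRules` and `replicate` are assumptions for illustration.
const lock = await MongoSyncRulesLock.createLock(db, syncRules);
try {
  // Replicate while holding the lock; the refresh timer extends lock.expires_at
  // so other processes keep seeing an unexpired lease.
  await replicate();
} finally {
  await lock.release(); // stops the refresh timer and unsets the lock field
}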