@powersync/service-core 0.12.0 → 0.12.2
- package/CHANGELOG.md +13 -0
- package/dist/storage/mongo/MongoBucketBatch.d.ts +1 -0
- package/dist/storage/mongo/MongoBucketBatch.js +5 -1
- package/dist/storage/mongo/MongoBucketBatch.js.map +1 -1
- package/dist/storage/mongo/MongoCompactor.js +2 -1
- package/dist/storage/mongo/MongoCompactor.js.map +1 -1
- package/dist/storage/mongo/MongoSyncBucketStorage.js +3 -2
- package/dist/storage/mongo/MongoSyncBucketStorage.js.map +1 -1
- package/dist/storage/mongo/MongoWriteCheckpointAPI.js +3 -2
- package/dist/storage/mongo/MongoWriteCheckpointAPI.js.map +1 -1
- package/dist/storage/mongo/PersistedBatch.js +15 -5
- package/dist/storage/mongo/PersistedBatch.js.map +1 -1
- package/dist/storage/mongo/util.d.ts +14 -0
- package/dist/storage/mongo/util.js +39 -0
- package/dist/storage/mongo/util.js.map +1 -1
- package/package.json +2 -2
- package/src/storage/mongo/MongoBucketBatch.ts +8 -3
- package/src/storage/mongo/MongoCompactor.ts +2 -1
- package/src/storage/mongo/MongoSyncBucketStorage.ts +4 -2
- package/src/storage/mongo/MongoWriteCheckpointAPI.ts +5 -2
- package/src/storage/mongo/PersistedBatch.ts +18 -5
- package/src/storage/mongo/util.ts +45 -0
- package/test/src/data_storage.test.ts +70 -0
- package/tsconfig.tsbuildinfo +1 -1
package/src/storage/mongo/MongoSyncBucketStorage.ts

@@ -265,7 +265,7 @@ export class MongoSyncBucketStorage
       },
       {
         $group: {
-          _id: '$key',
+          _id: { key: '$key', lookup: '$lookup' },
          bucket_parameters: {
            $first: '$bucket_parameters'
          }
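This one-line change is what the new test at the bottom of this diff exercises: grouping on `$key` alone collapses parameter documents that share a key but were written under different lookups, so `$first` could return `bucket_parameters` belonging to the wrong lookup. A sketch of the stage in isolation (the surrounding pipeline, including the sort that makes `$first` meaningful, is assumed from context rather than shown in this diff):

```ts
// Sketch only: the corrected $group stage as applied above.
// Before: _id: '$key' merged all lookups for a key into one group.
// After: each (key, lookup) pair forms its own group, so $first
// returns the latest bucket_parameters per lookup (assuming the
// pipeline sorts by operation id before grouping).
const groupStage = {
  $group: {
    _id: { key: '$key', lookup: '$lookup' },
    bucket_parameters: {
      $first: '$bucket_parameters'
    }
  }
};
```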
@@ -522,11 +522,13 @@ export class MongoSyncBucketStorage
     while (true) {
       try {
         await this.clearIteration();
+
+        logger.info(`${this.slot_name} Done clearing data`);
         return;
       } catch (e: unknown) {
         if (e instanceof mongo.MongoServerError && e.codeName == 'MaxTimeMSExpired') {
           logger.info(
-            `Cleared batch of data in ${db.mongo.MONGO_CLEAR_OPERATION_TIMEOUT_MS}ms, continuing...`
+            `${this.slot_name} Cleared batch of data in ${db.mongo.MONGO_CLEAR_OPERATION_TIMEOUT_MS}ms, continuing...`
           );
           await timers.setTimeout(db.mongo.MONGO_CLEAR_OPERATION_TIMEOUT_MS / 5);
           continue;
package/src/storage/mongo/MongoWriteCheckpointAPI.ts

@@ -9,6 +9,7 @@ import {
   WriteCheckpointMode
 } from '../WriteCheckpointAPI.js';
 import { PowerSyncMongo } from './db.js';
+import { safeBulkWrite } from './util.js';
 
 export type MongoCheckpointAPIOptions = {
   db: PowerSyncMongo;
@@ -134,7 +135,8 @@ export async function batchCreateCustomWriteCheckpoints(
     return;
   }
 
-  await db.custom_write_checkpoints.bulkWrite(
+  await safeBulkWrite(
+    db.custom_write_checkpoints,
     checkpoints.map((checkpointOptions) => ({
       updateOne: {
         filter: { user_id: checkpointOptions.user_id, sync_rules_id: checkpointOptions.sync_rules_id },
@@ -146,6 +148,7 @@ export async function batchCreateCustomWriteCheckpoints(
         },
         upsert: true
       }
-    }))
+    })),
+    {}
   );
 }
package/src/storage/mongo/PersistedBatch.ts

@@ -16,7 +16,7 @@ import {
   SourceKey,
   ReplicaId
 } from './models.js';
-import { replicaIdToSubkey, serializeLookup } from './util.js';
+import { replicaIdToSubkey, safeBulkWrite, serializeLookup } from './util.js';
 import { logger } from '@powersync/lib-services-framework';
 
 /**
@@ -33,6 +33,13 @@ import { logger } from '@powersync/lib-services-framework';
  */
 const MAX_TRANSACTION_BATCH_SIZE = 30_000_000;
 
+/**
+ * Limit number of documents to write in a single transaction.
+ *
+ * This has an effect on error message size in some cases.
+ */
+const MAX_TRANSACTION_DOC_COUNT = 2_000;
+
 /**
  * Keeps track of bulkwrite operations within a transaction.
  *
@@ -231,26 +238,32 @@ export class PersistedBatch
   }
 
   shouldFlushTransaction() {
-    return this.currentSize >= MAX_TRANSACTION_BATCH_SIZE;
+    return (
+      this.currentSize >= MAX_TRANSACTION_BATCH_SIZE ||
+      this.bucketData.length >= MAX_TRANSACTION_DOC_COUNT ||
+      this.currentData.length >= MAX_TRANSACTION_DOC_COUNT ||
+      this.bucketParameters.length >= MAX_TRANSACTION_DOC_COUNT
+    );
   }
 
   async flush(db: PowerSyncMongo, session: mongo.ClientSession) {
     if (this.bucketData.length > 0) {
-      await db.bucket_data.bulkWrite(this.bucketData, {
+      // calculate total size
+      await safeBulkWrite(db.bucket_data, this.bucketData, {
         session,
         // inserts only - order doesn't matter
         ordered: false
       });
     }
     if (this.bucketParameters.length > 0) {
-      await db.bucket_parameters.bulkWrite(this.bucketParameters, {
+      await safeBulkWrite(db.bucket_parameters, this.bucketParameters, {
         session,
         // inserts only - order doesn't matter
         ordered: false
       });
     }
     if (this.currentData.length > 0) {
-      await db.current_data.bulkWrite(this.currentData, {
+      await safeBulkWrite(db.current_data, this.currentData, {
         session,
         // may update and delete data within the same batch - order matters
         ordered: true
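With the new `MAX_TRANSACTION_DOC_COUNT`, `shouldFlushTransaction()` now triggers on either the accumulated byte size or any per-collection document count. A hypothetical caller-side sketch of that pattern follows; only `shouldFlushTransaction()` and `flush()` are taken from the diff, while the `persistAll` wrapper and its accumulation step are illustrative assumptions, not package code:

```ts
import * as mongo from 'mongodb';
// Import paths as they would appear next to PersistedBatch.ts in this package:
import { PersistedBatch } from './PersistedBatch.js';
import { PowerSyncMongo } from './db.js';

// Hypothetical helper: accumulate operations into the batch and flush
// whenever a transaction-size threshold is crossed.
async function persistAll(
  db: PowerSyncMongo,
  session: mongo.ClientSession,
  batch: PersistedBatch,
  work: Array<() => void> // each entry adds ops to `batch` via its save APIs
) {
  for (const addOps of work) {
    addOps();
    if (batch.shouldFlushTransaction()) {
      // Flush before the transaction exceeds the 30MB byte budget
      // (MAX_TRANSACTION_BATCH_SIZE) or the new 2 000-document budget
      // (MAX_TRANSACTION_DOC_COUNT) on any of the three collections.
      await batch.flush(db, session);
    }
  }
  await batch.flush(db, session); // write whatever remains
}
```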
package/src/storage/mongo/util.ts

@@ -156,3 +156,48 @@ export function isUUID(value: any): value is bson.UUID {
   const uuid = value as bson.UUID;
   return uuid._bsontype == 'Binary' && uuid.sub_type == bson.Binary.SUBTYPE_UUID;
 }
+
+/**
+ * MongoDB bulkWrite internally splits the operations into batches
+ * so that no batch exceeds 16MB. However, there are cases where
+ * the batch size is very close to 16MB, where additional metadata
+ * on the server pushes it over the limit, resulting in this error
+ * from the server:
+ *
+ * > MongoBulkWriteError: BSONObj size: 16814023 (0x1008FC7) is invalid. Size must be between 0 and 16793600(16MB) First element: insert: "bucket_data"
+ *
+ * We work around the issue by doing our own batching, limiting the
+ * batch size to 15MB. This does add additional overhead with
+ * BSON.calculateObjectSize.
+ */
+export async function safeBulkWrite<T extends mongo.Document>(
+  collection: mongo.Collection<T>,
+  operations: mongo.AnyBulkWriteOperation<T>[],
+  options: mongo.BulkWriteOptions
+) {
+  // Must be below 16MB.
+  // We could probably go a little closer, but 15MB is a safe threshold.
+  const BULK_WRITE_LIMIT = 15 * 1024 * 1024;
+
+  let batch: mongo.AnyBulkWriteOperation<T>[] = [];
+  let currentSize = 0;
+  // Estimated overhead per operation, should be smaller in reality.
+  const keySize = 8;
+  for (let op of operations) {
+    const bsonSize =
+      mongo.BSON.calculateObjectSize(op, {
+        checkKeys: false,
+        ignoreUndefined: true
+      } as any) + keySize;
+    if (batch.length > 0 && currentSize + bsonSize > BULK_WRITE_LIMIT) {
+      await collection.bulkWrite(batch, options);
+      currentSize = 0;
+      batch = [];
+    }
+    batch.push(op);
+    currentSize += bsonSize;
+  }
+  if (batch.length > 0) {
+    await collection.bulkWrite(batch, options);
+  }
+}
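`safeBulkWrite` is a drop-in replacement for `collection.bulkWrite(operations, options)`: the same operations and options are passed through, but the operations are chunked client-side so no single batch approaches the server's 16MB message limit. Note that `options` is declared as required, which is why `batchCreateCustomWriteCheckpoints` above now passes an explicit `{}`. A minimal usage sketch against a hypothetical collection (the `EventDoc` schema and `insertEvents` helper are illustrative, not from the package):

```ts
import * as mongo from 'mongodb';
import { safeBulkWrite } from './util.js'; // path as in the diff's src tree

// Hypothetical schema, for illustration only.
interface EventDoc extends mongo.Document {
  _id: mongo.ObjectId;
  payload: string;
}

async function insertEvents(collection: mongo.Collection<EventDoc>, payloads: string[]) {
  const ops: mongo.AnyBulkWriteOperation<EventDoc>[] = payloads.map((payload) => ({
    insertOne: {
      document: { _id: new mongo.ObjectId(), payload }
    }
  }));
  // Before this release: await collection.bulkWrite(ops, { ordered: false });
  // Now: same semantics, but pre-chunked into <15MB batches client-side.
  await safeBulkWrite(collection, ops, { ordered: false });
}
```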
package/test/src/data_storage.test.ts

@@ -119,6 +119,76 @@ bucket_definitions:
   ]);
 });
 
+test('it should use the latest version after updates', async () => {
+  const sync_rules = testRules(
+    `
+bucket_definitions:
+  mybucket:
+    parameters:
+      - SELECT id AS todo_id
+        FROM todos
+        WHERE list_id IN token_parameters.list_id
+    data: []
+`
+  );
+
+  const storage = (await factory()).getInstance(sync_rules);
+
+  const table = makeTestTable('todos', ['id', 'list_id']);
+
+  await storage.startBatch(BATCH_OPTIONS, async (batch) => {
+    // Create two todos which initially belong to different lists
+    await batch.save({
+      sourceTable: table,
+      tag: SaveOperationTag.INSERT,
+      after: {
+        id: 'todo1',
+        list_id: 'list1'
+      },
+      afterReplicaId: rid('todo1')
+    });
+    await batch.save({
+      sourceTable: table,
+      tag: SaveOperationTag.INSERT,
+      after: {
+        id: 'todo2',
+        list_id: 'list2'
+      },
+      afterReplicaId: rid('todo2')
+    });
+  });
+
+  const result2 = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
+    // Update the second todo item to now belong to list 1
+    await batch.save({
+      sourceTable: table,
+      tag: SaveOperationTag.UPDATE,
+      after: {
+        id: 'todo2',
+        list_id: 'list1'
+      },
+      afterReplicaId: rid('todo2')
+    });
+  });
+
+  // We specifically request the todo_ids for both lists.
+  // The removal operation for the association of `list2`::`todo2` should not interfere with the new
+  // association of `list1`::`todo2`.
+  const parameters = await storage.getParameterSets(BigInt(result2!.flushed_op).toString(), [
+    ['mybucket', '1', 'list1'],
+    ['mybucket', '1', 'list2']
+  ]);
+
+  expect(parameters.sort((a, b) => (a.todo_id as string).localeCompare(b.todo_id as string))).toEqual([
+    {
+      todo_id: 'todo1'
+    },
+    {
+      todo_id: 'todo2'
+    }
+  ]);
+});
+
 test('save and load parameters with different number types', async () => {
   const sync_rules = testRules(
     `