@powersync/service-core-tests 0.0.0-dev-20250214100224 → 0.0.0-dev-20250227082606
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +45 -3
- package/README.md +1 -1
- package/dist/tests/register-compacting-tests.d.ts +1 -1
- package/dist/tests/register-compacting-tests.js +135 -4
- package/dist/tests/register-compacting-tests.js.map +1 -1
- package/dist/tests/register-data-storage-tests.js +30 -126
- package/dist/tests/register-data-storage-tests.js.map +1 -1
- package/dist/tests/register-sync-tests.js +316 -58
- package/dist/tests/register-sync-tests.js.map +1 -1
- package/package.json +5 -5
- package/src/tests/register-compacting-tests.ts +142 -7
- package/src/tests/register-data-storage-tests.ts +18 -94
- package/src/tests/register-sync-tests.ts +266 -18
- package/tsconfig.tsbuildinfo +1 -1
package/src/tests/register-compacting-tests.ts

@@ -15,10 +15,7 @@ const TEST_TABLE = test_utils.makeTestTable('test', ['id']);
  * compactTests(() => new MongoStorageFactory(), { clearBatchLimit: 2, moveBatchLimit: 1, moveBatchQueryLimit: 1 }));
  * ```
  */
-export function registerCompactTests(
-  generateStorageFactory: storage.TestStorageFactory,
-  compactOptions: CompactOptions
-) {
+export function registerCompactTests(generateStorageFactory: storage.TestStorageFactory) {
   test('compacting (1)', async () => {
     const sync_rules = test_utils.testRules(`
 bucket_definitions:
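With the options parameter dropped from registration, a storage implementation now passes only its factory when wiring up these shared tests. A minimal sketch of the updated call site, assuming the helper is re-exported from the package root, that tests run under vitest, and that `storage` is the namespace exported by @powersync/service-core; `generateStorageFactory` here is a hypothetical stand-in for a real factory:

```ts
// Sketch only: wiring the shared compact tests into a storage package's
// test suite after this change.
import { describe } from 'vitest';
import { storage } from '@powersync/service-core';
import { registerCompactTests } from '@powersync/service-core-tests';

// Hypothetical factory supplied by the storage implementation under test.
declare const generateStorageFactory: storage.TestStorageFactory;

describe('compacting', () => {
  // Compaction limits are no longer fixed at registration; each test now
  // passes clearBatchLimit / moveBatchLimit / moveBatchQueryLimit directly
  // to bucketStorage.compact(...), as the hunks below show.
  registerCompactTests(generateStorageFactory);
});
```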
@@ -87,7 +84,11 @@ bucket_definitions:
       }
     ]);
 
-    await bucketStorage.compact(compactOptions);
+    await bucketStorage.compact({
+      clearBatchLimit: 2,
+      moveBatchLimit: 1,
+      moveBatchQueryLimit: 1
+    });
 
     const batchAfter = await test_utils.oneFromAsync(
       bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]))
@@ -204,7 +205,11 @@ bucket_definitions:
       }
     ]);
 
-    await bucketStorage.compact(compactOptions);
+    await bucketStorage.compact({
+      clearBatchLimit: 2,
+      moveBatchLimit: 1,
+      moveBatchQueryLimit: 1
+    });
 
     const batchAfter = await test_utils.oneFromAsync(
       bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]))
@@ -285,7 +290,11 @@ bucket_definitions:
     });
     const checkpoint2 = result2!.flushed_op;
 
-    await bucketStorage.compact(compactOptions);
+    await bucketStorage.compact({
+      clearBatchLimit: 2,
+      moveBatchLimit: 1,
+      moveBatchQueryLimit: 1
+    });
 
     const batchAfter = await test_utils.oneFromAsync(
       bucketStorage.getBucketDataBatch(checkpoint2, new Map([['global[]', '0']]))
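Across the three hunks above, the shared `compactOptions` argument is replaced by inline limits, so each test can tune its own compaction batch sizes. A rough sketch of the options shape as exercised here; only the three fields visible in this diff are listed, and the comments are assumptions based on the field names rather than documented semantics:

```ts
// Sketch of the compact options used in these tests. The real
// CompactOptions type may define additional fields.
interface CompactOptionsSketch {
  // Assumed: cap on ops cleared per batch when collapsing bucket history.
  clearBatchLimit?: number;
  // Assumed: cap on ops rewritten as MOVE per write batch.
  moveBatchLimit?: number;
  // Assumed: cap on ops fetched per query while scanning for compactable ops.
  moveBatchQueryLimit?: number;
}

// Tiny limits (as in these tests) force many small batches, exercising the
// batching edge cases of each storage implementation:
const stressLimits: CompactOptionsSketch = { clearBatchLimit: 2, moveBatchLimit: 1, moveBatchQueryLimit: 1 };
```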
@@ -307,4 +316,130 @@ bucket_definitions:
       checksum: 1874612650
     });
   });
+
+  test('compacting (4)', async () => {
+    const sync_rules = test_utils.testRules(/* yaml */
+      ` bucket_definitions:
+          grouped:
+            # The parameter query here is not important
+            # We specifically don't want to create bucket_parameter records here
+            # since the op_ids for bucket_data could vary between storage implementations.
+            parameters: select 'b' as b
+            data:
+              - select * from test where b = bucket.b`);
+
+    await using factory = await generateStorageFactory();
+    const bucketStorage = factory.getInstance(sync_rules);
+
+    const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      /**
+       * Repeatedly create operations which fall into different buckets.
+       * The bucket operations are purposely interleaved as the op_id increases.
+       * A large amount of operations are created here.
+       * The configured window of compacting operations is 100. This means the initial window will
+       * contain operations from multiple buckets.
+       */
+      for (let count = 0; count < 100; count++) {
+        await batch.save({
+          sourceTable: TEST_TABLE,
+          tag: storage.SaveOperationTag.INSERT,
+          after: {
+            id: 't1',
+            b: 'b1',
+            value: 'start'
+          },
+          afterReplicaId: test_utils.rid('t1')
+        });
+
+        await batch.save({
+          sourceTable: TEST_TABLE,
+          tag: storage.SaveOperationTag.UPDATE,
+          after: {
+            id: 't1',
+            b: 'b1',
+            value: 'intermediate'
+          },
+          afterReplicaId: test_utils.rid('t1')
+        });
+
+        await batch.save({
+          sourceTable: TEST_TABLE,
+          tag: storage.SaveOperationTag.INSERT,
+          after: {
+            id: 't2',
+            b: 'b2',
+            value: 'start'
+          },
+          afterReplicaId: test_utils.rid('t2')
+        });
+
+        await batch.save({
+          sourceTable: TEST_TABLE,
+          tag: storage.SaveOperationTag.UPDATE,
+          after: {
+            id: 't1',
+            b: 'b1',
+            value: 'final'
+          },
+          afterReplicaId: test_utils.rid('t1')
+        });
+
+        await batch.save({
+          sourceTable: TEST_TABLE,
+          tag: storage.SaveOperationTag.UPDATE,
+          after: {
+            id: 't2',
+            b: 'b2',
+            value: 'final'
+          },
+          afterReplicaId: test_utils.rid('t2')
+        });
+      }
+    });
+
+    const checkpoint = result!.flushed_op;
+
+    await bucketStorage.compact({
+      clearBatchLimit: 100,
+      moveBatchLimit: 100,
+      moveBatchQueryLimit: 100 // Larger limit for a larger window of operations
+    });
+
+    const batchAfter = await test_utils.fromAsync(
+      bucketStorage.getBucketDataBatch(
+        checkpoint,
+        new Map([
+          ['grouped["b1"]', '0'],
+          ['grouped["b2"]', '0']
+        ])
+      )
+    );
+    const dataAfter = batchAfter.flatMap((b) => b.batch.data);
+
+    // The op_ids will vary between MongoDB and Postgres storage
+    expect(dataAfter).toMatchObject(
+      expect.arrayContaining([
+        { op_id: '497', op: 'CLEAR', checksum: -937074151 },
+        {
+          op_id: '499',
+          op: 'PUT',
+          object_type: 'test',
+          object_id: 't1',
+          checksum: 52221819,
+          subkey: '6544e3899293153fa7b38331/117ab485-4b42-58a2-ab32-0053a22c3423',
+          data: '{"id":"t1","b":"b1","value":"final"}'
+        },
+        { op_id: '498', op: 'CLEAR', checksum: -234380197 },
+        {
+          op_id: '500',
+          op: 'PUT',
+          object_type: 'test',
+          object_id: 't2',
+          checksum: 2126669493,
+          subkey: '6544e3899293153fa7b38331/ec27c691-b47a-5d92-927a-9944feb89eee',
+          data: '{"id":"t2","b":"b2","value":"final"}'
+        }
+      ])
+    );
+  });
 }
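The assertions in this new test expect each bucket's long interleaved history to collapse into a single CLEAR followed by one PUT per row. A rough sketch, not taken from this package, of how a sync client might fold such a compacted stream; the op semantics assumed here (PUT upserts, REMOVE deletes, CLEAR resets the bucket, MOVE is a data-less placeholder) follow the PowerSync sync protocol:

```ts
// Rough client-side sketch of folding a compacted bucket stream.
// Types are simplified stand-ins, not this package's actual types.
type OplogOp = 'PUT' | 'REMOVE' | 'MOVE' | 'CLEAR';
interface OplogEntrySketch {
  op: OplogOp;
  object_id?: string;
  data?: string;
}

function foldBucket(entries: OplogEntrySketch[]): Map<string, string> {
  const rows = new Map<string, string>();
  for (const e of entries) {
    if (e.op === 'CLEAR') {
      // CLEAR invalidates everything accumulated so far in this bucket,
      // which is why one CLEAR + one PUT per row suffices after compaction.
      rows.clear();
    } else if (e.op === 'PUT' && e.object_id != null) {
      rows.set(e.object_id, e.data ?? '');
    } else if (e.op === 'REMOVE' && e.object_id != null) {
      rows.delete(e.object_id);
    }
    // MOVE entries carry no data; they only mark superseded ops.
  }
  return rows;
}
```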
package/src/tests/register-data-storage-tests.ts

@@ -140,7 +140,7 @@ bucket_definitions:
     );
 
     await using factory = await generateStorageFactory();
-
+    const bucketStorage = factory.getInstance(sync_rules);
 
     const table = test_utils.makeTestTable('todos', ['id', 'list_id']);
 
@@ -394,13 +394,12 @@ bucket_definitions:
     const parameter_sets = await bucketStorage.getParameterSets(checkpoint, lookups);
     expect(parameter_sets).toEqual([{ workspace_id: 'workspace1' }]);
 
-    const buckets = await sync_rules.queryBucketIds({
+    const buckets = await sync_rules.getBucketParameterQuerier(parameters).queryDynamicBucketDescriptions({
       getParameterSets(lookups) {
         return bucketStorage.getParameterSets(checkpoint, lookups);
-      },
-      parameters
+      }
     });
-    expect(buckets).toEqual(['by_workspace["workspace1"]']);
+    expect(buckets).toEqual([{ bucket: 'by_workspace["workspace1"]', priority: 3 }]);
   });
 
   test('save and load parameters with dynamic global buckets', async () => {
@@ -466,14 +465,16 @@ bucket_definitions:
     parameter_sets.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
     expect(parameter_sets).toEqual([{ workspace_id: 'workspace1' }, { workspace_id: 'workspace3' }]);
 
-    const buckets = await sync_rules.queryBucketIds({
+    const buckets = await sync_rules.getBucketParameterQuerier(parameters).queryDynamicBucketDescriptions({
       getParameterSets(lookups) {
         return bucketStorage.getParameterSets(checkpoint, lookups);
-      },
-      parameters
+      }
     });
-    buckets.sort();
-    expect(buckets).toEqual(['by_public_workspace["workspace1"]', 'by_public_workspace["workspace3"]']);
+    buckets.sort((a, b) => a.bucket.localeCompare(b.bucket));
+    expect(buckets).toEqual([
+      { bucket: 'by_public_workspace["workspace1"]', priority: 3 },
+      { bucket: 'by_public_workspace["workspace3"]', priority: 3 }
+    ]);
   });
 
   test('multiple parameter queries', async () => {
@@ -562,12 +563,13 @@ bucket_definitions:
     expect(parameter_sets2).toEqual([{ workspace_id: 'workspace3' }]);
 
     // Test final values - the important part
-    const buckets = await sync_rules.queryBucketIds({
-      getParameterSets(lookups) {
-        return bucketStorage.getParameterSets(checkpoint, lookups);
-      },
-      parameters
-    });
+    const buckets = (
+      await sync_rules.getBucketParameterQuerier(parameters).queryDynamicBucketDescriptions({
+        getParameterSets(lookups) {
+          return bucketStorage.getParameterSets(checkpoint, lookups);
+        }
+      })
+    ).map((e) => e.bucket);
     buckets.sort();
     expect(buckets).toEqual(['by_workspace["workspace1"]', 'by_workspace["workspace3"]']);
   });
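The three parameter-query hunks above migrate from a single call on `sync_rules` to a two-step querier API, and bucket IDs become descriptions carrying a priority. A minimal sketch of the shapes implied by these hunks; the names below are inferred from the call sites, not copied from the package's type definitions, which live in the sync-rules package:

```ts
// Shapes inferred from the call sites in this diff.
interface BucketDescriptionSketch {
  bucket: string; // e.g. 'by_workspace["workspace1"]'
  priority: number; // 3 appears to be the default priority in these tests
}

interface QuerierSourceSketch {
  // Resolves parameter lookups against bucket storage at a checkpoint.
  getParameterSets(lookups: unknown[]): Promise<Record<string, unknown>[]>;
}

// Usage pattern from the tests: build the querier once from the request
// parameters, then resolve the dynamic buckets against storage.
async function resolveBuckets(
  sync_rules: {
    getBucketParameterQuerier(params: unknown): {
      queryDynamicBucketDescriptions(source: QuerierSourceSketch): Promise<BucketDescriptionSketch[]>;
    };
  },
  parameters: unknown,
  source: QuerierSourceSketch
): Promise<BucketDescriptionSketch[]> {
  return sync_rules.getBucketParameterQuerier(parameters).queryDynamicBucketDescriptions(source);
}
```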
@@ -1415,84 +1417,6 @@ bucket_definitions:
     expect(test_utils.getBatchMeta(batch3)).toEqual(null);
   });
 
-  test('batch should be disposed automatically', async () => {
-    const sync_rules = test_utils.testRules(`
-bucket_definitions:
-  global:
-    data: []
-`);
-
-    await using factory = await generateStorageFactory();
-    const bucketStorage = factory.getInstance(sync_rules);
-
-    let isDisposed = false;
-    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-      batch.registerListener({
-        disposed: () => {
-          isDisposed = true;
-        }
-      });
-    });
-    expect(isDisposed).true;
-
-    isDisposed = false;
-    let errorCaught = false;
-    try {
-      await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-        batch.registerListener({
-          disposed: () => {
-            isDisposed = true;
-          }
-        });
-        throw new Error(`Testing exceptions`);
-      });
-    } catch (ex) {
-      errorCaught = true;
-      expect(ex.message.includes('Testing')).true;
-    }
-    expect(errorCaught).true;
-    expect(isDisposed).true;
-  });
-
-  test('batch should be disposed automatically', async () => {
-    const sync_rules = test_utils.testRules(`
-bucket_definitions:
-  global:
-    data: []
-`);
-
-    await using factory = await generateStorageFactory();
-    const bucketStorage = factory.getInstance(sync_rules);
-
-    let isDisposed = false;
-    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-      batch.registerListener({
-        disposed: () => {
-          isDisposed = true;
-        }
-      });
-    });
-    expect(isDisposed).true;
-
-    isDisposed = false;
-    let errorCaught = false;
-    try {
-      await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-        batch.registerListener({
-          disposed: () => {
-            isDisposed = true;
-          }
-        });
-        throw new Error(`Testing exceptions`);
-      });
-    } catch (ex) {
-      errorCaught = true;
-      expect(ex.message.includes('Testing')).true;
-    }
-    expect(errorCaught).true;
-    expect(isDisposed).true;
-  });
-
   test('empty storage metrics', async () => {
     await using f = await generateStorageFactory({ dropAll: true });
     const metrics = await f.getStorageMetrics();