@powersync/service-core-tests 0.9.4 → 0.10.0
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registry.
- package/CHANGELOG.md +28 -0
- package/dist/test-utils/bucket-validation.d.ts +1 -1
- package/dist/test-utils/general-utils.js +2 -0
- package/dist/test-utils/general-utils.js.map +1 -1
- package/dist/tests/register-data-storage-tests.js +85 -45
- package/dist/tests/register-data-storage-tests.js.map +1 -1
- package/dist/tests/register-sync-tests.js +217 -71
- package/dist/tests/register-sync-tests.js.map +1 -1
- package/package.json +3 -3
- package/src/test-utils/general-utils.ts +2 -0
- package/src/tests/register-data-storage-tests.ts +57 -24
- package/src/tests/register-sync-tests.ts +149 -0
- package/tsconfig.tsbuildinfo +1 -1
@@ -404,7 +404,7 @@ bucket_definitions:
 
     const parameters = new RequestParameters({ sub: 'u1' }, {});
 
-    const q1 = sync_rules.
+    const q1 = sync_rules.bucketDescriptors[0].parameterQueries[0];
 
     const lookups = q1.getLookups(parameters);
     expect(lookups).toEqual([ParameterLookup.normalized('by_workspace', '1', ['u1'])]);
@@ -474,7 +474,7 @@ bucket_definitions:
 
     const parameters = new RequestParameters({ sub: 'unknown' }, {});
 
-    const q1 = sync_rules.
+    const q1 = sync_rules.bucketDescriptors[0].parameterQueries[0];
 
     const lookups = q1.getLookups(parameters);
     expect(lookups).toEqual([ParameterLookup.normalized('by_public_workspace', '1', [])]);
@@ -564,7 +564,7 @@ bucket_definitions:
     const parameters = new RequestParameters({ sub: 'u1' }, {});
 
     // Test intermediate values - could be moved to sync_rules.test.ts
-    const q1 = sync_rules.
+    const q1 = sync_rules.bucketDescriptors[0].parameterQueries[0];
     const lookups1 = q1.getLookups(parameters);
     expect(lookups1).toEqual([ParameterLookup.normalized('by_workspace', '1', [])]);
 
@@ -572,7 +572,7 @@ bucket_definitions:
     parameter_sets1.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
    expect(parameter_sets1).toEqual([{ workspace_id: 'workspace1' }]);
 
-    const q2 = sync_rules.
+    const q2 = sync_rules.bucketDescriptors[0].parameterQueries[1];
     const lookups2 = q2.getLookups(parameters);
     expect(lookups2).toEqual([ParameterLookup.normalized('by_workspace', '2', ['u1'])]);
 
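These four hunks (in register-data-storage-tests.ts, per the file list above) all make the same mechanical change: parameter queries are now reached through `sync_rules.bucketDescriptors[n].parameterQueries[m]`. A minimal sketch of the new access pattern, assuming the `@powersync/service-sync-rules` import path and an already-parsed `sync_rules` object (neither is shown in this diff):

import { ParameterLookup, RequestParameters, SqlSyncRules } from '@powersync/service-sync-rules';

// Hypothetical helper mirroring the updated tests: resolve the lookups produced by
// the first parameter query of the first bucket definition for a given user.
function firstLookups(sync_rules: SqlSyncRules, sub: string) {
  const parameters = new RequestParameters({ sub }, {});
  const q1 = sync_rules.bucketDescriptors[0].parameterQueries[0];
  return q1.getLookups(parameters);
}

// Per the assertions above, for sub 'u1' this equals
// [ParameterLookup.normalized('by_workspace', '1', ['u1'])].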
@@ -1769,14 +1769,56 @@ bucket_definitions:
       .watchCheckpointChanges({ user_id: 'user1', signal: abortController.signal })
       [Symbol.asyncIterator]();
 
-    await bucketStorage.
-    {
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.addCustomWriteCheckpoint({
         checkpoint: 5n,
         user_id: 'user1'
+      });
+      await batch.flush();
+      await batch.keepalive('5/0');
+    });
+
+    const result = await iter.next();
+    expect(result).toMatchObject({
+      done: false,
+      value: {
+        base: {
+          lsn: '5/0'
+        },
+        writeCheckpoint: 5n
       }
-
+    });
+  });
+
+  test('custom write checkpoints - standalone checkpoint', async (context) => {
+    await using factory = await generateStorageFactory();
+    const r = await factory.configureSyncRules({
+      content: `
+bucket_definitions:
+  mybucket:
+    data: []
+      `,
+      validate: false
+    });
+    const bucketStorage = factory.getInstance(r.persisted_sync_rules!);
+    await bucketStorage.autoActivate();
+    bucketStorage.setWriteCheckpointMode(storage.WriteCheckpointMode.CUSTOM);
+
+    const abortController = new AbortController();
+    context.onTestFinished(() => abortController.abort());
+    const iter = bucketStorage
+      .watchCheckpointChanges({ user_id: 'user1', signal: abortController.signal })
+      [Symbol.asyncIterator]();
 
     await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      // Flush to clear state
+      await batch.flush();
+
+      await batch.addCustomWriteCheckpoint({
+        checkpoint: 5n,
+        user_id: 'user1'
+      });
+      await batch.flush();
       await batch.keepalive('5/0');
     });
 
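This hunk moves custom write checkpoints onto the replication batch: the checkpoint is registered via `batch.addCustomWriteCheckpoint`, flushed, and followed by a keepalive before the checkpoint watcher yields it. A condensed sketch of that flow, reusing only the identifiers visible in the hunk (the storage factory and sync-rules setup are assumed to match the new standalone test above):

// Sketch: same objects as the test above; no APIs beyond what the diff shows.
bucketStorage.setWriteCheckpointMode(storage.WriteCheckpointMode.CUSTOM);

const iter = bucketStorage
  .watchCheckpointChanges({ user_id: 'user1', signal: abortController.signal })
  [Symbol.asyncIterator]();

await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
  await batch.addCustomWriteCheckpoint({ checkpoint: 5n, user_id: 'user1' });
  await batch.flush();          // persist the checkpoint with the batch
  await batch.keepalive('5/0'); // advance the head so the watcher emits
});

const result = await iter.next();
// Expected (per the test): result.value.base.lsn === '5/0', result.value.writeCheckpoint === 5n.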
@@ -1785,7 +1827,6 @@ bucket_definitions:
       done: false,
       value: {
         base: {
-          checkpoint: 0n,
           lsn: '5/0'
         },
         writeCheckpoint: 5n
@@ -1822,23 +1863,18 @@ bucket_definitions:
       done: false,
       value: {
         base: {
-          checkpoint: 0n,
           lsn: '5/0'
         },
         writeCheckpoint: null
       }
     });
 
-    await bucketStorage.
-    {
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      batch.addCustomWriteCheckpoint({
         checkpoint: 6n,
         user_id: 'user1'
-    }
-
-    // We have to trigger a new keepalive after the checkpoint, at least to cover postgres storage.
-    // This is what is effetively triggered with RouteAPI.createReplicationHead().
-    // MongoDB storage doesn't explicitly need this anymore.
-    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      });
+      await batch.flush();
       await batch.keepalive('6/0');
     });
 
@@ -1847,7 +1883,6 @@ bucket_definitions:
       done: false,
       value: {
         base: {
-          checkpoint: 0n
           // can be 5/0 or 6/0 - actual value not relevant for custom write checkpoints
           // lsn: '6/0'
         },
@@ -1855,13 +1890,12 @@ bucket_definitions:
       }
     });
 
-    await bucketStorage.
-    {
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      batch.addCustomWriteCheckpoint({
         checkpoint: 7n,
         user_id: 'user1'
-    }
-
-    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      });
+      await batch.flush();
       await batch.keepalive('7/0');
     });
 
@@ -1870,7 +1904,6 @@ bucket_definitions:
       done: false,
       value: {
         base: {
-          checkpoint: 0n
           // can be 5/0, 6/0 or 7/0 - actual value not relevant for custom write checkpoints
           // lsn: '7/0'
         },
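The remaining hunks in this file drop `checkpoint: 0n` from the expected `base` objects: the watcher assertions no longer pin the base checkpoint id, only the LSN where it is deterministic, plus `writeCheckpoint`. A condensed restatement of the relaxed assertion shape (same `expect`/`toMatchObject` usage as above):

// The base checkpoint id is no longer asserted; later iterations also leave the
// LSN unpinned, since it may be any of '5/0'..'7/0' for custom write checkpoints.
expect(result).toMatchObject({
  done: false,
  value: {
    base: { lsn: '5/0' },
    writeCheckpoint: 5n
  }
});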
@@ -405,6 +405,155 @@ bucket_definitions:
     expect(sentRows).toBe(10002);
   });
 
+  test('sync interrupts low-priority buckets on new checkpoints (2)', async () => {
+    await using f = await factory();
+
+    // bucket0a -> send all data
+    // then interrupt checkpoint with new data for all buckets
+    // -> data for all buckets should be sent in the new checkpoint
+
+    const syncRules = await f.updateSyncRules({
+      content: `
+bucket_definitions:
+  b0a:
+    priority: 2
+    data:
+      - SELECT * FROM test WHERE LENGTH(id) <= 5;
+  b0b:
+    priority: 2
+    data:
+      - SELECT * FROM test WHERE LENGTH(id) <= 5;
+  b1:
+    priority: 1
+    data:
+      - SELECT * FROM test WHERE LENGTH(id) > 5;
+      `
+    });
+
+    const bucketStorage = f.getInstance(syncRules);
+    await bucketStorage.autoActivate();
+
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      // Initial data: Add one priority row and 10k low-priority rows.
+      await batch.save({
+        sourceTable: TEST_TABLE,
+        tag: storage.SaveOperationTag.INSERT,
+        after: {
+          id: 'highprio',
+          description: 'High priority row'
+        },
+        afterReplicaId: 'highprio'
+      });
+      for (let i = 0; i < 2_000; i++) {
+        await batch.save({
+          sourceTable: TEST_TABLE,
+          tag: storage.SaveOperationTag.INSERT,
+          after: {
+            id: `${i}`,
+            description: 'low prio'
+          },
+          afterReplicaId: `${i}`
+        });
+      }
+
+      await batch.commit('0/1');
+    });
+
+    const stream = sync.streamResponse({
+      syncContext,
+      bucketStorage,
+      syncRules: bucketStorage.getParsedSyncRules(test_utils.PARSE_OPTIONS),
+      params: {
+        buckets: [],
+        include_checksum: true,
+        raw_data: true
+      },
+      tracker,
+      syncParams: new RequestParameters({ sub: '' }, {}),
+      token: { exp: Date.now() / 1000 + 10 } as any
+    });
+
+    let sentRows = 0;
+    let lines: any[] = [];
+
+    for await (let next of stream) {
+      if (typeof next == 'string') {
+        next = JSON.parse(next);
+      }
+      if (typeof next === 'object' && next !== null) {
+        if ('partial_checkpoint_complete' in next) {
+          lines.push(next);
+        }
+        if ('checkpoint' in next || 'checkpoint_diff' in next) {
+          lines.push(next);
+        }
+
+        if ('data' in next) {
+          lines.push({ data: { ...next.data, data: undefined } });
+          sentRows += next.data.data.length;
+
+          if (sentRows == 1001) {
+            // Save new data to interrupt the low-priority sync.
+            await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+              // Add another high-priority row. This should interrupt the long-running low-priority sync.
+              await batch.save({
+                sourceTable: TEST_TABLE,
+                tag: storage.SaveOperationTag.INSERT,
+                after: {
+                  id: 'highprio2',
+                  description: 'Another high-priority row'
+                },
+                afterReplicaId: 'highprio2'
+              });
+
+              // Also add a low-priority row
+              await batch.save({
+                sourceTable: TEST_TABLE,
+                tag: storage.SaveOperationTag.INSERT,
+                after: {
+                  id: '2001',
+                  description: 'Another low-priority row'
+                },
+                afterReplicaId: '2001'
+              });
+
+              await batch.commit('0/2');
+            });
+          }
+
+          if (sentRows >= 1000 && sentRows <= 2001) {
+            // pause for a bit to give the stream time to process interruptions.
+            // This covers the data batch above and the next one.
+            await timers.setTimeout(50);
+          }
+        }
+        if ('checkpoint_complete' in next) {
+          lines.push(next);
+          break;
+        }
+      }
+    }
+
+    // Expected lines (full details in snapshot):
+    //
+    // checkpoint (4001)
+    // data (b1[] 0 -> 1)
+    // partial_checkpoint_complete (4001, priority 1)
+    // data (b0a[], 0 -> 2000)
+    // ## adds new data, interrupting the checkpoint
+    // data (b0a[], 2000 -> 4000) # expected - stream is already busy with this by the time it receives the interruption
+    // checkpoint_diff (4004)
+    // data (b1[], 1 -> 4002)
+    // partial_checkpoint_complete (4004, priority 1)
+    // data (b0a[], 4000 -> 4003)
+    // data (b0b[], 0 -> 1999)
+    // data (b0b[], 1999 -> 3999)
+    // data (b0b[], 3999 -> 4004)
+    // checkpoint_complete (4004)
+    expect(lines).toMatchSnapshot();
+    expect(sentRows).toBe(4004);
+  });
+
   test('sends checkpoint complete line for empty checkpoint', async () => {
     await using f = await factory();
 
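The new test in register-sync-tests.ts classifies each line emitted by `sync.streamResponse` by its top-level key and stops once a full `checkpoint_complete` arrives after the interruption. A condensed sketch of that classification loop (same identifiers as the test; `stream`, `lines` and `sentRows` are the variables declared in the hunk above):

// Condensed from the test above: parse string lines, then route by key.
for await (let next of stream) {
  if (typeof next == 'string') next = JSON.parse(next);
  if (typeof next !== 'object' || next === null) continue;

  if ('checkpoint' in next || 'checkpoint_diff' in next) lines.push(next);
  if ('partial_checkpoint_complete' in next) lines.push(next);
  if ('data' in next) sentRows += next.data.data.length;
  if ('checkpoint_complete' in next) break; // the interrupting checkpoint fully synced
}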