@powersync/service-core-tests 0.0.0-dev-20250317113118 → 0.0.0-dev-20250325131118
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +20 -5
- package/dist/test-utils/MetricsHelper.d.ts +12 -0
- package/dist/test-utils/MetricsHelper.js +42 -0
- package/dist/test-utils/MetricsHelper.js.map +1 -0
- package/dist/test-utils/general-utils.d.ts +1 -1
- package/dist/test-utils/general-utils.js +3 -3
- package/dist/test-utils/general-utils.js.map +1 -1
- package/dist/test-utils/test-utils-index.d.ts +1 -1
- package/dist/test-utils/test-utils-index.js +1 -1
- package/dist/tests/register-data-storage-tests.js +254 -0
- package/dist/tests/register-data-storage-tests.js.map +1 -1
- package/dist/tests/register-sync-tests.js +250 -28
- package/dist/tests/register-sync-tests.js.map +1 -1
- package/package.json +5 -4
- package/src/test-utils/MetricsHelper.ts +54 -0
- package/src/test-utils/general-utils.ts +3 -3
- package/src/test-utils/test-utils-index.ts +1 -1
- package/src/tests/register-data-storage-tests.ts +234 -0
- package/src/tests/register-sync-tests.ts +234 -6
- package/tsconfig.tsbuildinfo +1 -1
- package/dist/test-utils/metrics-utils.d.ts +0 -2
- package/dist/test-utils/metrics-utils.js +0 -13
- package/dist/test-utils/metrics-utils.js.map +0 -1
- package/src/test-utils/metrics-utils.ts +0 -14
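
Note on the file list: the `metrics-utils` test helper is removed and replaced by a new `MetricsHelper` (+54 lines). Its source is not included in the hunks below, but its usage there (`METRICS_HELPER.metricsEngine` handed to `createCoreAPIMetrics()` and `sync.RequestTracker()`) implies a small wrapper owning a shared metrics engine. A hypothetical sketch of that shape — everything except the `METRICS_HELPER` and `metricsEngine` names is a stand-in, not the real file:

```ts
// Hypothetical sketch only - the real MetricsHelper.ts is not shown in this diff.
// Stand-in for the metrics engine type exported by @powersync/service-core.
interface MetricsEngineLike {}

class MetricsHelper {
  constructor(public metricsEngine: MetricsEngineLike) {}
}

// Shared singleton, matching the METRICS_HELPER import used in register-sync-tests.ts below.
export const METRICS_HELPER = new MetricsHelper({});
```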
package/src/tests/register-data-storage-tests.ts

@@ -1477,4 +1477,238 @@ bucket_definitions:
     expect(parsedSchema3).not.equals(parsedSchema2);
     expect(parsedSchema3.getSourceTables()[0].schema).equals('databasename');
   });
+
+  test('managed write checkpoints - checkpoint after write', async (context) => {
+    await using factory = await generateStorageFactory();
+    const r = await factory.configureSyncRules({
+      content: `
+bucket_definitions:
+  mybucket:
+    data: []
+`,
+      validate: false
+    });
+    const bucketStorage = factory.getInstance(r.persisted_sync_rules!);
+    await bucketStorage.autoActivate();
+
+    const abortController = new AbortController();
+    context.onTestFinished(() => abortController.abort());
+    const iter = bucketStorage
+      .watchCheckpointChanges({ user_id: 'user1', signal: abortController.signal })
+      [Symbol.asyncIterator]();
+
+    const writeCheckpoint = await bucketStorage.createManagedWriteCheckpoint({
+      heads: { '1': '5/0' },
+      user_id: 'user1'
+    });
+
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.keepalive('5/0');
+    });
+
+    const result = await iter.next();
+    expect(result).toMatchObject({
+      done: false,
+      value: {
+        base: {
+          checkpoint: 0n,
+          lsn: '5/0'
+        },
+        writeCheckpoint: writeCheckpoint
+      }
+    });
+  });
+
+  test('managed write checkpoints - write after checkpoint', async (context) => {
+    await using factory = await generateStorageFactory();
+    const r = await factory.configureSyncRules({
+      content: `
+bucket_definitions:
+  mybucket:
+    data: []
+`,
+      validate: false
+    });
+    const bucketStorage = factory.getInstance(r.persisted_sync_rules!);
+    await bucketStorage.autoActivate();
+
+    const abortController = new AbortController();
+    context.onTestFinished(() => abortController.abort());
+    const iter = bucketStorage
+      .watchCheckpointChanges({ user_id: 'user1', signal: abortController.signal })
+      [Symbol.asyncIterator]();
+
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.keepalive('5/0');
+    });
+
+    const result = await iter.next();
+    expect(result).toMatchObject({
+      done: false,
+      value: {
+        base: {
+          checkpoint: 0n,
+          lsn: '5/0'
+        },
+        writeCheckpoint: null
+      }
+    });
+
+    const writeCheckpoint = await bucketStorage.createManagedWriteCheckpoint({
+      heads: { '1': '6/0' },
+      user_id: 'user1'
+    });
+    // We have to trigger a new keepalive after the checkpoint, at least to cover postgres storage.
+    // This is what is effectively triggered with RouteAPI.createReplicationHead().
+    // MongoDB storage doesn't explicitly need this anymore.
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.keepalive('6/0');
+    });
+
+    let result2 = await iter.next();
+    if (result2.value?.base?.lsn == '5/0') {
+      // Events could arrive in a different order in some cases - this caters for it
+      result2 = await iter.next();
+    }
+    expect(result2).toMatchObject({
+      done: false,
+      value: {
+        base: {
+          checkpoint: 0n,
+          lsn: '6/0'
+        },
+        writeCheckpoint: writeCheckpoint
+      }
+    });
+  });
+
+  test('custom write checkpoints - checkpoint after write', async (context) => {
+    await using factory = await generateStorageFactory();
+    const r = await factory.configureSyncRules({
+      content: `
+bucket_definitions:
+  mybucket:
+    data: []
+`,
+      validate: false
+    });
+    const bucketStorage = factory.getInstance(r.persisted_sync_rules!);
+    await bucketStorage.autoActivate();
+    bucketStorage.setWriteCheckpointMode(storage.WriteCheckpointMode.CUSTOM);
+
+    const abortController = new AbortController();
+    context.onTestFinished(() => abortController.abort());
+    const iter = bucketStorage
+      .watchCheckpointChanges({ user_id: 'user1', signal: abortController.signal })
+      [Symbol.asyncIterator]();
+
+    await bucketStorage.batchCreateCustomWriteCheckpoints([
+      {
+        checkpoint: 5n,
+        user_id: 'user1'
+      }
+    ]);
+
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.keepalive('5/0');
+    });
+
+    const result = await iter.next();
+    expect(result).toMatchObject({
+      done: false,
+      value: {
+        base: {
+          checkpoint: 0n,
+          lsn: '5/0'
+        },
+        writeCheckpoint: 5n
+      }
+    });
+  });
+
+  test('custom write checkpoints - write after checkpoint', async (context) => {
+    await using factory = await generateStorageFactory();
+    const r = await factory.configureSyncRules({
+      content: `
+bucket_definitions:
+  mybucket:
+    data: []
+`,
+      validate: false
+    });
+    const bucketStorage = factory.getInstance(r.persisted_sync_rules!);
+    await bucketStorage.autoActivate();
+    bucketStorage.setWriteCheckpointMode(storage.WriteCheckpointMode.CUSTOM);
+
+    const abortController = new AbortController();
+    context.onTestFinished(() => abortController.abort());
+    const iter = bucketStorage
+      .watchCheckpointChanges({ user_id: 'user1', signal: abortController.signal })
+      [Symbol.asyncIterator]();
+
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.keepalive('5/0');
+    });
+
+    const result = await iter.next();
+    expect(result).toMatchObject({
+      done: false,
+      value: {
+        base: {
+          checkpoint: 0n,
+          lsn: '5/0'
+        },
+        writeCheckpoint: null
+      }
+    });
+
+    await bucketStorage.batchCreateCustomWriteCheckpoints([
+      {
+        checkpoint: 6n,
+        user_id: 'user1'
+      }
+    ]);
+    // We have to trigger a new keepalive after the checkpoint, at least to cover postgres storage.
+    // This is what is effectively triggered with RouteAPI.createReplicationHead().
+    // MongoDB storage doesn't explicitly need this anymore.
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.keepalive('6/0');
+    });
+
+    let result2 = await iter.next();
+    expect(result2).toMatchObject({
+      done: false,
+      value: {
+        base: {
+          checkpoint: 0n
+          // can be 5/0 or 6/0 - actual value not relevant for custom write checkpoints
+          // lsn: '6/0'
+        },
+        writeCheckpoint: 6n
+      }
+    });
+
+    await bucketStorage.batchCreateCustomWriteCheckpoints([
+      {
+        checkpoint: 7n,
+        user_id: 'user1'
+      }
+    ]);
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.keepalive('7/0');
+    });
+
+    let result3 = await iter.next();
+    expect(result3).toMatchObject({
+      done: false,
+      value: {
+        base: {
+          checkpoint: 0n
+          // can be 5/0, 6/0 or 7/0 - actual value not relevant for custom write checkpoints
+          // lsn: '7/0'
+        },
+        writeCheckpoint: 7n
+      }
+    });
+  });
 }
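
All four new tests follow the same pattern: take the `watchCheckpointChanges` stream's async iterator by hand, interleave checkpoint writes and keepalives, then assert on each `iter.next()` result individually. A minimal self-contained sketch of that consumption pattern — the generator here is a hypothetical stand-in for the real storage stream:

```ts
// Sketch of the manual iteration pattern used above; fakeCheckpointStream is a
// hypothetical stand-in for bucketStorage.watchCheckpointChanges().
async function* fakeCheckpointStream(signal: AbortSignal) {
  const events = [
    { base: { checkpoint: 0n, lsn: '5/0' }, writeCheckpoint: null as bigint | null },
    { base: { checkpoint: 0n, lsn: '6/0' }, writeCheckpoint: 6n as bigint | null }
  ];
  for (const event of events) {
    if (signal.aborted) return; // stream stops once the test aborts
    yield event;
  }
}

const abortController = new AbortController();
const iter = fakeCheckpointStream(abortController.signal)[Symbol.asyncIterator]();

console.log(await iter.next()); // { done: false, value: { ... lsn: '5/0' ... } }
console.log(await iter.next()); // { done: false, value: { ... lsn: '6/0' ... } }
abortController.abort(); // mirrors context.onTestFinished(() => abortController.abort())
```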
package/src/tests/register-sync-tests.ts

@@ -1,4 +1,11 @@
-import {
+import {
+  createCoreAPIMetrics,
+  storage,
+  StreamingSyncCheckpoint,
+  StreamingSyncCheckpointDiff,
+  sync,
+  utils
+} from '@powersync/service-core';
 import { JSONBig } from '@powersync/service-jsonbig';
 import { RequestParameters } from '@powersync/service-sync-rules';
 import path from 'path';
@@ -6,6 +13,7 @@ import * as timers from 'timers/promises';
 import { fileURLToPath } from 'url';
 import { expect, test } from 'vitest';
 import * as test_utils from '../test-utils/test-utils-index.js';
+import { METRICS_HELPER } from '../test-utils/test-utils-index.js';
 
 const __filename = fileURLToPath(import.meta.url);
 const __dirname = path.dirname(__filename);
@@ -30,7 +38,8 @@ export const SYNC_SNAPSHOT_PATH = path.resolve(__dirname, '../__snapshots/sync.t
  * ```
  */
 export function registerSyncTests(factory: storage.TestStorageFactory) {
-
+  createCoreAPIMetrics(METRICS_HELPER.metricsEngine);
+  const tracker = new sync.RequestTracker(METRICS_HELPER.metricsEngine);
   const syncContext = new sync.SyncContext({
     maxBuckets: 10,
     maxParameterQueryResults: 10,
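
The two added lines above are why the diff imports `createCoreAPIMetrics` and `METRICS_HELPER`: the suite now routes both the core API metrics and the per-request tracker through one shared engine, so counters land in a single registry that tests can assert against. A rough sketch of that single-engine wiring, using stand-in types rather than the real `@powersync/service-core` signatures:

```ts
// Stand-in types only; this is not the real service-core API.
class EngineSketch {
  private counters = new Map<string, number>();
  add(name: string, delta: number) {
    this.counters.set(name, (this.counters.get(name) ?? 0) + delta);
  }
  get(name: string) {
    return this.counters.get(name) ?? 0;
  }
}

class TrackerSketch {
  constructor(private engine: EngineSketch) {}
  trackOperationsSynced(count: number) {
    this.engine.add('operations_synced', count);
  }
}

// Both consumers share the same engine instance, as in the diff above.
const engine = new EngineSketch();
const tracker = new TrackerSketch(engine);
tracker.trackOperationsSynced(3);
console.log(engine.get('operations_synced')); // 3
```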
@@ -398,7 +407,7 @@ bucket_definitions:
     expect(lines).toMatchSnapshot();
   });
 
-  test('sync updates to global data', async () => {
+  test('sync updates to global data', async (context) => {
     await using f = await factory();
 
     const syncRules = await f.updateSyncRules({
@@ -422,6 +431,9 @@ bucket_definitions:
       token: { exp: Date.now() / 1000 + 10 } as any
     });
     const iter = stream[Symbol.asyncIterator]();
+    context.onTestFinished(() => {
+      iter.return?.();
+    });
 
     expect(await getCheckpointLines(iter)).toMatchSnapshot();
 
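
This `context.onTestFinished` block (repeated for each streaming test below) relies on a guarantee of async generators: calling `return()` runs any pending `finally` blocks inside the generator, which is what actually releases the underlying sync stream when a test finishes early. A self-contained illustration:

```ts
// Why iter.return?.() is enough to close a generator-backed stream.
async function* stream() {
  try {
    yield 'line 1';
    yield 'line 2';
  } finally {
    console.log('stream closed'); // cleanup runs here on early return()
  }
}

const iter = stream()[Symbol.asyncIterator]();
console.log(await iter.next()); // { value: 'line 1', done: false }
await iter.return?.();          // logs 'stream closed'; iterator is now done
console.log(await iter.next()); // { value: undefined, done: true }
```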
@@ -456,11 +468,221 @@ bucket_definitions:
     });
 
     expect(await getCheckpointLines(iter)).toMatchSnapshot();
+  });
+
+  test('sync updates to parameter query only', async (context) => {
+    await using f = await factory();
+
+    const syncRules = await f.updateSyncRules({
+      content: `bucket_definitions:
+  by_user:
+    parameters: select users.id as user_id from users where users.id = request.user_id()
+    data:
+      - select * from lists where user_id = bucket.user_id
+`
+    });
+
+    const usersTable = test_utils.makeTestTable('users', ['id']);
+    const listsTable = test_utils.makeTestTable('lists', ['id']);
+
+    const bucketStorage = await f.getInstance(syncRules);
+    await bucketStorage.autoActivate();
+
+    const stream = sync.streamResponse({
+      syncContext,
+      bucketStorage,
+      syncRules: bucketStorage.getParsedSyncRules(test_utils.PARSE_OPTIONS),
+      params: {
+        buckets: [],
+        include_checksum: true,
+        raw_data: true
+      },
+      tracker,
+      syncParams: new RequestParameters({ sub: 'user1' }, {}),
+      token: { exp: Date.now() / 1000 + 100 } as any
+    });
+    const iter = stream[Symbol.asyncIterator]();
+    context.onTestFinished(() => {
+      iter.return?.();
+    });
+
+    // Initial empty checkpoint
+    const checkpoint1 = await getCheckpointLines(iter);
+    expect((checkpoint1[0] as StreamingSyncCheckpoint).checkpoint?.buckets?.map((b) => b.bucket)).toEqual([]);
+    expect(checkpoint1).toMatchSnapshot();
+
+    // Add user
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.save({
+        sourceTable: usersTable,
+        tag: storage.SaveOperationTag.INSERT,
+        after: {
+          id: 'user1',
+          name: 'User 1'
+        },
+        afterReplicaId: 'user1'
+      });
+
+      await batch.commit('0/1');
+    });
+
+    const checkpoint2 = await getCheckpointLines(iter);
+    expect(
+      (checkpoint2[0] as StreamingSyncCheckpointDiff).checkpoint_diff?.updated_buckets?.map((b) => b.bucket)
+    ).toEqual(['by_user["user1"]']);
+    expect(checkpoint2).toMatchSnapshot();
+  });
+
+  test('sync updates to data query only', async (context) => {
+    await using f = await factory();
+
+    const syncRules = await f.updateSyncRules({
+      content: `bucket_definitions:
+  by_user:
+    parameters: select users.id as user_id from users where users.id = request.user_id()
+    data:
+      - select * from lists where user_id = bucket.user_id
+`
+    });
+
+    const usersTable = test_utils.makeTestTable('users', ['id']);
+    const listsTable = test_utils.makeTestTable('lists', ['id']);
+
+    const bucketStorage = await f.getInstance(syncRules);
+    await bucketStorage.autoActivate();
+
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.save({
+        sourceTable: usersTable,
+        tag: storage.SaveOperationTag.INSERT,
+        after: {
+          id: 'user1',
+          name: 'User 1'
+        },
+        afterReplicaId: 'user1'
+      });
+
+      await batch.commit('0/1');
+    });
+
+    const stream = sync.streamResponse({
+      syncContext,
+      bucketStorage,
+      syncRules: bucketStorage.getParsedSyncRules(test_utils.PARSE_OPTIONS),
+      params: {
+        buckets: [],
+        include_checksum: true,
+        raw_data: true
+      },
+      tracker,
+      syncParams: new RequestParameters({ sub: 'user1' }, {}),
+      token: { exp: Date.now() / 1000 + 100 } as any
+    });
+    const iter = stream[Symbol.asyncIterator]();
+    context.onTestFinished(() => {
+      iter.return?.();
+    });
+
+    const checkpoint1 = await getCheckpointLines(iter);
+    expect((checkpoint1[0] as StreamingSyncCheckpoint).checkpoint?.buckets?.map((b) => b.bucket)).toEqual([
+      'by_user["user1"]'
+    ]);
+    expect(checkpoint1).toMatchSnapshot();
+
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.save({
+        sourceTable: listsTable,
+        tag: storage.SaveOperationTag.INSERT,
+        after: {
+          id: 'list1',
+          user_id: 'user1',
+          name: 'User 1'
+        },
+        afterReplicaId: 'list1'
+      });
+
+      await batch.commit('0/1');
+    });
+
+    const checkpoint2 = await getCheckpointLines(iter);
+    expect(
+      (checkpoint2[0] as StreamingSyncCheckpointDiff).checkpoint_diff?.updated_buckets?.map((b) => b.bucket)
+    ).toEqual(['by_user["user1"]']);
+    expect(checkpoint2).toMatchSnapshot();
+  });
+
+  test('sync updates to parameter query + data', async (context) => {
+    await using f = await factory();
+
+    const syncRules = await f.updateSyncRules({
+      content: `bucket_definitions:
+  by_user:
+    parameters: select users.id as user_id from users where users.id = request.user_id()
+    data:
+      - select * from lists where user_id = bucket.user_id
+`
+    });
+
+    const usersTable = test_utils.makeTestTable('users', ['id']);
+    const listsTable = test_utils.makeTestTable('lists', ['id']);
+
+    const bucketStorage = await f.getInstance(syncRules);
+    await bucketStorage.autoActivate();
+
+    const stream = sync.streamResponse({
+      syncContext,
+      bucketStorage,
+      syncRules: bucketStorage.getParsedSyncRules(test_utils.PARSE_OPTIONS),
+      params: {
+        buckets: [],
+        include_checksum: true,
+        raw_data: true
+      },
+      tracker,
+      syncParams: new RequestParameters({ sub: 'user1' }, {}),
+      token: { exp: Date.now() / 1000 + 100 } as any
+    });
+    const iter = stream[Symbol.asyncIterator]();
+    context.onTestFinished(() => {
+      iter.return?.();
+    });
+
+    // Initial empty checkpoint
+    expect(await getCheckpointLines(iter)).toMatchSnapshot();
+
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.save({
+        sourceTable: listsTable,
+        tag: storage.SaveOperationTag.INSERT,
+        after: {
+          id: 'list1',
+          user_id: 'user1',
+          name: 'User 1'
+        },
+        afterReplicaId: 'list1'
+      });
 
-
+      await batch.save({
+        sourceTable: usersTable,
+        tag: storage.SaveOperationTag.INSERT,
+        after: {
+          id: 'user1',
+          name: 'User 1'
+        },
+        afterReplicaId: 'user1'
+      });
+
+      await batch.commit('0/1');
+    });
+
+    const checkpoint2 = await getCheckpointLines(iter);
+    expect(
+      (checkpoint2[0] as StreamingSyncCheckpointDiff).checkpoint_diff?.updated_buckets?.map((b) => b.bucket)
+    ).toEqual(['by_user["user1"]']);
+    expect(checkpoint2).toMatchSnapshot();
   });
 
-  test('expiring token', async () => {
+  test('expiring token', async (context) => {
     await using f = await factory();
 
     const syncRules = await f.updateSyncRules({
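
The three new tests above pin down which side of a bucket definition triggers a checkpoint update: the parameter query (`users`), the data query (`lists`), or both in one batch. The asserted bucket name encodes the parameter-query result; a one-liner showing the shape the assertions expect (serialization details beyond JSON-encoding the value are an assumption here):

```ts
// How 'by_user["user1"]' is derived: the bucket definition name plus the
// JSON-serialized parameter value. Exact serialization rules are assumed.
const definition = 'by_user';
const userId = 'user1';
const bucket = `${definition}[${JSON.stringify(userId)}]`;
console.log(bucket); // by_user["user1"]
```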
@@ -486,6 +708,9 @@ bucket_definitions:
       token: { exp: exp } as any
     });
     const iter = stream[Symbol.asyncIterator]();
+    context.onTestFinished(() => {
+      iter.return?.();
+    });
 
     const checkpoint = await getCheckpointLines(iter);
     expect(checkpoint).toMatchSnapshot();
@@ -494,7 +719,7 @@ bucket_definitions:
     expect(expLines).toMatchSnapshot();
   });
 
-  test('compacting data - invalidate checkpoint', async () => {
+  test('compacting data - invalidate checkpoint', async (context) => {
     // This tests a case of a compact operation invalidating a checkpoint in the
     // middle of syncing data.
     // This is expected to be rare in practice, but it is important to handle
@@ -548,6 +773,9 @@ bucket_definitions:
     });
 
     const iter = stream[Symbol.asyncIterator]();
+    context.onTestFinished(() => {
+      iter.return?.();
+    });
 
     // Only consume the first "checkpoint" message, and pause before receiving data.
     const lines = await consumeIterator(iter, { consume: false, isDone: (line) => (line as any)?.checkpoint != null });
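
`consumeIterator` is a local helper in this test file whose implementation isn't part of this diff. From the call site, it reads lines until an `isDone` predicate matches; a hypothetical sketch of that behavior (the real helper also takes a `consume` flag whose semantics aren't shown here, so this sketch ignores it):

```ts
// Hypothetical sketch of a consumeIterator-style helper: collect values until
// the isDone predicate matches, then stop without closing the iterator.
async function consumeIteratorSketch<T>(
  iter: AsyncIterator<T>,
  options: { isDone: (line: T) => boolean }
): Promise<T[]> {
  const lines: T[] = [];
  while (true) {
    const { value, done } = await iter.next();
    if (done) {
      break;
    }
    lines.push(value);
    if (options.isDone(value)) {
      break; // pause here; the caller can keep reading from iter later
    }
  }
  return lines;
}
```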
|