@powersync/service-core 0.4.2 → 0.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/entry/cli-entry.js +2 -1
- package/dist/entry/cli-entry.js.map +1 -1
- package/dist/entry/commands/compact-action.d.ts +2 -0
- package/dist/entry/commands/compact-action.js +48 -0
- package/dist/entry/commands/compact-action.js.map +1 -0
- package/dist/entry/entry-index.d.ts +1 -0
- package/dist/entry/entry-index.js +1 -0
- package/dist/entry/entry-index.js.map +1 -1
- package/dist/storage/BucketStorage.d.ts +31 -1
- package/dist/storage/BucketStorage.js.map +1 -1
- package/dist/storage/mongo/MongoCompactor.d.ts +40 -0
- package/dist/storage/mongo/MongoCompactor.js +292 -0
- package/dist/storage/mongo/MongoCompactor.js.map +1 -0
- package/dist/storage/mongo/MongoSyncBucketStorage.d.ts +3 -2
- package/dist/storage/mongo/MongoSyncBucketStorage.js +19 -13
- package/dist/storage/mongo/MongoSyncBucketStorage.js.map +1 -1
- package/dist/storage/mongo/models.d.ts +5 -4
- package/dist/storage/mongo/models.js.map +1 -1
- package/dist/storage/mongo/util.d.ts +3 -0
- package/dist/storage/mongo/util.js +22 -0
- package/dist/storage/mongo/util.js.map +1 -1
- package/dist/sync/sync.js +20 -7
- package/dist/sync/sync.js.map +1 -1
- package/package.json +4 -4
- package/src/entry/cli-entry.ts +2 -1
- package/src/entry/commands/compact-action.ts +54 -0
- package/src/entry/entry-index.ts +1 -0
- package/src/storage/BucketStorage.ts +36 -1
- package/src/storage/mongo/MongoCompactor.ts +371 -0
- package/src/storage/mongo/MongoSyncBucketStorage.ts +25 -14
- package/src/storage/mongo/models.ts +5 -4
- package/src/storage/mongo/util.ts +25 -0
- package/src/sync/sync.ts +20 -7
- package/test/src/__snapshots__/sync.test.ts.snap +85 -0
- package/test/src/bucket_validation.test.ts +142 -0
- package/test/src/bucket_validation.ts +116 -0
- package/test/src/compacting.test.ts +207 -0
- package/test/src/data_storage.test.ts +19 -60
- package/test/src/slow_tests.test.ts +144 -102
- package/test/src/sync.test.ts +169 -29
- package/test/src/util.ts +65 -1
- package/test/src/wal_stream_utils.ts +13 -4
- package/tsconfig.tsbuildinfo +1 -1
package/test/src/bucket_validation.test.ts
@@ -0,0 +1,142 @@
+import { OplogEntry } from '@/util/protocol-types.js';
+import { describe, expect, test } from 'vitest';
+import { reduceBucket, validateBucket } from './bucket_validation.js';
+
+// This tests the reduceBucket function.
+// While this function is not used directly in the service implementation,
+// it is an important part of validating consistency in other tests.
+describe('bucket validation', () => {
+  const ops1: OplogEntry[] = [
+    {
+      op_id: '1',
+      op: 'PUT',
+      object_type: 'test',
+      object_id: 't1',
+      checksum: 2634521662,
+      subkey: '6544e3899293153fa7b38331/117ab485-4b42-58a2-ab32-0053a22c3423',
+      data: '{"id":"t1"}'
+    },
+    {
+      op_id: '2',
+      op: 'PUT',
+      object_type: 'test',
+      object_id: 't2',
+      checksum: 4243212114,
+      subkey: '6544e3899293153fa7b38331/ec27c691-b47a-5d92-927a-9944feb89eee',
+      data: '{"id":"t2"}'
+    },
+    {
+      op_id: '3',
+      op: 'REMOVE',
+      object_type: 'test',
+      object_id: 't1',
+      checksum: 4228978084,
+      subkey: '6544e3899293153fa7b38331/117ab485-4b42-58a2-ab32-0053a22c3423',
+      data: null
+    },
+    {
+      op_id: '4',
+      op: 'PUT',
+      object_type: 'test',
+      object_id: 't2',
+      checksum: 4243212114,
+      subkey: '6544e3899293153fa7b38331/ec27c691-b47a-5d92-927a-9944feb89eee',
+      data: '{"id":"t2"}'
+    }
+  ];
+
+  test('reduce 1', () => {
+    expect(reduceBucket(ops1)).toEqual([
+      {
+        checksum: -1778190028,
+        op: 'CLEAR',
+        op_id: '0'
+      },
+      {
+        checksum: 4243212114,
+        data: '{"id":"t2"}',
+        object_id: 't2',
+        object_type: 'test',
+        op: 'PUT',
+        op_id: '4',
+        subkey: '6544e3899293153fa7b38331/ec27c691-b47a-5d92-927a-9944feb89eee'
+      }
+    ]);
+
+    expect(reduceBucket(reduceBucket(ops1))).toEqual([
+      {
+        checksum: -1778190028,
+        op: 'CLEAR',
+        op_id: '0'
+      },
+      {
+        checksum: 4243212114,
+        data: '{"id":"t2"}',
+        object_id: 't2',
+        object_type: 'test',
+        op: 'PUT',
+        op_id: '4',
+        subkey: '6544e3899293153fa7b38331/ec27c691-b47a-5d92-927a-9944feb89eee'
+      }
+    ]);
+
+    validateBucket(ops1);
+  });
+
+  test('reduce 2', () => {
+    const bucket: OplogEntry[] = [
+      ...ops1,
+
+      {
+        checksum: 93784613,
+        op: 'CLEAR',
+        op_id: '5'
+      },
+      {
+        checksum: 5133378,
+        data: '{"id":"t3"}',
+        object_id: 't3',
+        object_type: 'test',
+        op: 'PUT',
+        op_id: '11',
+        subkey: '6544e3899293153fa7b38333/ec27c691-b47a-5d92-927a-9944feb89eee'
+      }
+    ];
+
+    expect(reduceBucket(bucket)).toEqual([
+      {
+        checksum: 93784613,
+        op: 'CLEAR',
+        op_id: '0'
+      },
+      {
+        checksum: 5133378,
+        data: '{"id":"t3"}',
+        object_id: 't3',
+        object_type: 'test',
+        op: 'PUT',
+        op_id: '11',
+        subkey: '6544e3899293153fa7b38333/ec27c691-b47a-5d92-927a-9944feb89eee'
+      }
+    ]);
+
+    expect(reduceBucket(reduceBucket(bucket))).toEqual([
+      {
+        checksum: 93784613,
+        op: 'CLEAR',
+        op_id: '0'
+      },
+      {
+        checksum: 5133378,
+        data: '{"id":"t3"}',
+        object_id: 't3',
+        object_type: 'test',
+        op: 'PUT',
+        op_id: '11',
+        subkey: '6544e3899293153fa7b38333/ec27c691-b47a-5d92-927a-9944feb89eee'
+      }
+    ]);
+
+    validateBucket(bucket);
+  });
+});
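For reference, the CLEAR checksum expected in `reduce 1` is just the 32-bit wrapping sum of the checksums of the ops that get folded away (ops 1–3; only the final PUT for t2 survives). A minimal sketch, assuming `addChecksums` is a signed 32-bit wrapping add; the tests themselves import the real helper from `@/util/utils.js`:

```ts
// Assumption: addChecksums wraps to a signed 32-bit integer; the real
// implementation lives in '@/util/utils.js'.
const addChecksums = (a: number, b: number): number => (a + b) | 0;

// Ops folded into the CLEAR op: 1 (PUT t1, later removed), 2 (PUT t2, superseded)
// and 3 (REMOVE t1). Only op 4 remains as a PUT.
const folded = [2634521662, 4243212114, 4228978084].reduce(addChecksums, 0);
console.log(folded); // -1778190028, the expected CLEAR checksum above
```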
package/test/src/bucket_validation.ts
@@ -0,0 +1,116 @@
+import { OplogEntry } from '@/util/protocol-types.js';
+import { addChecksums } from '@/util/utils.js';
+import { expect } from 'vitest';
+
+/**
+ * Reduce a bucket to the final state as stored on the client.
+ *
+ * This keeps the final state for each row as a PUT operation.
+ *
+ * All other operations are replaced with a single CLEAR operation,
+ * summing their checksums, and using 0 as the op_id.
+ *
+ * This is the function $r(B)$, as described in /docs/bucket-properties.md.
+ */
+export function reduceBucket(operations: OplogEntry[]) {
+  let rowState = new Map<string, OplogEntry>();
+  let otherChecksum = 0;
+
+  for (let op of operations) {
+    const key = rowKey(op);
+    if (op.op == 'PUT') {
+      const existing = rowState.get(key);
+      if (existing) {
+        otherChecksum = addChecksums(otherChecksum, existing.checksum as number);
+      }
+      rowState.set(key, op);
+    } else if (op.op == 'REMOVE') {
+      const existing = rowState.get(key);
+      if (existing) {
+        otherChecksum = addChecksums(otherChecksum, existing.checksum as number);
+      }
+      rowState.delete(key);
+      otherChecksum = addChecksums(otherChecksum, op.checksum as number);
+    } else if (op.op == 'CLEAR') {
+      rowState.clear();
+      otherChecksum = op.checksum as number;
+    } else if (op.op == 'MOVE') {
+      otherChecksum = addChecksums(otherChecksum, op.checksum as number);
+    } else {
+      throw new Error(`Unknown operation ${op.op}`);
+    }
+  }
+
+  const puts = [...rowState.values()].sort((a, b) => {
+    return Number(BigInt(a.op_id) - BigInt(b.op_id));
+  });
+
+  let finalState: OplogEntry[] = [
+    // Special operation to indicate the checksum remainder
+    { op_id: '0', op: 'CLEAR', checksum: otherChecksum },
+    ...puts
+  ];
+
+  return finalState;
+}
+
+function rowKey(entry: OplogEntry) {
+  return `${entry.object_type}/${entry.object_id}/${entry.subkey}`;
+}
+
+/**
+ * Validate this property, as described in /docs/bucket-properties.md:
+ *
+ * $r(B_{[..id_n]}) = r(r(B_{[..id_i]}) \cup B_{[id_{i+1}..id_n]}) \;\forall\; i \in [1..n]$
+ *
+ * We test that a client syncing the entire bucket in one go (left side of the equation)
+ * ends up with the same result as another client syncing up to operation id_i, then
+ * syncing the rest.
+ */
+export function validateBucket(bucket: OplogEntry[]) {
+  const r1 = reduceBucket(bucket);
+  for (let i = 0; i <= bucket.length; i++) {
+    const r2 = reduceBucket(bucket.slice(0, i + 1));
+    const b3 = bucket.slice(i + 1);
+    const r3 = r2.concat(b3);
+    const r4 = reduceBucket(r3);
+    expect(r4).toEqual(r1);
+  }
+
+  // This is the same check, just implemented differently
+  validateCompactedBucket(bucket, bucket);
+}
+
+/**
+ * Validate these properties for a bucket $B$ and its compacted version $B'$,
+ * as described in /docs/bucket-properties.md:
+ *
+ * 1. $r(B) = r(B')$
+ * 2. $r(B_{[..c]}) = r(r(B_{[..c_i]}) \cup B'_{[c_i+1..c]}) \;\forall\; c_i \in B$
+ *
+ * The first one is that the result of syncing the original bucket is the same as
+ * syncing the compacted bucket.
+ *
+ * The second property is that the result of syncing the entire original bucket is the
+ * same as syncing any partial version of it (up to op $c_i$), and then continuing to
+ * sync using the compacted bucket.
+ */
+export function validateCompactedBucket(bucket: OplogEntry[], compacted: OplogEntry[]) {
+  // r(B_{[..c]})
+  const r1 = reduceBucket(bucket);
+  // r(B) = r(B')
+  expect(reduceBucket(compacted)).toEqual(r1);
+
+  for (let i = 0; i < bucket.length; i++) {
+    // r(B_{[..c_i]})
+    const r2 = reduceBucket(bucket.slice(0, i + 1));
+    const c_i = BigInt(bucket[i].op_id);
+    // B'_{[c_i+1..c]}
+    const b3 = compacted.filter((op) => BigInt(op.op_id) > c_i);
+    // r(B_{[..c_i]}) \cup B'_{[c_i+1..c]}
+    const r3 = r2.concat(b3);
+    // r(r(B_{[..c_i]}) \cup B'_{[c_i+1..c]})
+    const r4 = reduceBucket(r3);
+    expect(r4).toEqual(r1);
+  }
+}
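`validateBucket` is intended to be called from other vitest suites against any oplog, whether built by hand or read back from storage. A small usage sketch with hypothetical entries (not taken from the package):

```ts
import { test } from 'vitest';
import { OplogEntry } from '@/util/protocol-types.js';
import { validateBucket } from './bucket_validation.js';

test('hand-built bucket stays consistent', () => {
  // Hypothetical oplog: one row is written, rewritten, then removed.
  const ops: OplogEntry[] = [
    { op_id: '1', op: 'PUT', object_type: 't', object_id: 'r1', checksum: 1, subkey: 's', data: '{"id":"r1"}' },
    { op_id: '2', op: 'PUT', object_type: 't', object_id: 'r1', checksum: 2, subkey: 's', data: '{"id":"r1","v":2}' },
    { op_id: '3', op: 'REMOVE', object_type: 't', object_id: 'r1', checksum: 3, subkey: 's', data: null }
  ];

  // Asserts r(B[..n]) == r(r(B[..i]) ∪ B[i+1..n]) for every prefix i.
  validateBucket(ops);
});
```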
package/test/src/compacting.test.ts
@@ -0,0 +1,207 @@
+import { SqlSyncRules } from '@powersync/service-sync-rules';
+import { describe, expect, test } from 'vitest';
+import { makeTestTable, MONGO_STORAGE_FACTORY } from './util.js';
+import { oneFromAsync } from './wal_stream_utils.js';
+import { MongoCompactOptions } from '@/storage/mongo/MongoCompactor.js';
+import { reduceBucket, validateCompactedBucket, validateBucket } from './bucket_validation.js';
+
+const TEST_TABLE = makeTestTable('test', ['id']);
+
+// Test with the default options - large batch sizes
+describe('compacting buckets - default options', () => compactTests({}));
+
+// Also test with the minimum batch sizes, forcing usage of multiple batches internally
+describe('compacting buckets - batched', () =>
+  compactTests({ clearBatchLimit: 2, moveBatchLimit: 1, moveBatchQueryLimit: 1 }));
+
+function compactTests(compactOptions: MongoCompactOptions) {
+  const factory = MONGO_STORAGE_FACTORY;
+
+  test('compacting (1)', async () => {
+    const sync_rules = SqlSyncRules.fromYaml(`
+bucket_definitions:
+  global:
+    data: [select * from test]
+`);
+
+    const storage = (await factory()).getInstance({ id: 1, sync_rules, slot_name: 'test' });
+
+    const result = await storage.startBatch({}, async (batch) => {
+      await batch.save({
+        sourceTable: TEST_TABLE,
+        tag: 'insert',
+        after: {
+          id: 't1'
+        }
+      });
+
+      await batch.save({
+        sourceTable: TEST_TABLE,
+        tag: 'insert',
+        after: {
+          id: 't2'
+        }
+      });
+
+      await batch.save({
+        sourceTable: TEST_TABLE,
+        tag: 'update',
+        after: {
+          id: 't2'
+        }
+      });
+    });
+
+    const checkpoint = result!.flushed_op;
+
+    const batchBefore = await oneFromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])));
+    const dataBefore = batchBefore.batch.data;
+
+    expect(dataBefore).toMatchObject([
+      {
+        checksum: 2634521662,
+        object_id: 't1',
+        op: 'PUT',
+        op_id: '1'
+      },
+      {
+        checksum: 4243212114,
+        object_id: 't2',
+        op: 'PUT',
+        op_id: '2'
+      },
+      {
+        checksum: 4243212114,
+        object_id: 't2',
+        op: 'PUT',
+        op_id: '3'
+      }
+    ]);
+
+    await storage.compact(compactOptions);
+
+    const batchAfter = await oneFromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])));
+    const dataAfter = batchAfter.batch.data;
+
+    expect(batchAfter.targetOp).toEqual(3n);
+    expect(dataAfter).toMatchObject([
+      {
+        checksum: 2634521662,
+        object_id: 't1',
+        op: 'PUT',
+        op_id: '1'
+      },
+      {
+        checksum: 4243212114,
+        op: 'MOVE',
+        op_id: '2'
+      },
+      {
+        checksum: 4243212114,
+        object_id: 't2',
+        op: 'PUT',
+        op_id: '3'
+      }
+    ]);
+
+    validateCompactedBucket(dataBefore, dataAfter);
+  });
+
+  test('compacting (2)', async () => {
+    const sync_rules = SqlSyncRules.fromYaml(`
+bucket_definitions:
+  global:
+    data: [select * from test]
+`);
+
+    const storage = (await factory()).getInstance({ id: 1, sync_rules, slot_name: 'test' });
+
+    const result = await storage.startBatch({}, async (batch) => {
+      await batch.save({
+        sourceTable: TEST_TABLE,
+        tag: 'insert',
+        after: {
+          id: 't1'
+        }
+      });
+
+      await batch.save({
+        sourceTable: TEST_TABLE,
+        tag: 'insert',
+        after: {
+          id: 't2'
+        }
+      });
+
+      await batch.save({
+        sourceTable: TEST_TABLE,
+        tag: 'delete',
+        before: {
+          id: 't1'
+        }
+      });
+
+      await batch.save({
+        sourceTable: TEST_TABLE,
+        tag: 'update',
+        after: {
+          id: 't2'
+        }
+      });
+    });
+
+    const checkpoint = result!.flushed_op;
+
+    const batchBefore = await oneFromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])));
+    const dataBefore = batchBefore.batch.data;
+
+    expect(dataBefore).toMatchObject([
+      {
+        checksum: 2634521662,
+        object_id: 't1',
+        op: 'PUT',
+        op_id: '1'
+      },
+      {
+        checksum: 4243212114,
+        object_id: 't2',
+        op: 'PUT',
+        op_id: '2'
+      },
+      {
+        checksum: 4228978084,
+        object_id: 't1',
+        op: 'REMOVE',
+        op_id: '3'
+      },
+      {
+        checksum: 4243212114,
+        object_id: 't2',
+        op: 'PUT',
+        op_id: '4'
+      }
+    ]);
+
+    await storage.compact(compactOptions);
+
+    const batchAfter = await oneFromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])));
+    const dataAfter = batchAfter.batch.data;
+
+    expect(batchAfter.targetOp).toEqual(4n);
+    expect(dataAfter).toMatchObject([
+      {
+        checksum: -1778190028,
+        op: 'CLEAR',
+        op_id: '3'
+      },
+      {
+        checksum: 4243212114,
+        object_id: 't2',
+        op: 'PUT',
+        op_id: '4'
+      }
+    ]);
+
+    validateCompactedBucket(dataBefore, dataAfter);
+  });
+}
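Together, the two tests pin down the observable effect of `storage.compact()`: a PUT that is later superseded keeps its op_id and checksum but is downgraded to a MOVE (test 1), and a leading run of ops that no longer contributes row state collapses into a single CLEAR whose op_id is the last op it replaces and whose checksum is the wrapped sum of those ops (test 2). The following is only a rough in-memory sketch of that rewrite, not the MongoCompactor implementation, and it assumes the same wrapping checksum addition as in the earlier sketch:

```ts
import { OplogEntry } from '@/util/protocol-types.js';

// Assumption: 32-bit wrapping checksum addition, matching the expected values above.
const addChecksums = (a: number, b: number): number => (a + b) | 0;

// Hypothetical helper; the real compaction runs server-side in MongoCompactor.
function compactSketch(ops: OplogEntry[]): OplogEntry[] {
  // Pass 1 (backwards): a PUT is superseded if the same row sees a later PUT or REMOVE.
  const seen = new Set<string>();
  const rewritten = [...ops]
    .reverse()
    .map((op) => {
      const key = `${op.object_type}/${op.object_id}/${op.subkey}`;
      if (op.op === 'PUT' && seen.has(key)) {
        // Superseded PUT: keep op_id and checksum, drop the row data (test 1).
        return { op_id: op.op_id, op: 'MOVE', checksum: op.checksum } as OplogEntry;
      }
      if (op.op === 'PUT' || op.op === 'REMOVE') {
        seen.add(key);
      }
      return op;
    })
    .reverse();

  // Pass 2 (forwards): collapse the leading run of ops that carry no row data
  // into a single CLEAR op (test 2).
  let clearChecksum = 0;
  let covered = 0;
  while (covered < rewritten.length && rewritten[covered].op !== 'PUT') {
    clearChecksum = addChecksums(clearChecksum, rewritten[covered].checksum as number);
    covered++;
  }
  if (covered === 0) {
    return rewritten;
  }
  return [
    { op_id: rewritten[covered - 1].op_id, op: 'CLEAR', checksum: clearChecksum },
    ...rewritten.slice(covered)
  ] as OplogEntry[];
}
```

Applied to the inputs of test 1 and test 2, this sketch reproduces the expected `dataAfter` arrays, which is exactly what `validateCompactedBucket(dataBefore, dataAfter)` then verifies against the consistency properties.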
package/test/src/data_storage.test.ts
@@ -1,26 +1,8 @@
 import { RequestParameters, SqlSyncRules } from '@powersync/service-sync-rules';
-import * as bson from 'bson';
 import { describe, expect, test } from 'vitest';
-import { SourceTable } from '../../src/storage/SourceTable.js';
-import { hashData } from '../../src/util/utils.js';
-import { MONGO_STORAGE_FACTORY, StorageFactory } from './util.js';
-import { SyncBucketData } from '../../src/util/protocol-types.js';
 import { BucketDataBatchOptions } from '../../src/storage/BucketStorage.js';
-import {
-
-function makeTestTable(name: string, columns?: string[] | undefined) {
-  const relId = hashData('table', name, (columns ?? ['id']).join(','));
-  const id = new bson.ObjectId('6544e3899293153fa7b38331');
-  return new SourceTable(
-    id,
-    SourceTable.DEFAULT_TAG,
-    relId,
-    SourceTable.DEFAULT_SCHEMA,
-    name,
-    (columns ?? ['id']).map((column) => ({ name: column, typeOid: 25 })),
-    true
-  );
-}
+import { getBatchData, getBatchMeta, makeTestTable, MONGO_STORAGE_FACTORY, StorageFactory } from './util.js';
+import { fromAsync, oneFromAsync } from './wal_stream_utils.js';
 
 const TEST_TABLE = makeTestTable('test', ['id']);
 
@@ -236,7 +218,7 @@ bucket_definitions:
     const checkpoint = result!.flushed_op;
 
     const batch = await fromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])));
-    const data = batch[0].data.map((d) => {
+    const data = batch[0].batch.data.map((d) => {
       return {
         op: d.op,
        object_id: d.object_id,
@@ -504,7 +486,7 @@ bucket_definitions:
     });
     const checkpoint = result!.flushed_op;
     const batch = await fromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])));
-    const data = batch[0].data.map((d) => {
+    const data = batch[0].batch.data.map((d) => {
       return {
        op: d.op,
        object_id: d.object_id
@@ -568,7 +550,7 @@ bucket_definitions:
     const checkpoint = result!.flushed_op;
 
     const batch = await fromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])));
-    const data = batch[0].data.map((d) => {
+    const data = batch[0].batch.data.map((d) => {
       return {
        op: d.op,
        object_id: d.object_id,
@@ -680,7 +662,7 @@ bucket_definitions:
 
     const batch = await fromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])));
 
-    const data = batch[0].data.map((d) => {
+    const data = batch[0].batch.data.map((d) => {
       return {
        op: d.op,
        object_id: d.object_id,
@@ -855,7 +837,7 @@ bucket_definitions:
     const checkpoint2 = result2!.flushed_op;
 
     const batch = await fromAsync(storage.getBucketDataBatch(checkpoint2, new Map([['global[]', checkpoint1]])));
-    const data = batch[0].data.map((d) => {
+    const data = batch[0].batch.data.map((d) => {
       return {
        op: d.op,
        object_id: d.object_id,
@@ -939,7 +921,7 @@ bucket_definitions:
     const checkpoint3 = result3!.flushed_op;
 
     const batch = await fromAsync(storage.getBucketDataBatch(checkpoint3, new Map([['global[]', checkpoint1]])));
-    const data = batch[0].data.map((d) => {
+    const data = batch[0].batch.data.map((d) => {
       return {
        op: d.op,
        object_id: d.object_id,
@@ -1031,7 +1013,7 @@ bucket_definitions:
     const checkpoint3 = result3!.flushed_op;
 
     const batch = await fromAsync(storage.getBucketDataBatch(checkpoint3, new Map([['global[]', checkpoint1]])));
-    const data = batch[0].data.map((d) => {
+    const data = batch[0].batch.data.map((d) => {
       return {
        op: d.op,
        object_id: d.object_id,
@@ -1133,7 +1115,7 @@ bucket_definitions:
     });
 
     const batch2 = await fromAsync(
-      storage.getBucketDataBatch(checkpoint, new Map([['global[]', batch1[0].next_after]]), options)
+      storage.getBucketDataBatch(checkpoint, new Map([['global[]', batch1[0].batch.next_after]]), options)
     );
     expect(getBatchData(batch2)).toEqual([
       { op_id: '3', op: 'PUT', object_id: 'large2', checksum: 1795508474 },
@@ -1146,7 +1128,7 @@ bucket_definitions:
     });
 
     const batch3 = await fromAsync(
-      storage.getBucketDataBatch(checkpoint, new Map([['global[]', batch2[0].next_after]]), options)
+      storage.getBucketDataBatch(checkpoint, new Map([['global[]', batch2[0].batch.next_after]]), options)
     );
     expect(getBatchData(batch3)).toEqual([]);
     expect(getBatchMeta(batch3)).toEqual(null);
@@ -1223,7 +1205,7 @@ bucket_definitions:
     });
 
     const batch2 = await fromAsync(
-      storage.getBucketDataBatch(checkpoint, new Map([['global[]', batch1[0].next_after]]), options)
+      storage.getBucketDataBatch(checkpoint, new Map([['global[]', batch1[0].batch.next_after]]), options)
     );
     expect(getBatchData(batch2)).toEqual([{ op_id: '3', op: 'PUT', object_id: 'large2', checksum: 1607205872 }]);
     expect(getBatchMeta(batch2)).toEqual({
@@ -1233,7 +1215,7 @@ bucket_definitions:
     });
 
     const batch3 = await fromAsync(
-      storage.getBucketDataBatch(checkpoint, new Map([['global[]', batch2[0].next_after]]), options)
+      storage.getBucketDataBatch(checkpoint, new Map([['global[]', batch2[0].batch.next_after]]), options)
     );
     expect(getBatchData(batch3)).toEqual([{ op_id: '4', op: 'PUT', object_id: 'test3', checksum: 1359888332 }]);
     expect(getBatchMeta(batch3)).toEqual({
@@ -1270,7 +1252,9 @@ bucket_definitions:
 
     const checkpoint = result!.flushed_op;
 
-    const batch1 = await
+    const batch1 = await oneFromAsync(
+      storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]), { limit: 4 })
+    );
 
     expect(getBatchData(batch1)).toEqual([
       { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 },
@@ -1285,8 +1269,8 @@ bucket_definitions:
       next_after: '4'
     });
 
-    const batch2 = await
-      storage.getBucketDataBatch(checkpoint, new Map([['global[]', batch1
+    const batch2 = await oneFromAsync(
+      storage.getBucketDataBatch(checkpoint, new Map([['global[]', batch1.batch.next_after]]), {
        limit: 4
      })
    );
@@ -1302,7 +1286,7 @@ bucket_definitions:
     });
 
     const batch3 = await fromAsync(
-      storage.getBucketDataBatch(checkpoint, new Map([['global[]', batch2
+      storage.getBucketDataBatch(checkpoint, new Map([['global[]', batch2.batch.next_after]]), {
        limit: 4
      })
    );
@@ -1311,28 +1295,3 @@ bucket_definitions:
     expect(getBatchMeta(batch3)).toEqual(null);
   });
 }
-
-function getBatchData(batch: SyncBucketData[]) {
-  if (batch.length == 0) {
-    return [];
-  }
-  return batch[0].data.map((d) => {
-    return {
-      op_id: d.op_id,
-      op: d.op,
-      object_id: d.object_id,
-      checksum: d.checksum
-    };
-  });
-}
-
-function getBatchMeta(batch: SyncBucketData[]) {
-  if (batch.length == 0) {
-    return null;
-  }
-  return {
-    has_more: batch[0].has_more,
-    after: batch[0].after,
-    next_after: batch[0].next_after
-  };
-}
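The repeated `batch[0].data` → `batch[0].batch.data` and `next_after` → `batch.next_after` edits, together with moving `getBatchData`/`getBatchMeta` into `test/src/util.ts`, all track the same API change: `getBucketDataBatch` no longer yields `SyncBucketData` directly, but a wrapper that also exposes a `targetOp` bigint (equal to the final op id in the compacting tests above). A plausible shape for that wrapper, inferred from test usage rather than from the published `BucketStorage.d.ts`:

```ts
import { SyncBucketData } from '../../src/util/protocol-types.js';

// Inferred from how the tests read the yielded values (batch.data, batch.next_after,
// targetOp compared against a bigint); the actual exported type may differ.
interface SyncBucketDataBatch {
  batch: SyncBucketData;
  targetOp: bigint | null;
}
```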