@powersync/service-core 0.0.0-dev-20241128134723 → 0.0.0-dev-20241219091224
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +63 -4
- package/dist/auth/KeySpec.d.ts +1 -0
- package/dist/auth/KeySpec.js +10 -8
- package/dist/auth/KeySpec.js.map +1 -1
- package/dist/auth/RemoteJWKSCollector.js +2 -2
- package/dist/auth/RemoteJWKSCollector.js.map +1 -1
- package/dist/entry/commands/compact-action.js +15 -15
- package/dist/entry/commands/compact-action.js.map +1 -1
- package/dist/entry/commands/migrate-action.js +15 -4
- package/dist/entry/commands/migrate-action.js.map +1 -1
- package/dist/index.d.ts +1 -3
- package/dist/index.js +1 -3
- package/dist/index.js.map +1 -1
- package/dist/migrations/PowerSyncMigrationManager.d.ts +17 -0
- package/dist/migrations/PowerSyncMigrationManager.js +22 -0
- package/dist/migrations/PowerSyncMigrationManager.js.map +1 -0
- package/dist/migrations/ensure-automatic-migrations.d.ts +4 -0
- package/dist/migrations/ensure-automatic-migrations.js +14 -0
- package/dist/migrations/ensure-automatic-migrations.js.map +1 -0
- package/dist/migrations/migrations-index.d.ts +2 -3
- package/dist/migrations/migrations-index.js +2 -3
- package/dist/migrations/migrations-index.js.map +1 -1
- package/dist/routes/RouterEngine.js +2 -1
- package/dist/routes/RouterEngine.js.map +1 -1
- package/dist/routes/configure-fastify.d.ts +28 -28
- package/dist/routes/endpoints/admin.d.ts +24 -24
- package/dist/storage/BucketStorage.d.ts +41 -1
- package/dist/storage/BucketStorage.js +26 -0
- package/dist/storage/BucketStorage.js.map +1 -1
- package/dist/storage/storage-index.d.ts +2 -14
- package/dist/storage/storage-index.js +2 -14
- package/dist/storage/storage-index.js.map +1 -1
- package/dist/sync/sync.js +12 -3
- package/dist/sync/sync.js.map +1 -1
- package/dist/system/ServiceContext.d.ts +3 -0
- package/dist/system/ServiceContext.js +11 -3
- package/dist/system/ServiceContext.js.map +1 -1
- package/dist/util/config/types.d.ts +2 -2
- package/dist/util/utils.d.ts +14 -1
- package/dist/util/utils.js +56 -0
- package/dist/util/utils.js.map +1 -1
- package/package.json +6 -7
- package/src/auth/KeySpec.ts +12 -9
- package/src/auth/RemoteJWKSCollector.ts +2 -2
- package/src/entry/commands/compact-action.ts +20 -15
- package/src/entry/commands/migrate-action.ts +17 -4
- package/src/index.ts +1 -4
- package/src/migrations/PowerSyncMigrationManager.ts +43 -0
- package/src/migrations/ensure-automatic-migrations.ts +15 -0
- package/src/migrations/migrations-index.ts +2 -3
- package/src/routes/RouterEngine.ts +2 -1
- package/src/storage/BucketStorage.ts +44 -1
- package/src/storage/storage-index.ts +3 -15
- package/src/sync/sync.ts +12 -3
- package/src/system/ServiceContext.ts +17 -4
- package/src/util/config/types.ts +2 -2
- package/src/util/utils.ts +59 -1
- package/test/src/auth.test.ts +54 -21
- package/test/src/env.ts +0 -1
- package/tsconfig.tsbuildinfo +1 -1
- package/dist/db/db-index.d.ts +0 -1
- package/dist/db/db-index.js +0 -2
- package/dist/db/db-index.js.map +0 -1
- package/dist/db/mongo.d.ts +0 -35
- package/dist/db/mongo.js +0 -73
- package/dist/db/mongo.js.map +0 -1
- package/dist/locks/LockManager.d.ts +0 -10
- package/dist/locks/LockManager.js +0 -7
- package/dist/locks/LockManager.js.map +0 -1
- package/dist/locks/MongoLocks.d.ts +0 -36
- package/dist/locks/MongoLocks.js +0 -81
- package/dist/locks/MongoLocks.js.map +0 -1
- package/dist/locks/locks-index.d.ts +0 -2
- package/dist/locks/locks-index.js +0 -3
- package/dist/locks/locks-index.js.map +0 -1
- package/dist/migrations/db/migrations/1684951997326-init.d.ts +0 -3
- package/dist/migrations/db/migrations/1684951997326-init.js +0 -33
- package/dist/migrations/db/migrations/1684951997326-init.js.map +0 -1
- package/dist/migrations/db/migrations/1688556755264-initial-sync-rules.d.ts +0 -2
- package/dist/migrations/db/migrations/1688556755264-initial-sync-rules.js +0 -5
- package/dist/migrations/db/migrations/1688556755264-initial-sync-rules.js.map +0 -1
- package/dist/migrations/db/migrations/1702295701188-sync-rule-state.d.ts +0 -3
- package/dist/migrations/db/migrations/1702295701188-sync-rule-state.js +0 -56
- package/dist/migrations/db/migrations/1702295701188-sync-rule-state.js.map +0 -1
- package/dist/migrations/db/migrations/1711543888062-write-checkpoint-index.d.ts +0 -3
- package/dist/migrations/db/migrations/1711543888062-write-checkpoint-index.js +0 -29
- package/dist/migrations/db/migrations/1711543888062-write-checkpoint-index.js.map +0 -1
- package/dist/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.d.ts +0 -3
- package/dist/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.js +0 -31
- package/dist/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.js.map +0 -1
- package/dist/migrations/definitions.d.ts +0 -18
- package/dist/migrations/definitions.js +0 -6
- package/dist/migrations/definitions.js.map +0 -1
- package/dist/migrations/executor.d.ts +0 -16
- package/dist/migrations/executor.js +0 -64
- package/dist/migrations/executor.js.map +0 -1
- package/dist/migrations/migrations.d.ts +0 -18
- package/dist/migrations/migrations.js +0 -110
- package/dist/migrations/migrations.js.map +0 -1
- package/dist/migrations/store/migration-store.d.ts +0 -11
- package/dist/migrations/store/migration-store.js +0 -46
- package/dist/migrations/store/migration-store.js.map +0 -1
- package/dist/storage/MongoBucketStorage.d.ts +0 -48
- package/dist/storage/MongoBucketStorage.js +0 -426
- package/dist/storage/MongoBucketStorage.js.map +0 -1
- package/dist/storage/mongo/MongoBucketBatch.d.ts +0 -67
- package/dist/storage/mongo/MongoBucketBatch.js +0 -643
- package/dist/storage/mongo/MongoBucketBatch.js.map +0 -1
- package/dist/storage/mongo/MongoCompactor.d.ts +0 -40
- package/dist/storage/mongo/MongoCompactor.js +0 -309
- package/dist/storage/mongo/MongoCompactor.js.map +0 -1
- package/dist/storage/mongo/MongoIdSequence.d.ts +0 -12
- package/dist/storage/mongo/MongoIdSequence.js +0 -21
- package/dist/storage/mongo/MongoIdSequence.js.map +0 -1
- package/dist/storage/mongo/MongoPersistedSyncRules.d.ts +0 -9
- package/dist/storage/mongo/MongoPersistedSyncRules.js +0 -9
- package/dist/storage/mongo/MongoPersistedSyncRules.js.map +0 -1
- package/dist/storage/mongo/MongoPersistedSyncRulesContent.d.ts +0 -20
- package/dist/storage/mongo/MongoPersistedSyncRulesContent.js +0 -26
- package/dist/storage/mongo/MongoPersistedSyncRulesContent.js.map +0 -1
- package/dist/storage/mongo/MongoStorageProvider.d.ts +0 -5
- package/dist/storage/mongo/MongoStorageProvider.js +0 -26
- package/dist/storage/mongo/MongoStorageProvider.js.map +0 -1
- package/dist/storage/mongo/MongoSyncBucketStorage.d.ts +0 -38
- package/dist/storage/mongo/MongoSyncBucketStorage.js +0 -531
- package/dist/storage/mongo/MongoSyncBucketStorage.js.map +0 -1
- package/dist/storage/mongo/MongoSyncRulesLock.d.ts +0 -16
- package/dist/storage/mongo/MongoSyncRulesLock.js +0 -65
- package/dist/storage/mongo/MongoSyncRulesLock.js.map +0 -1
- package/dist/storage/mongo/MongoWriteCheckpointAPI.d.ts +0 -20
- package/dist/storage/mongo/MongoWriteCheckpointAPI.js +0 -103
- package/dist/storage/mongo/MongoWriteCheckpointAPI.js.map +0 -1
- package/dist/storage/mongo/OperationBatch.d.ts +0 -35
- package/dist/storage/mongo/OperationBatch.js +0 -119
- package/dist/storage/mongo/OperationBatch.js.map +0 -1
- package/dist/storage/mongo/PersistedBatch.d.ts +0 -46
- package/dist/storage/mongo/PersistedBatch.js +0 -213
- package/dist/storage/mongo/PersistedBatch.js.map +0 -1
- package/dist/storage/mongo/config.d.ts +0 -19
- package/dist/storage/mongo/config.js +0 -26
- package/dist/storage/mongo/config.js.map +0 -1
- package/dist/storage/mongo/db.d.ts +0 -36
- package/dist/storage/mongo/db.js +0 -47
- package/dist/storage/mongo/db.js.map +0 -1
- package/dist/storage/mongo/models.d.ts +0 -156
- package/dist/storage/mongo/models.js +0 -27
- package/dist/storage/mongo/models.js.map +0 -1
- package/dist/storage/mongo/util.d.ts +0 -40
- package/dist/storage/mongo/util.js +0 -151
- package/dist/storage/mongo/util.js.map +0 -1
- package/src/db/db-index.ts +0 -1
- package/src/db/mongo.ts +0 -81
- package/src/locks/LockManager.ts +0 -16
- package/src/locks/MongoLocks.ts +0 -142
- package/src/locks/locks-index.ts +0 -2
- package/src/migrations/db/migrations/1684951997326-init.ts +0 -38
- package/src/migrations/db/migrations/1688556755264-initial-sync-rules.ts +0 -5
- package/src/migrations/db/migrations/1702295701188-sync-rule-state.ts +0 -102
- package/src/migrations/db/migrations/1711543888062-write-checkpoint-index.ts +0 -34
- package/src/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.ts +0 -37
- package/src/migrations/definitions.ts +0 -21
- package/src/migrations/executor.ts +0 -87
- package/src/migrations/migrations.ts +0 -142
- package/src/migrations/store/migration-store.ts +0 -63
- package/src/storage/MongoBucketStorage.ts +0 -540
- package/src/storage/mongo/MongoBucketBatch.ts +0 -841
- package/src/storage/mongo/MongoCompactor.ts +0 -392
- package/src/storage/mongo/MongoIdSequence.ts +0 -24
- package/src/storage/mongo/MongoPersistedSyncRules.ts +0 -16
- package/src/storage/mongo/MongoPersistedSyncRulesContent.ts +0 -50
- package/src/storage/mongo/MongoStorageProvider.ts +0 -31
- package/src/storage/mongo/MongoSyncBucketStorage.ts +0 -636
- package/src/storage/mongo/MongoSyncRulesLock.ts +0 -85
- package/src/storage/mongo/MongoWriteCheckpointAPI.ts +0 -151
- package/src/storage/mongo/OperationBatch.ts +0 -131
- package/src/storage/mongo/PersistedBatch.ts +0 -272
- package/src/storage/mongo/config.ts +0 -40
- package/src/storage/mongo/db.ts +0 -88
- package/src/storage/mongo/models.ts +0 -179
- package/src/storage/mongo/util.ts +0 -158
- package/test/src/__snapshots__/sync.test.ts.snap +0 -332
- package/test/src/bucket_validation.test.ts +0 -142
- package/test/src/bucket_validation.ts +0 -116
- package/test/src/compacting.test.ts +0 -295
- package/test/src/data_storage.test.ts +0 -1499
- package/test/src/stream_utils.ts +0 -42
- package/test/src/sync.test.ts +0 -511
- package/test/src/util.ts +0 -148
```diff
--- package/test/src/bucket_validation.test.ts
+++ /dev/null
@@ -1,142 +0,0 @@
-import { OplogEntry } from '@/util/protocol-types.js';
-import { describe, expect, test } from 'vitest';
-import { reduceBucket, validateBucket } from './bucket_validation.js';
-
-// This tests the reduceBucket function.
-// While this function is not used directly in the service implementation,
-// it is an important part of validating consistency in other tests.
-describe('bucket validation', () => {
-  const ops1: OplogEntry[] = [
-    {
-      op_id: '1',
-      op: 'PUT',
-      object_type: 'test',
-      object_id: 't1',
-      checksum: 2634521662,
-      subkey: '6544e3899293153fa7b38331/117ab485-4b42-58a2-ab32-0053a22c3423',
-      data: '{"id":"t1"}'
-    },
-    {
-      op_id: '2',
-      op: 'PUT',
-      object_type: 'test',
-      object_id: 't2',
-      checksum: 4243212114,
-      subkey: '6544e3899293153fa7b38331/ec27c691-b47a-5d92-927a-9944feb89eee',
-      data: '{"id":"t2"}'
-    },
-    {
-      op_id: '3',
-      op: 'REMOVE',
-      object_type: 'test',
-      object_id: 't1',
-      checksum: 4228978084,
-      subkey: '6544e3899293153fa7b38331/117ab485-4b42-58a2-ab32-0053a22c3423',
-      data: null
-    },
-    {
-      op_id: '4',
-      op: 'PUT',
-      object_type: 'test',
-      object_id: 't2',
-      checksum: 4243212114,
-      subkey: '6544e3899293153fa7b38331/ec27c691-b47a-5d92-927a-9944feb89eee',
-      data: '{"id":"t2"}'
-    }
-  ];
-
-  test('reduce 1', () => {
-    expect(reduceBucket(ops1)).toEqual([
-      {
-        checksum: -1778190028,
-        op: 'CLEAR',
-        op_id: '0'
-      },
-      {
-        checksum: 4243212114,
-        data: '{"id":"t2"}',
-        object_id: 't2',
-        object_type: 'test',
-        op: 'PUT',
-        op_id: '4',
-        subkey: '6544e3899293153fa7b38331/ec27c691-b47a-5d92-927a-9944feb89eee'
-      }
-    ]);
-
-    expect(reduceBucket(reduceBucket(ops1))).toEqual([
-      {
-        checksum: -1778190028,
-        op: 'CLEAR',
-        op_id: '0'
-      },
-      {
-        checksum: 4243212114,
-        data: '{"id":"t2"}',
-        object_id: 't2',
-        object_type: 'test',
-        op: 'PUT',
-        op_id: '4',
-        subkey: '6544e3899293153fa7b38331/ec27c691-b47a-5d92-927a-9944feb89eee'
-      }
-    ]);
-
-    validateBucket(ops1);
-  });
-
-  test('reduce 2', () => {
-    const bucket: OplogEntry[] = [
-      ...ops1,
-
-      {
-        checksum: 93784613,
-        op: 'CLEAR',
-        op_id: '5'
-      },
-      {
-        checksum: 5133378,
-        data: '{"id":"t3"}',
-        object_id: 't3',
-        object_type: 'test',
-        op: 'PUT',
-        op_id: '11',
-        subkey: '6544e3899293153fa7b38333/ec27c691-b47a-5d92-927a-9944feb89eee'
-      }
-    ];
-
-    expect(reduceBucket(bucket)).toEqual([
-      {
-        checksum: 93784613,
-        op: 'CLEAR',
-        op_id: '0'
-      },
-      {
-        checksum: 5133378,
-        data: '{"id":"t3"}',
-        object_id: 't3',
-        object_type: 'test',
-        op: 'PUT',
-        op_id: '11',
-        subkey: '6544e3899293153fa7b38333/ec27c691-b47a-5d92-927a-9944feb89eee'
-      }
-    ]);
-
-    expect(reduceBucket(reduceBucket(bucket))).toEqual([
-      {
-        checksum: 93784613,
-        op: 'CLEAR',
-        op_id: '0'
-      },
-      {
-        checksum: 5133378,
-        data: '{"id":"t3"}',
-        object_id: 't3',
-        object_type: 'test',
-        op: 'PUT',
-        op_id: '11',
-        subkey: '6544e3899293153fa7b38333/ec27c691-b47a-5d92-927a-9944feb89eee'
-      }
-    ]);
-
-    validateBucket(bucket);
-  });
-});
```
```diff
--- package/test/src/bucket_validation.ts
+++ /dev/null
@@ -1,116 +0,0 @@
-import { OplogEntry } from '@/util/protocol-types.js';
-import { addChecksums } from '@/util/utils.js';
-import { expect } from 'vitest';
-
-/**
- * Reduce a bucket to the final state as stored on the client.
- *
- * This keeps the final state for each row as a PUT operation.
- *
- * All other operations are replaced with a single CLEAR operation,
- * summing their checksums, and using 0 as the op_id.
- *
- * This is the function $r(B)$, as described in /docs/bucket-properties.md.
- */
-export function reduceBucket(operations: OplogEntry[]) {
-  let rowState = new Map<string, OplogEntry>();
-  let otherChecksum = 0;
-
-  for (let op of operations) {
-    const key = rowKey(op);
-    if (op.op == 'PUT') {
-      const existing = rowState.get(key);
-      if (existing) {
-        otherChecksum = addChecksums(otherChecksum, existing.checksum as number);
-      }
-      rowState.set(key, op);
-    } else if (op.op == 'REMOVE') {
-      const existing = rowState.get(key);
-      if (existing) {
-        otherChecksum = addChecksums(otherChecksum, existing.checksum as number);
-      }
-      rowState.delete(key);
-      otherChecksum = addChecksums(otherChecksum, op.checksum as number);
-    } else if (op.op == 'CLEAR') {
-      rowState.clear();
-      otherChecksum = op.checksum as number;
-    } else if (op.op == 'MOVE') {
-      otherChecksum = addChecksums(otherChecksum, op.checksum as number);
-    } else {
-      throw new Error(`Unknown operation ${op.op}`);
-    }
-  }
-
-  const puts = [...rowState.values()].sort((a, b) => {
-    return Number(BigInt(a.op_id) - BigInt(b.op_id));
-  });
-
-  let finalState: OplogEntry[] = [
-    // Special operation to indicate the checksum remainder
-    { op_id: '0', op: 'CLEAR', checksum: otherChecksum },
-    ...puts
-  ];
-
-  return finalState;
-}
-
-function rowKey(entry: OplogEntry) {
-  return `${entry.object_type}/${entry.object_id}/${entry.subkey}`;
-}
-
-/**
- * Validate this property, as described in /docs/bucket-properties.md:
- *
- * $r(B_{[..id_n]}) = r(r(B_{[..id_i]}) \cup B_{[id_{i+1}..id_n]}) \;\forall\; i \in [1..n]$
- *
- * We test that a client syncing the entire bucket in one go (left side of the equation)
- * ends up with the same result as another client syncing up to operation id_i, then syncing
- * the rest.
- */
-export function validateBucket(bucket: OplogEntry[]) {
-  const r1 = reduceBucket(bucket);
-  for (let i = 0; i <= bucket.length; i++) {
-    const r2 = reduceBucket(bucket.slice(0, i + 1));
-    const b3 = bucket.slice(i + 1);
-    const r3 = r2.concat(b3);
-    const r4 = reduceBucket(r3);
-    expect(r4).toEqual(r1);
-  }
-
-  // This is the same check, just implemented differently
-  validateCompactedBucket(bucket, bucket);
-}
-
-/**
- * Validate these properties for a bucket $B$ and its compacted version $B'$,
- * as described in /docs/bucket-properties.md:
- *
- * 1. $r(B) = r(B')$
- * 2. $r(B_{[..c]}) = r(r(B_{[..c_i]}) \cup B'_{[c_i+1..c]}) \;\forall\; c_i \in B$
- *
- * The first one is that the result of syncing the original bucket is the same as
- * syncing the compacted bucket.
- *
- * The second property is that the result of syncing the entire original bucket is the
- * same as syncing any partial version of it (up to op $c_i$), and then continuing to
- * sync using the compacted bucket.
- */
-export function validateCompactedBucket(bucket: OplogEntry[], compacted: OplogEntry[]) {
-  // r(B_{[..c]})
-  const r1 = reduceBucket(bucket);
-  // r(B) = r(B')
-  expect(reduceBucket(compacted)).toEqual(r1);
-
-  for (let i = 0; i < bucket.length; i++) {
-    // r(B_{[..c_i]})
-    const r2 = reduceBucket(bucket.slice(0, i + 1));
-    const c_i = BigInt(bucket[i].op_id);
-    // B'_{[c_i+1..c]}
-    const b3 = compacted.filter((op) => BigInt(op.op_id) > c_i);
-    // r(B_{[..c_i]}) \cup B'_{[c_i+1..c]}
-    const r3 = r2.concat(b3);
-    // r(r(B_{[..c_i]}) \cup B'_{[c_i+1..c]})
-    const r4 = reduceBucket(r3);
-    expect(r4).toEqual(r1);
-  }
-}
```
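The CLEAR checksum in the `reduce 1` fixture above can be derived by hand: `reduceBucket` folds the checksums of the three superseded operations (the first PUT of t1, the REMOVE of t1, and the superseded PUT of t2) into the op_id-0 CLEAR entry. A minimal sketch, assuming `addChecksums` (implemented in `@/util/utils.js`, not shown in this diff) adds checksums with 32-bit wraparound and reports the result as a signed 32-bit integer, which is consistent with the fixture values:

```ts
// Hypothetical stand-in for addChecksums from @/util/utils.js: add two
// checksums modulo 2^32, reporting the result as a signed 32-bit integer.
function addChecksums(a: number, b: number): number {
  return (a + b) | 0; // `| 0` truncates to int32
}

// Checksums of the operations folded into CLEAR in 'reduce 1':
// PUT t1 (op_id 1), REMOVE t1 (op_id 3), and the superseded PUT t2 (op_id 2).
const folded = [2634521662, 4228978084, 4243212114].reduce(addChecksums, 0);
console.log(folded); // -1778190028, the CLEAR checksum asserted in the test
```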
```diff
--- package/test/src/compacting.test.ts
+++ /dev/null
@@ -1,295 +0,0 @@
-import { SaveOperationTag } from '@/storage/BucketStorage.js';
-import { MongoCompactOptions } from '@/storage/mongo/MongoCompactor.js';
-import { describe, expect, test } from 'vitest';
-import { validateCompactedBucket } from './bucket_validation.js';
-import { oneFromAsync } from './stream_utils.js';
-import { BATCH_OPTIONS, makeTestTable, MONGO_STORAGE_FACTORY, rid, testRules } from './util.js';
-
-const TEST_TABLE = makeTestTable('test', ['id']);
-
-// Test with the default options - large batch sizes
-describe('compacting buckets - default options', () => compactTests({}));
-
-// Also test with the minimum batch sizes, forcing usage of multiple batches internally
-describe('compacting buckets - batched', () =>
-  compactTests({ clearBatchLimit: 2, moveBatchLimit: 1, moveBatchQueryLimit: 1 }));
-
-function compactTests(compactOptions: MongoCompactOptions) {
-  const factory = MONGO_STORAGE_FACTORY;
-
-  test('compacting (1)', async () => {
-    const sync_rules = testRules(`
-    bucket_definitions:
-      global:
-        data: [select * from test]
-    `);
-
-    const storage = (await factory()).getInstance(sync_rules);
-
-    const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
-      await batch.save({
-        sourceTable: TEST_TABLE,
-        tag: SaveOperationTag.INSERT,
-        after: {
-          id: 't1'
-        },
-        afterReplicaId: rid('t1')
-      });
-
-      await batch.save({
-        sourceTable: TEST_TABLE,
-        tag: SaveOperationTag.INSERT,
-        after: {
-          id: 't2'
-        },
-        afterReplicaId: rid('t2')
-      });
-
-      await batch.save({
-        sourceTable: TEST_TABLE,
-        tag: SaveOperationTag.UPDATE,
-        after: {
-          id: 't2'
-        },
-        afterReplicaId: rid('t2')
-      });
-    });
-
-    const checkpoint = result!.flushed_op;
-
-    const batchBefore = await oneFromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])));
-    const dataBefore = batchBefore.batch.data;
-    const checksumBefore = await storage.getChecksums(checkpoint, ['global[]']);
-
-    expect(dataBefore).toMatchObject([
-      {
-        checksum: 2634521662,
-        object_id: 't1',
-        op: 'PUT',
-        op_id: '1'
-      },
-      {
-        checksum: 4243212114,
-        object_id: 't2',
-        op: 'PUT',
-        op_id: '2'
-      },
-      {
-        checksum: 4243212114,
-        object_id: 't2',
-        op: 'PUT',
-        op_id: '3'
-      }
-    ]);
-
-    await storage.compact(compactOptions);
-
-    const batchAfter = await oneFromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])));
-    const dataAfter = batchAfter.batch.data;
-    const checksumAfter = await storage.getChecksums(checkpoint, ['global[]']);
-
-    expect(batchAfter.targetOp).toEqual(3n);
-    expect(dataAfter).toMatchObject([
-      {
-        checksum: 2634521662,
-        object_id: 't1',
-        op: 'PUT',
-        op_id: '1'
-      },
-      {
-        checksum: 4243212114,
-        op: 'MOVE',
-        op_id: '2'
-      },
-      {
-        checksum: 4243212114,
-        object_id: 't2',
-        op: 'PUT',
-        op_id: '3'
-      }
-    ]);
-
-    expect(checksumBefore.get('global[]')).toEqual(checksumAfter.get('global[]'));
-
-    validateCompactedBucket(dataBefore, dataAfter);
-  });
-
-  test('compacting (2)', async () => {
-    const sync_rules = testRules(`
-    bucket_definitions:
-      global:
-        data: [select * from test]
-    `);
-
-    const storage = (await factory()).getInstance(sync_rules);
-
-    const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
-      await batch.save({
-        sourceTable: TEST_TABLE,
-        tag: SaveOperationTag.INSERT,
-        after: {
-          id: 't1'
-        },
-        afterReplicaId: rid('t1')
-      });
-
-      await batch.save({
-        sourceTable: TEST_TABLE,
-        tag: SaveOperationTag.INSERT,
-        after: {
-          id: 't2'
-        },
-        afterReplicaId: rid('t2')
-      });
-
-      await batch.save({
-        sourceTable: TEST_TABLE,
-        tag: SaveOperationTag.DELETE,
-        before: {
-          id: 't1'
-        },
-        beforeReplicaId: rid('t1')
-      });
-
-      await batch.save({
-        sourceTable: TEST_TABLE,
-        tag: SaveOperationTag.UPDATE,
-        after: {
-          id: 't2'
-        },
-        afterReplicaId: rid('t2')
-      });
-    });
-
-    const checkpoint = result!.flushed_op;
-
-    const batchBefore = await oneFromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])));
-    const dataBefore = batchBefore.batch.data;
-    const checksumBefore = await storage.getChecksums(checkpoint, ['global[]']);
-
-    expect(dataBefore).toMatchObject([
-      {
-        checksum: 2634521662,
-        object_id: 't1',
-        op: 'PUT',
-        op_id: '1'
-      },
-      {
-        checksum: 4243212114,
-        object_id: 't2',
-        op: 'PUT',
-        op_id: '2'
-      },
-      {
-        checksum: 4228978084,
-        object_id: 't1',
-        op: 'REMOVE',
-        op_id: '3'
-      },
-      {
-        checksum: 4243212114,
-        object_id: 't2',
-        op: 'PUT',
-        op_id: '4'
-      }
-    ]);
-
-    await storage.compact(compactOptions);
-
-    const batchAfter = await oneFromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])));
-    const dataAfter = batchAfter.batch.data;
-    const checksumAfter = await storage.getChecksums(checkpoint, ['global[]']);
-
-    expect(batchAfter.targetOp).toEqual(4n);
-    expect(dataAfter).toMatchObject([
-      {
-        checksum: -1778190028,
-        op: 'CLEAR',
-        op_id: '3'
-      },
-      {
-        checksum: 4243212114,
-        object_id: 't2',
-        op: 'PUT',
-        op_id: '4'
-      }
-    ]);
-    expect(checksumBefore.get('global[]')).toEqual(checksumAfter.get('global[]'));
-
-    validateCompactedBucket(dataBefore, dataAfter);
-  });
-
-  test('compacting (3)', async () => {
-    const sync_rules = testRules(`
-    bucket_definitions:
-      global:
-        data: [select * from test]
-    `);
-
-    const storage = (await factory()).getInstance(sync_rules);
-
-    const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
-      await batch.save({
-        sourceTable: TEST_TABLE,
-        tag: SaveOperationTag.INSERT,
-        after: {
-          id: 't1'
-        },
-        afterReplicaId: 't1'
-      });
-
-      await batch.save({
-        sourceTable: TEST_TABLE,
-        tag: SaveOperationTag.INSERT,
-        after: {
-          id: 't2'
-        },
-        afterReplicaId: 't2'
-      });
-
-      await batch.save({
-        sourceTable: TEST_TABLE,
-        tag: SaveOperationTag.DELETE,
-        before: {
-          id: 't1'
-        },
-        beforeReplicaId: 't1'
-      });
-    });
-
-    const checkpoint1 = result!.flushed_op;
-    const checksumBefore = await storage.getChecksums(checkpoint1, ['global[]']);
-
-    const result2 = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
-      await batch.save({
-        sourceTable: TEST_TABLE,
-        tag: SaveOperationTag.DELETE,
-        before: {
-          id: 't2'
-        },
-        beforeReplicaId: 't2'
-      });
-    });
-    const checkpoint2 = result2!.flushed_op;
-
-    await storage.compact(compactOptions);
-
-    const batchAfter = await oneFromAsync(storage.getBucketDataBatch(checkpoint2, new Map([['global[]', '0']])));
-    const dataAfter = batchAfter.batch.data;
-    const checksumAfter = await storage.getChecksums(checkpoint2, ['global[]']);
-
-    expect(batchAfter.targetOp).toEqual(4n);
-    expect(dataAfter).toMatchObject([
-      {
-        checksum: 1874612650,
-        op: 'CLEAR',
-        op_id: '4'
-      }
-    ]);
-    expect(checksumAfter.get('global[]')).toEqual({
-      bucket: 'global[]',
-      count: 1,
-      checksum: 1874612650
-    });
-  });
-}
```
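These fixtures line up with the bucket-validation helpers above: the compacted bucket asserted in `compacting (2)` (a CLEAR at op_id 3 carrying checksum -1778190028, plus the final PUT for t2) reduces to the same client state that the `reduce 1` test asserts for the uncompacted four-operation bucket. A minimal sketch using `reduceBucket` from the removed bucket_validation.ts; the subkey and data fields are borrowed from the ops1 fixture for illustration, since `compacting (2)` only matches on a subset of fields:

```ts
import { OplogEntry } from '@/util/protocol-types.js';
import { reduceBucket } from './bucket_validation.js';

// Compacted bucket asserted in 'compacting (2)': a CLEAR carrying the folded
// checksum of ops 1-3, plus the final PUT for row t2.
const compacted: OplogEntry[] = [
  { op_id: '3', op: 'CLEAR', checksum: -1778190028 },
  {
    op_id: '4',
    op: 'PUT',
    object_type: 'test',
    object_id: 't2',
    checksum: 4243212114,
    subkey: '6544e3899293153fa7b38331/ec27c691-b47a-5d92-927a-9944feb89eee',
    data: '{"id":"t2"}'
  }
];

// reduceBucket re-keys the CLEAR to op_id '0' and keeps the final PUT per row,
// yielding the same reduced state asserted in the 'reduce 1' test of
// bucket_validation.test.ts.
console.log(reduceBucket(compacted));
// [ { op_id: '0', op: 'CLEAR', checksum: -1778190028 }, { op: 'PUT', op_id: '4', ... } ]
```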