@powersync/service-core-tests 0.14.0 → 0.15.1
This diff compares the content of publicly available package versions as released to their respective public registries, and is provided for informational purposes only.
- package/CHANGELOG.md +40 -0
- package/dist/test-utils/general-utils.d.ts +22 -3
- package/dist/test-utils/general-utils.js +56 -3
- package/dist/test-utils/general-utils.js.map +1 -1
- package/dist/test-utils/stream_utils.js +2 -2
- package/dist/test-utils/stream_utils.js.map +1 -1
- package/dist/tests/register-compacting-tests.d.ts +1 -1
- package/dist/tests/register-compacting-tests.js +360 -297
- package/dist/tests/register-compacting-tests.js.map +1 -1
- package/dist/tests/register-data-storage-checkpoint-tests.d.ts +1 -1
- package/dist/tests/register-data-storage-checkpoint-tests.js +59 -48
- package/dist/tests/register-data-storage-checkpoint-tests.js.map +1 -1
- package/dist/tests/register-data-storage-data-tests.d.ts +2 -2
- package/dist/tests/register-data-storage-data-tests.js +1112 -612
- package/dist/tests/register-data-storage-data-tests.js.map +1 -1
- package/dist/tests/register-data-storage-parameter-tests.d.ts +1 -1
- package/dist/tests/register-data-storage-parameter-tests.js +273 -254
- package/dist/tests/register-data-storage-parameter-tests.js.map +1 -1
- package/dist/tests/register-parameter-compacting-tests.d.ts +1 -1
- package/dist/tests/register-parameter-compacting-tests.js +83 -87
- package/dist/tests/register-parameter-compacting-tests.js.map +1 -1
- package/dist/tests/register-sync-tests.d.ts +2 -1
- package/dist/tests/register-sync-tests.js +479 -451
- package/dist/tests/register-sync-tests.js.map +1 -1
- package/dist/tests/util.d.ts +5 -4
- package/dist/tests/util.js +27 -12
- package/dist/tests/util.js.map +1 -1
- package/package.json +3 -3
- package/src/test-utils/general-utils.ts +81 -4
- package/src/test-utils/stream_utils.ts +2 -2
- package/src/tests/register-compacting-tests.ts +376 -322
- package/src/tests/register-data-storage-checkpoint-tests.ts +85 -53
- package/src/tests/register-data-storage-data-tests.ts +1050 -559
- package/src/tests/register-data-storage-parameter-tests.ts +330 -288
- package/src/tests/register-parameter-compacting-tests.ts +87 -90
- package/src/tests/register-sync-tests.ts +390 -380
- package/src/tests/util.ts +46 -17
- package/tsconfig.tsbuildinfo +1 -1
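
Nearly all of the changed lines come from migrating the shared test suites to a new storage test API. Two changes recur throughout: each `register*Tests` entry point now takes a `storage.TestStorageConfig` object instead of a bare `storage.TestStorageFactory`, and test data is written through a writer created with `bucketStorage.createWriter(...)` instead of the previous `batch` API. A minimal sketch of the call-site change, assuming `factory` is the only required field of `TestStorageConfig` and that the suite is re-exported from the package root (hypothetical call site):

```ts
import { storage } from '@powersync/service-core';
import { registerCompactTests } from '@powersync/service-core-tests';

// Placeholder for a concrete storage factory (e.g. the MongoDB or
// Postgres test factory provided by the matching storage module).
declare const testFactory: storage.TestStorageFactory;

// 0.14.0: the suite took the factory directly.
// registerCompactTests(testFactory);

// 0.15.1: the suite takes a TestStorageConfig and reads config.factory.
registerCompactTests({ factory: testFactory });
```

The diff for `package/src/tests/register-compacting-tests.ts` follows.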
```diff
--- package/src/tests/register-compacting-tests.ts (0.14.0)
+++ package/src/tests/register-compacting-tests.ts (0.15.1)
@@ -1,11 +1,12 @@
-import { storage, updateSyncRulesFromYaml } from '@powersync/service-core';
+import { addChecksums, storage, updateSyncRulesFromYaml } from '@powersync/service-core';
 import { expect, test } from 'vitest';
 import * as test_utils from '../test-utils/test-utils-index.js';
-import { bucketRequest
+import { bucketRequest } from '../test-utils/test-utils-index.js';
+import { bucketRequestMap, bucketRequests } from './util.js';
 
-
+export function registerCompactTests(config: storage.TestStorageConfig) {
+  const generateStorageFactory = config.factory;
 
-export function registerCompactTests(generateStorageFactory: storage.TestStorageFactory) {
   test('compacting (1)', async () => {
     await using factory = await generateStorageFactory();
     const syncRules = await factory.updateSyncRules(
@@ -17,60 +18,59 @@ bucket_definitions:
     );
     const bucketStorage = factory.getInstance(syncRules);
 
-
-
-
-
-
-
-
-
-      }
-
-
-        sourceTable: TEST_TABLE,
-        tag: storage.SaveOperationTag.INSERT,
-        after: {
-          id: 't2'
-        },
-        afterReplicaId: test_utils.rid('t2')
-      });
+    await using writer = await bucketStorage.createWriter(test_utils.BATCH_OPTIONS);
+    const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+    await writer.markAllSnapshotDone('1/1');
+    await writer.save({
+      sourceTable: testTable,
+      tag: storage.SaveOperationTag.INSERT,
+      after: {
+        id: 't1'
+      },
+      afterReplicaId: test_utils.rid('t1')
+    });
 
-
-
-
-
-
-
-
-
+    await writer.save({
+      sourceTable: testTable,
+      tag: storage.SaveOperationTag.INSERT,
+      after: {
+        id: 't2'
+      },
+      afterReplicaId: test_utils.rid('t2')
+    });
 
-
+    await writer.save({
+      sourceTable: testTable,
+      tag: storage.SaveOperationTag.UPDATE,
+      after: {
+        id: 't2'
+      },
+      afterReplicaId: test_utils.rid('t2')
     });
 
-
+    await writer.commit('1/1');
+    await writer.flush();
 
-    const
-
-    );
+    const checkpoint = writer.last_flushed_op!;
+
+    const request = bucketRequest(syncRules, 'global[]');
+
+    const batchBefore = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint, [request]));
     const dataBefore = batchBefore.chunkData.data;
-    const checksumBefore = await bucketStorage.getChecksums(checkpoint,
+    const checksumBefore = await bucketStorage.getChecksums(checkpoint, [request]);
 
     expect(dataBefore).toMatchObject([
       {
-        checksum: 2634521662,
         object_id: 't1',
         op: 'PUT',
         op_id: '1'
       },
       {
-        checksum: 4243212114,
         object_id: 't2',
         op: 'PUT',
         op_id: '2'
       },
       {
-        checksum: 4243212114,
         object_id: 't2',
         op: 'PUT',
         op_id: '3'
@@ -86,41 +86,30 @@ bucket_definitions:
       minChangeRatio: 0
     });
 
-    const batchAfter = await test_utils.oneFromAsync(
-      bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]]))
-    );
+    const batchAfter = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint, [request]));
     const dataAfter = batchAfter.chunkData.data;
-    const checksumAfter = await bucketStorage.getChecksums(checkpoint,
+    const checksumAfter = await bucketStorage.getChecksums(checkpoint, [request]);
     bucketStorage.clearChecksumCache();
-    const checksumAfter2 = await bucketStorage.getChecksums(checkpoint,
+    const checksumAfter2 = await bucketStorage.getChecksums(checkpoint, [request]);
 
     expect(batchAfter.targetOp).toEqual(3n);
     expect(dataAfter).toMatchObject([
+      dataBefore[0],
       {
-        checksum:
-        object_id: 't1',
-        op: 'PUT',
-        op_id: '1'
-      },
-      {
-        checksum: 4243212114,
+        checksum: dataBefore[1].checksum,
         op: 'MOVE',
         op_id: '2'
       },
       {
-        checksum:
+        checksum: dataBefore[2].checksum,
         object_id: 't2',
         op: 'PUT',
         op_id: '3'
       }
     ]);
 
-    expect(checksumAfter.get(
-
-    );
-    expect(checksumAfter2.get(bucketRequest(syncRules, 'global[]'))).toEqual(
-      checksumBefore.get(bucketRequest(syncRules, 'global[]'))
-    );
+    expect(checksumAfter.get(request.bucket)).toEqual(checksumBefore.get(request.bucket));
+    expect(checksumAfter2.get(request.bucket)).toEqual(checksumBefore.get(request.bucket));
 
     test_utils.validateCompactedBucket(dataBefore, dataAfter);
   });
@@ -136,78 +125,72 @@ bucket_definitions:
     );
     const bucketStorage = factory.getInstance(syncRules);
 
-
-
-
-
-
-
-
-
-      }
-
-
-        sourceTable: TEST_TABLE,
-        tag: storage.SaveOperationTag.INSERT,
-        after: {
-          id: 't2'
-        },
-        afterReplicaId: test_utils.rid('t2')
-      });
+    await using writer = await bucketStorage.createWriter(test_utils.BATCH_OPTIONS);
+    const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+    await writer.markAllSnapshotDone('1/1');
+    await writer.save({
+      sourceTable: testTable,
+      tag: storage.SaveOperationTag.INSERT,
+      after: {
+        id: 't1'
+      },
+      afterReplicaId: test_utils.rid('t1')
+    });
 
-
-
-
-
-
-
-
-
+    await writer.save({
+      sourceTable: testTable,
+      tag: storage.SaveOperationTag.INSERT,
+      after: {
+        id: 't2'
+      },
+      afterReplicaId: test_utils.rid('t2')
+    });
 
-
-
-
-
-
-
-
-
+    await writer.save({
+      sourceTable: testTable,
+      tag: storage.SaveOperationTag.DELETE,
+      before: {
+        id: 't1'
+      },
+      beforeReplicaId: test_utils.rid('t1')
+    });
 
-
+    await writer.save({
+      sourceTable: testTable,
+      tag: storage.SaveOperationTag.UPDATE,
+      after: {
+        id: 't2'
+      },
+      afterReplicaId: test_utils.rid('t2')
     });
 
-
+    await writer.commit('1/1');
+    await writer.flush();
 
-    const
-
-
+    const checkpoint = writer.last_flushed_op!;
+    const request = bucketRequest(syncRules, 'global[]');
+
+    const batchBefore = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint, [request]));
     const dataBefore = batchBefore.chunkData.data;
-    const checksumBefore = await bucketStorage.getChecksums(checkpoint,
+    const checksumBefore = await bucketStorage.getChecksums(checkpoint, [request]);
 
+    // op_id sequence depends on the storage implementation
     expect(dataBefore).toMatchObject([
       {
-        checksum: 2634521662,
         object_id: 't1',
-        op: 'PUT'
-        op_id: '1'
+        op: 'PUT'
       },
       {
-        checksum: 4243212114,
         object_id: 't2',
-        op: 'PUT'
-        op_id: '2'
+        op: 'PUT'
       },
       {
-        checksum: 4228978084,
         object_id: 't1',
-        op: 'REMOVE'
-        op_id: '3'
+        op: 'REMOVE'
      },
       {
-        checksum: 4243212114,
         object_id: 't2',
-        op: 'PUT'
-        op_id: '4'
+        op: 'PUT'
       }
     ]);
 
@@ -219,29 +202,28 @@ bucket_definitions:
       minChangeRatio: 0
     });
 
-    const batchAfter = await test_utils.oneFromAsync(
-      bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]]))
-    );
+    const batchAfter = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint, [request]));
     const dataAfter = batchAfter.chunkData.data;
     bucketStorage.clearChecksumCache();
-    const checksumAfter = await bucketStorage.getChecksums(checkpoint,
+    const checksumAfter = await bucketStorage.getChecksums(checkpoint, [request]);
 
-    expect(batchAfter.targetOp).
+    expect(batchAfter.targetOp).toBeLessThanOrEqual(checkpoint);
     expect(dataAfter).toMatchObject([
       {
-        checksum:
-
-
+        checksum: addChecksums(
+          addChecksums(dataBefore[0].checksum as number, dataBefore[1].checksum as number),
+          dataBefore[2].checksum as number
+        ),
+        op: 'CLEAR'
       },
       {
-        checksum:
+        checksum: dataBefore[3].checksum,
         object_id: 't2',
-        op: 'PUT'
-        op_id: '4'
+        op: 'PUT'
       }
     ]);
-    expect(checksumAfter.get(
-      ...checksumBefore.get(
+    expect(checksumAfter.get(request.bucket)).toEqual({
+      ...checksumBefore.get(request.bucket),
       count: 2
     });
 
@@ -259,52 +241,54 @@ bucket_definitions:
     );
     const bucketStorage = factory.getInstance(syncRules);
 
-
-
-
-
-
-
-
-
-      }
-
-
-        sourceTable: TEST_TABLE,
-        tag: storage.SaveOperationTag.INSERT,
-        after: {
-          id: 't2'
-        },
-        afterReplicaId: 't2'
-      });
-
-      await batch.save({
-        sourceTable: TEST_TABLE,
-        tag: storage.SaveOperationTag.DELETE,
-        before: {
-          id: 't1'
-        },
-        beforeReplicaId: 't1'
-      });
+    await using writer = await bucketStorage.createWriter(test_utils.BATCH_OPTIONS);
+    const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+    await writer.markAllSnapshotDone('1/1');
+    await writer.save({
+      sourceTable: testTable,
+      tag: storage.SaveOperationTag.INSERT,
+      after: {
+        id: 't1'
+      },
+      afterReplicaId: 't1'
+    });
 
-
+    await writer.save({
+      sourceTable: testTable,
+      tag: storage.SaveOperationTag.INSERT,
+      after: {
+        id: 't2'
+      },
+      afterReplicaId: 't2'
     });
 
-
-
+    await writer.save({
+      sourceTable: testTable,
+      tag: storage.SaveOperationTag.DELETE,
+      before: {
+        id: 't1'
+      },
+      beforeReplicaId: 't1'
+    });
 
-
-
-
-
-
-
-
-
-
-
+    await writer.commit('1/1');
+    await writer.flush();
+
+    const checkpoint1 = writer.last_flushed_op!;
+    const request = bucketRequest(syncRules, 'global[]');
+    await using writer2 = await bucketStorage.createWriter(test_utils.BATCH_OPTIONS);
+    const testTable2 = await test_utils.resolveTestTable(writer2, 'test', ['id'], config);
+    await writer2.save({
+      sourceTable: testTable2,
+      tag: storage.SaveOperationTag.DELETE,
+      before: {
+        id: 't2'
+      },
+      beforeReplicaId: 't2'
     });
-
+    await writer2.commit('2/1');
+    await writer2.flush();
+    const checkpoint2 = writer2.last_flushed_op!;
 
     await bucketStorage.compact({
       clearBatchLimit: 2,
@@ -314,25 +298,20 @@ bucket_definitions:
       minChangeRatio: 0
     });
 
-    const batchAfter = await test_utils.oneFromAsync(
-      bucketStorage.getBucketDataBatch(checkpoint2, bucketRequestMap(syncRules, [['global[]', 0n]]))
-    );
+    const batchAfter = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint2, [request]));
     const dataAfter = batchAfter.chunkData.data;
     await bucketStorage.clearChecksumCache();
-    const checksumAfter = await bucketStorage.getChecksums(checkpoint2,
+    const checksumAfter = await bucketStorage.getChecksums(checkpoint2, [request]);
 
-    expect(batchAfter.targetOp).toEqual(4n);
     expect(dataAfter).toMatchObject([
       {
-
-        op: 'CLEAR',
-        op_id: '4'
+        op: 'CLEAR'
       }
     ]);
-    expect(checksumAfter.get(
-      bucket:
+    expect(checksumAfter.get(request.bucket)).toEqual({
+      bucket: request.bucket,
       count: 1,
-      checksum:
+      checksum: dataAfter[0].checksum
     });
   });
 
@@ -350,75 +329,78 @@ bucket_definitions:
     );
     const bucketStorage = factory.getInstance(syncRules);
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-          }
-
-
-
-
-
-
-
-
-
-
-          }
-
-
-
-
-
-
-
-
-
-
-          }
-
-
-          sourceTable: TEST_TABLE,
-          tag: storage.SaveOperationTag.UPDATE,
-          after: {
-            id: 't1',
-            b: 'b1',
-            value: 'final'
-          },
-          afterReplicaId: test_utils.rid('t1')
-        });
-
-        await batch.save({
-          sourceTable: TEST_TABLE,
-          tag: storage.SaveOperationTag.UPDATE,
-          after: {
-            id: 't2',
-            b: 'b2',
-            value: 'final'
-          },
-          afterReplicaId: test_utils.rid('t2')
-        });
-
-        await batch.commit('1/1');
-      }
-    });
+    await using writer = await bucketStorage.createWriter(test_utils.BATCH_OPTIONS);
+    const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+    await writer.markAllSnapshotDone('1/1');
+    /**
+     * Repeatedly create operations which fall into different buckets.
+     * The bucket operations are purposely interleaved as the op_id increases.
+     * A large amount of operations are created here.
+     * The configured window of compacting operations is 100. This means the initial window will
+     * contain operations from multiple buckets.
+     */
+    for (let count = 0; count < 100; count++) {
+      await writer.save({
+        sourceTable: testTable,
+        tag: storage.SaveOperationTag.INSERT,
+        after: {
+          id: 't1',
+          b: 'b1',
+          value: 'start'
+        },
+        afterReplicaId: test_utils.rid('t1')
+      });
+
+      await writer.save({
+        sourceTable: testTable,
+        tag: storage.SaveOperationTag.UPDATE,
+        after: {
+          id: 't1',
+          b: 'b1',
+          value: 'intermediate'
+        },
+        afterReplicaId: test_utils.rid('t1')
+      });
+
+      await writer.save({
+        sourceTable: testTable,
+        tag: storage.SaveOperationTag.INSERT,
+        after: {
+          id: 't2',
+          b: 'b2',
+          value: 'start'
+        },
+        afterReplicaId: test_utils.rid('t2')
+      });
 
-
+      await writer.save({
+        sourceTable: testTable,
+        tag: storage.SaveOperationTag.UPDATE,
+        after: {
+          id: 't1',
+          b: 'b1',
+          value: 'final'
+        },
+        afterReplicaId: test_utils.rid('t1')
+      });
+
+      await writer.save({
+        sourceTable: testTable,
+        tag: storage.SaveOperationTag.UPDATE,
+        after: {
+          id: 't2',
+          b: 'b2',
+          value: 'final'
+        },
+        afterReplicaId: test_utils.rid('t2')
+      });
+
+      await writer.commit('1/1');
+    }
+
+    await writer.flush();
+
+    const checkpoint = writer.last_flushed_op!;
 
     await bucketStorage.compact({
       clearBatchLimit: 100,
@@ -477,37 +459,39 @@ bucket_definitions:
     );
     const bucketStorage = factory.getInstance(syncRules);
 
-
-
-
-
-
-
-
-
-      }
-
-
-        sourceTable: TEST_TABLE,
-        tag: storage.SaveOperationTag.INSERT,
-        after: {
-          id: 't2'
-        },
-        afterReplicaId: 't2'
-      });
+    await using writer = await bucketStorage.createWriter(test_utils.BATCH_OPTIONS);
+    const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+    await writer.markAllSnapshotDone('1/1');
+    await writer.save({
+      sourceTable: testTable,
+      tag: storage.SaveOperationTag.INSERT,
+      after: {
+        id: 't1'
+      },
+      afterReplicaId: 't1'
+    });
 
-
-
-
-
-
-
-
-
+    await writer.save({
+      sourceTable: testTable,
+      tag: storage.SaveOperationTag.INSERT,
+      after: {
+        id: 't2'
+      },
+      afterReplicaId: 't2'
+    });
 
-
+    await writer.save({
+      sourceTable: testTable,
+      tag: storage.SaveOperationTag.DELETE,
+      before: {
+        id: 't1'
+      },
+      beforeReplicaId: 't1'
     });
 
+    await writer.commit('1/1');
+    await writer.flush();
+
     await bucketStorage.compact({
       clearBatchLimit: 2,
       moveBatchLimit: 1,
@@ -516,25 +500,30 @@ bucket_definitions:
      minChangeRatio: 0
     });
 
-
-
-
-
-
-
-
-
-
-      await batch.commit('2/1');
+    await using writer2 = await bucketStorage.createWriter(test_utils.BATCH_OPTIONS);
+    const testTable2 = await test_utils.resolveTestTable(writer2, 'test', ['id'], config);
+    await writer2.save({
+      sourceTable: testTable2,
+      tag: storage.SaveOperationTag.DELETE,
+      before: {
+        id: 't2'
+      },
+      beforeReplicaId: 't2'
     });
-
+    await writer2.commit('2/1');
+    await writer2.flush();
+    const checkpoint2 = writer2.last_flushed_op!;
+    const request = bucketRequest(syncRules, 'global[]');
     await bucketStorage.clearChecksumCache();
-    const checksumAfter = await bucketStorage.getChecksums(checkpoint2,
-
-
-
-
+    const checksumAfter = await bucketStorage.getChecksums(checkpoint2, [request]);
+    const globalChecksum = checksumAfter.get(request.bucket);
+    expect(globalChecksum).toMatchObject({
+      bucket: request.bucket,
+      count: 4
     });
+
+    // storage-specific checksum - just check that it does not change
+    expect(globalChecksum).toMatchSnapshot();
   });
 
   test('partial checksums after compacting (2)', async () => {
@@ -548,41 +537,44 @@ bucket_definitions:
     );
     const bucketStorage = factory.getInstance(syncRules);
 
-
-
-
-
-
-
-
-
-      }
-
-
-        sourceTable: TEST_TABLE,
-        tag: storage.SaveOperationTag.UPDATE,
-        after: {
-          id: 't1'
-        },
-        afterReplicaId: 't1'
-      });
+    await using writer = await bucketStorage.createWriter(test_utils.BATCH_OPTIONS);
+    const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+    await writer.markAllSnapshotDone('1/1');
+    await writer.save({
+      sourceTable: testTable,
+      tag: storage.SaveOperationTag.INSERT,
+      after: {
+        id: 't1'
+      },
+      afterReplicaId: 't1'
+    });
 
-
+    await writer.save({
+      sourceTable: testTable,
+      tag: storage.SaveOperationTag.UPDATE,
+      after: {
+        id: 't1'
+      },
+      afterReplicaId: 't1'
    });
 
+    await writer.commit('1/1');
+    await writer.flush();
+
     // Get checksums here just to populate the cache
-    await bucketStorage.getChecksums(
-
-
-
-
-
-
-
-
-
-      await batch.commit('2/1');
+    await bucketStorage.getChecksums(writer.last_flushed_op!, bucketRequests(syncRules, ['global[]']));
+    await using writer2 = await bucketStorage.createWriter(test_utils.BATCH_OPTIONS);
+    const testTable2 = await test_utils.resolveTestTable(writer2, 'test', ['id'], config);
+    await writer2.save({
+      sourceTable: testTable2,
+      tag: storage.SaveOperationTag.DELETE,
+      before: {
+        id: 't1'
+      },
+      beforeReplicaId: 't1'
    });
+    await writer2.commit('2/1');
+    await writer2.flush();
 
     await bucketStorage.compact({
       clearBatchLimit: 20,
@@ -592,13 +584,75 @@ bucket_definitions:
       minChangeRatio: 0
     });
 
-    const checkpoint2 =
+    const checkpoint2 = writer2.last_flushed_op!;
+    const request = bucketRequest(syncRules, 'global[]');
     // Check that the checksum was correctly updated with the clear operation after having a cached checksum
-    const checksumAfter = await bucketStorage.getChecksums(checkpoint2,
-
-
-
-
+    const checksumAfter = await bucketStorage.getChecksums(checkpoint2, [request]);
+    const globalChecksum = checksumAfter.get(request.bucket);
+    expect(globalChecksum).toMatchObject({
+      bucket: request.bucket,
+      count: 1
+    });
+    // storage-specific checksum - just check that it does not change
+    expect(globalChecksum).toMatchSnapshot();
+  });
+
+  test('defaults maxOpId to current checkpoint', async () => {
+    await using factory = await generateStorageFactory();
+    const syncRules = await factory.updateSyncRules(
+      updateSyncRulesFromYaml(`
+bucket_definitions:
+  global:
+    data: [select * from test]
+`)
+    );
+    const bucketStorage = factory.getInstance(syncRules);
+
+    await using writer = await bucketStorage.createWriter(test_utils.BATCH_OPTIONS);
+    const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+    await writer.markAllSnapshotDone('1/1');
+    await writer.save({
+      sourceTable: testTable,
+      tag: storage.SaveOperationTag.INSERT,
+      after: { id: 't1' },
+      afterReplicaId: test_utils.rid('t1')
     });
+    await writer.commit('1/1');
+    await writer.flush();
+
+    const checkpoint1 = writer.last_flushed_op!;
+
+    await using writer2 = await bucketStorage.createWriter(test_utils.BATCH_OPTIONS);
+    const testTable2 = await test_utils.resolveTestTable(writer2, 'test', ['id'], config);
+    // This is flushed but not committed (does not advance the checkpoint)
+    await writer2.save({
+      sourceTable: testTable2,
+      tag: storage.SaveOperationTag.UPDATE,
+      after: { id: 't1' },
+      afterReplicaId: test_utils.rid('t1')
+    });
+    await writer2.flush();
+    const checkpoint2 = writer2.last_flushed_op!;
+
+    const checkpointBeforeCompact = await bucketStorage.getCheckpoint();
+    expect(checkpointBeforeCompact.checkpoint).toEqual(checkpoint1);
+
+    // With default options, Postgres compaction should use the active checkpoint.
+    await bucketStorage.compact({
+      moveBatchLimit: 1,
+      moveBatchQueryLimit: 1,
+      minBucketChanges: 1,
+      minChangeRatio: 0
+    });
+
+    const batchAfterDefaultCompact = await test_utils.oneFromAsync(
+      bucketStorage.getBucketDataBatch(checkpoint2, bucketRequestMap(syncRules, [['global[]', 0n]]))
+    );
+
+    // Operation 1 should remain a PUT because op_id=2 is above the default maxOpId checkpoint.
+    expect(batchAfterDefaultCompact.chunkData.data).toMatchObject([
+      { op_id: '1', op: 'PUT', object_id: 't1' },
+      { op_id: '2', op: 'PUT', object_id: 't1' }
+    ]);
   });
 }
```