@powersync/service-core-tests 0.13.2 → 0.15.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +43 -0
- package/dist/test-utils/general-utils.d.ts +9 -3
- package/dist/test-utils/general-utils.js +26 -26
- package/dist/test-utils/general-utils.js.map +1 -1
- package/dist/tests/register-compacting-tests.d.ts +1 -1
- package/dist/tests/register-compacting-tests.js +136 -93
- package/dist/tests/register-compacting-tests.js.map +1 -1
- package/dist/tests/register-data-storage-checkpoint-tests.d.ts +1 -1
- package/dist/tests/register-data-storage-checkpoint-tests.js +44 -27
- package/dist/tests/register-data-storage-checkpoint-tests.js.map +1 -1
- package/dist/tests/register-data-storage-data-tests.d.ts +2 -2
- package/dist/tests/register-data-storage-data-tests.js +715 -207
- package/dist/tests/register-data-storage-data-tests.js.map +1 -1
- package/dist/tests/register-data-storage-parameter-tests.d.ts +1 -1
- package/dist/tests/register-data-storage-parameter-tests.js +123 -58
- package/dist/tests/register-data-storage-parameter-tests.js.map +1 -1
- package/dist/tests/register-parameter-compacting-tests.d.ts +1 -1
- package/dist/tests/register-parameter-compacting-tests.js +13 -13
- package/dist/tests/register-parameter-compacting-tests.js.map +1 -1
- package/dist/tests/register-sync-tests.d.ts +4 -1
- package/dist/tests/register-sync-tests.js +63 -34
- package/dist/tests/register-sync-tests.js.map +1 -1
- package/dist/tests/util.d.ts +6 -1
- package/dist/tests/util.js +31 -2
- package/dist/tests/util.js.map +1 -1
- package/package.json +3 -3
- package/src/test-utils/general-utils.ts +42 -28
- package/src/tests/register-compacting-tests.ts +153 -103
- package/src/tests/register-data-storage-checkpoint-tests.ts +70 -22
- package/src/tests/register-data-storage-data-tests.ts +732 -110
- package/src/tests/register-data-storage-parameter-tests.ts +168 -59
- package/src/tests/register-parameter-compacting-tests.ts +18 -13
- package/src/tests/register-sync-tests.ts +71 -35
- package/src/tests/util.ts +52 -2
- package/tsconfig.tsbuildinfo +1 -1
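The headline change across these files is that every exported test registrar now takes a single `storage.TestStorageConfig` object instead of a bare storage factory (visible in the `registerDataStorageDataTests` hunks below). A minimal sketch of how a storage driver's test suite might call the new API; `createTestFactory` is a hypothetical driver-specific helper, not part of this package:

import { storage } from '@powersync/service-core';
import { registerDataStorageDataTests } from '@powersync/service-core-tests';

// Hypothetical driver-specific factory; its real type is whatever
// storage.TestStorageConfig['factory'] requires in this release.
declare const createTestFactory: storage.TestStorageConfig['factory'];

registerDataStorageDataTests({
  factory: createTestFactory,
  // Optional: the registrars fall back to storage.CURRENT_STORAGE_VERSION.
  storageVersion: storage.CURRENT_STORAGE_VERSION
});

The diff below covers package/src/tests/register-data-storage-data-tests.ts.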
@@ -1,7 +1,16 @@
-import {
+import {
+  BucketDataBatchOptions,
+  CURRENT_STORAGE_VERSION,
+  getUuidReplicaIdentityBson,
+  OplogEntry,
+  reduceBucket,
+  storage,
+  updateSyncRulesFromYaml
+} from '@powersync/service-core';
 import { describe, expect, test } from 'vitest';
 import * as test_utils from '../test-utils/test-utils-index.js';
-import {
+import { bucketRequest } from '../test-utils/test-utils-index.js';
+import { bucketRequestMap, bucketRequests } from './util.js';

 /**
  * Normalize data from OplogEntries for comparison in tests.
@@ -24,21 +33,30 @@ const normalizeOplogData = (data: OplogEntry['data']) => {
  *
  * ```
  */
-export function registerDataStorageDataTests(
+export function registerDataStorageDataTests(config: storage.TestStorageConfig) {
+  const generateStorageFactory = config.factory;
+  const storageVersion = config.storageVersion ?? storage.CURRENT_STORAGE_VERSION;
+
+  const TEST_TABLE = test_utils.makeTestTable('test', ['id'], config);
+
   test('removing row', async () => {
     await using factory = await generateStorageFactory();
-    const syncRules = await factory.updateSyncRules(
-
+    const syncRules = await factory.updateSyncRules(
+      updateSyncRulesFromYaml(
+        `
 bucket_definitions:
   global:
     data:
       - SELECT id, description FROM "%"
-
-
+`,
+        { storageVersion }
+      )
+    );
     const bucketStorage = factory.getInstance(syncRules);

     await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
       const sourceTable = TEST_TABLE;
+      await batch.markAllSnapshotDone('1/1');

       await batch.save({
         sourceTable,
@@ -59,7 +77,9 @@ bucket_definitions:

     const { checkpoint } = await bucketStorage.getCheckpoint();

-    const batch = await test_utils.fromAsync(
+    const batch = await test_utils.fromAsync(
+      bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]]))
+    );
     const data = batch[0].chunkData.data.map((d) => {
       return {
         op: d.op,
@@ -76,30 +96,332 @@ bucket_definitions:
       { op: 'REMOVE', object_id: 'test1', checksum: c2 }
     ]);

-    const checksums = [
+    const checksums = [
+      ...(await bucketStorage.getChecksums(checkpoint, bucketRequests(syncRules, ['global[]']))).values()
+    ];
     expect(checksums).toEqual([
       {
-        bucket: 'global[]',
+        bucket: bucketRequest(syncRules, 'global[]').bucket,
         checksum: (c1 + c2) & 0xffffffff,
         count: 2
       }
     ]);
   });

+  test('insert after delete in new batch', async () => {
+    await using factory = await generateStorageFactory();
+    const syncRules = await factory.updateSyncRules(
+      updateSyncRulesFromYaml(
+        `
+bucket_definitions:
+  global:
+    data:
+      - SELECT id, description FROM "%"
+`,
+        { storageVersion }
+      )
+    );
+    const bucketStorage = factory.getInstance(syncRules);
+
+    const sourceTable = TEST_TABLE;
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.markAllSnapshotDone('1/1');
+
+      await batch.save({
+        sourceTable,
+        tag: storage.SaveOperationTag.DELETE,
+        beforeReplicaId: test_utils.rid('test1')
+      });
+
+      await batch.commit('0/1');
+    });
+
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      const sourceTable = TEST_TABLE;
+
+      await batch.save({
+        sourceTable,
+        tag: storage.SaveOperationTag.INSERT,
+        after: {
+          id: 'test1',
+          description: 'test1'
+        },
+        afterReplicaId: test_utils.rid('test1')
+      });
+      await batch.commit('2/1');
+    });
+
+    const { checkpoint } = await bucketStorage.getCheckpoint();
+
+    const batch = await test_utils.fromAsync(
+      bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]]))
+    );
+    const data = batch[0].chunkData.data.map((d) => {
+      return {
+        op: d.op,
+        object_id: d.object_id,
+        checksum: d.checksum
+      };
+    });
+
+    const c1 = 2871785649;
+
+    expect(data).toEqual([{ op: 'PUT', object_id: 'test1', checksum: c1 }]);
+
+    const checksums = [
+      ...(await bucketStorage.getChecksums(checkpoint, bucketRequests(syncRules, ['global[]']))).values()
+    ];
+    expect(checksums).toEqual([
+      {
+        bucket: bucketRequest(syncRules, 'global[]').bucket,
+        checksum: c1 & 0xffffffff,
+        count: 1
+      }
+    ]);
+  });
+
+  test('update after delete in new batch', async () => {
+    // Update after delete may not be common, but the storage layer should handle it in an eventually-consistent way.
+    await using factory = await generateStorageFactory();
+    const syncRules = await factory.updateSyncRules(
+      updateSyncRulesFromYaml(
+        `
+bucket_definitions:
+  global:
+    data:
+      - SELECT id, description FROM "%"
+`,
+        { storageVersion }
+      )
+    );
+    const bucketStorage = factory.getInstance(syncRules);
+
+    const sourceTable = TEST_TABLE;
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.markAllSnapshotDone('1/1');
+
+      await batch.save({
+        sourceTable,
+        tag: storage.SaveOperationTag.DELETE,
+        beforeReplicaId: test_utils.rid('test1')
+      });
+
+      await batch.commit('0/1');
+    });
+
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      const sourceTable = TEST_TABLE;
+
+      await batch.save({
+        sourceTable,
+        tag: storage.SaveOperationTag.UPDATE,
+        before: {
+          id: 'test1'
+        },
+        after: {
+          id: 'test1',
+          description: 'test1'
+        },
+        beforeReplicaId: test_utils.rid('test1'),
+        afterReplicaId: test_utils.rid('test1')
+      });
+      await batch.commit('2/1');
+    });
+
+    const { checkpoint } = await bucketStorage.getCheckpoint();
+
+    const batch = await test_utils.fromAsync(
+      bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]]))
+    );
+    const data = batch[0].chunkData.data.map((d) => {
+      return {
+        op: d.op,
+        object_id: d.object_id,
+        checksum: d.checksum
+      };
+    });
+
+    const c1 = 2871785649;
+
+    expect(data).toEqual([{ op: 'PUT', object_id: 'test1', checksum: c1 }]);
+
+    const checksums = [
+      ...(await bucketStorage.getChecksums(checkpoint, bucketRequests(syncRules, ['global[]']))).values()
+    ];
+    expect(checksums).toEqual([
+      {
+        bucket: bucketRequest(syncRules, 'global[]').bucket,
+        checksum: c1 & 0xffffffff,
+        count: 1
+      }
+    ]);
+  });
+
+  test('insert after delete in same batch', async () => {
+    await using factory = await generateStorageFactory();
+    const syncRules = await factory.updateSyncRules(
+      updateSyncRulesFromYaml(
+        `
+bucket_definitions:
+  global:
+    data:
+      - SELECT id, description FROM "%"
+`,
+        {
+          storageVersion
+        }
+      )
+    );
+    const bucketStorage = factory.getInstance(syncRules);
+
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      const sourceTable = TEST_TABLE;
+      await batch.markAllSnapshotDone('1/1');
+
+      await batch.save({
+        sourceTable,
+        tag: storage.SaveOperationTag.DELETE,
+        beforeReplicaId: test_utils.rid('test1')
+      });
+      await batch.save({
+        sourceTable,
+        tag: storage.SaveOperationTag.INSERT,
+        after: {
+          id: 'test1',
+          description: 'test1'
+        },
+        afterReplicaId: test_utils.rid('test1')
+      });
+      await batch.commit('1/1');
+    });
+
+    const { checkpoint } = await bucketStorage.getCheckpoint();
+
+    const batch = await test_utils.fromAsync(
+      bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]]))
+    );
+    const data = batch[0].chunkData.data.map((d) => {
+      return {
+        op: d.op,
+        object_id: d.object_id,
+        checksum: d.checksum
+      };
+    });
+
+    const c1 = 2871785649;
+
+    expect(data).toEqual([{ op: 'PUT', object_id: 'test1', checksum: c1 }]);
+
+    const checksums = [
+      ...(await bucketStorage.getChecksums(checkpoint, bucketRequests(syncRules, ['global[]']))).values()
+    ];
+    expect(checksums).toEqual([
+      {
+        bucket: bucketRequest(syncRules, 'global[]').bucket,
+        checksum: c1 & 0xffffffff,
+        count: 1
+      }
+    ]);
+  });
+
+  test('(insert, delete, insert), (delete)', async () => {
+    await using factory = await generateStorageFactory();
+    const syncRules = await factory.updateSyncRules(
+      updateSyncRulesFromYaml(
+        `
+bucket_definitions:
+  global:
+    data:
+      - SELECT id, description FROM "%"
+`,
+        {
+          storageVersion
+        }
+      )
+    );
+    const bucketStorage = factory.getInstance(syncRules);
+
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      const sourceTable = TEST_TABLE;
+      await batch.markAllSnapshotDone('1/1');
+
+      await batch.save({
+        sourceTable,
+        tag: storage.SaveOperationTag.INSERT,
+        after: {
+          id: 'test1',
+          description: 'test1'
+        },
+        afterReplicaId: test_utils.rid('test1')
+      });
+      await batch.save({
+        sourceTable,
+        tag: storage.SaveOperationTag.DELETE,
+        beforeReplicaId: test_utils.rid('test1')
+      });
+      await batch.save({
+        sourceTable,
+        tag: storage.SaveOperationTag.INSERT,
+        after: {
+          id: 'test1',
+          description: 'test1'
+        },
+        afterReplicaId: test_utils.rid('test1')
+      });
+      await batch.commit('1/1');
+    });
+
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      const sourceTable = TEST_TABLE;
+      await batch.markAllSnapshotDone('1/1');
+
+      await batch.save({
+        sourceTable,
+        tag: storage.SaveOperationTag.DELETE,
+        beforeReplicaId: test_utils.rid('test1')
+      });
+      await batch.commit('2/1');
+    });
+
+    const { checkpoint } = await bucketStorage.getCheckpoint();
+
+    const batch = await test_utils.fromAsync(
+      bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]]))
+    );
+
+    expect(reduceBucket(batch[0].chunkData.data).slice(1)).toEqual([]);
+
+    const data = batch[0].chunkData.data.map((d) => {
+      return {
+        op: d.op,
+        object_id: d.object_id,
+        checksum: d.checksum
+      };
+    });
+
+    expect(data).toMatchSnapshot();
+  });
+
   test('changing client ids', async () => {
     await using factory = await generateStorageFactory();
-    const syncRules = await factory.updateSyncRules(
-
+    const syncRules = await factory.updateSyncRules(
+      updateSyncRulesFromYaml(
+        `
 bucket_definitions:
   global:
     data:
       - SELECT client_id as id, description FROM "%"
-
-
+`,
+        {
+          storageVersion
+        }
+      )
+    );
     const bucketStorage = factory.getInstance(syncRules);

     const sourceTable = TEST_TABLE;
     await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.markAllSnapshotDone('1/1');
       await batch.save({
         sourceTable,
         tag: storage.SaveOperationTag.INSERT,
@@ -135,7 +457,9 @@ bucket_definitions:
       await batch.commit('1/1');
     });
     const { checkpoint } = await bucketStorage.getCheckpoint();
-    const batch = await test_utils.fromAsync(
+    const batch = await test_utils.fromAsync(
+      bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]]))
+    );
     const data = batch[0].chunkData.data.map((d) => {
       return {
         op: d.op,
@@ -153,18 +477,24 @@ bucket_definitions:

   test('re-apply delete', async () => {
     await using factory = await generateStorageFactory();
-    const syncRules = await factory.updateSyncRules(
-
+    const syncRules = await factory.updateSyncRules(
+      updateSyncRulesFromYaml(
+        `
 bucket_definitions:
   global:
     data:
       - SELECT id, description FROM "%"
-
-
+`,
+        {
+          storageVersion
+        }
+      )
+    );
     const bucketStorage = factory.getInstance(syncRules);

     await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
       const sourceTable = TEST_TABLE;
+      await batch.markAllSnapshotDone('1/1');

       await batch.save({
         sourceTable,
@@ -201,7 +531,9 @@ bucket_definitions:

     const { checkpoint } = await bucketStorage.getCheckpoint();

-    const batch = await test_utils.fromAsync(
+    const batch = await test_utils.fromAsync(
+      bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]]))
+    );
     const data = batch[0].chunkData.data.map((d) => {
       return {
         op: d.op,
@@ -218,10 +550,12 @@ bucket_definitions:
       { op: 'REMOVE', object_id: 'test1', checksum: c2 }
     ]);

-    const checksums = [
+    const checksums = [
+      ...(await bucketStorage.getChecksums(checkpoint, bucketRequests(syncRules, ['global[]']))).values()
+    ];
     expect(checksums).toEqual([
       {
-        bucket: 'global[]',
+        bucket: bucketRequest(syncRules, 'global[]').bucket,
         checksum: (c1 + c2) & 0xffffffff,
         count: 2
       }
@@ -230,17 +564,21 @@ bucket_definitions:

   test('re-apply update + delete', async () => {
     await using factory = await generateStorageFactory();
-    const syncRules = await factory.updateSyncRules(
-
+    const syncRules = await factory.updateSyncRules(
+      updateSyncRulesFromYaml(
+        `
 bucket_definitions:
   global:
     data:
       - SELECT id, description FROM "%"
-
-
+`,
+        { storageVersion }
+      )
+    );
     const bucketStorage = factory.getInstance(syncRules);

     await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.markAllSnapshotDone('1/1');
       const sourceTable = TEST_TABLE;

       await batch.save({
@@ -255,6 +593,7 @@ bucket_definitions:
     });

     await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.markAllSnapshotDone('1/1');
       const sourceTable = TEST_TABLE;

       await batch.save({
@@ -287,6 +626,7 @@ bucket_definitions:
     });

     await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.markAllSnapshotDone('1/1');
       const sourceTable = TEST_TABLE;

       await batch.save({
@@ -320,7 +660,9 @@ bucket_definitions:

     const { checkpoint } = await bucketStorage.getCheckpoint();

-    const batch = await test_utils.fromAsync(
+    const batch = await test_utils.fromAsync(
+      bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]]))
+    );

     const data = batch[0].chunkData.data.map((d) => {
       return {
@@ -340,10 +682,12 @@ bucket_definitions:
       { op: 'REMOVE', object_id: 'test1', checksum: c2 }
     ]);

-    const checksums = [
+    const checksums = [
+      ...(await bucketStorage.getChecksums(checkpoint, bucketRequests(syncRules, ['global[]']))).values()
+    ];
     expect(checksums).toEqual([
       {
-        bucket: 'global[]',
+        bucket: bucketRequest(syncRules, 'global[]').bucket,
         checksum: (c1 + c1 + c1 + c2) & 0xffffffff,
         count: 4
       }
@@ -360,18 +704,22 @@ bucket_definitions:
     // 2. Output order not being correct.

     await using factory = await generateStorageFactory();
-    const syncRules = await factory.updateSyncRules(
-
+    const syncRules = await factory.updateSyncRules(
+      updateSyncRulesFromYaml(
+        `
 bucket_definitions:
   global:
     data:
       - SELECT id, description FROM "test"
-
-
+`,
+        { storageVersion }
+      )
+    );
     const bucketStorage = factory.getInstance(syncRules);

     // Pre-setup
     const result1 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.markAllSnapshotDone('1/1');
       const sourceTable = TEST_TABLE;

       await batch.save({
@@ -480,7 +828,7 @@ bucket_definitions:
     const checkpoint2 = result2!.flushed_op;

     const batch = await test_utils.fromAsync(
-      bucketStorage.getBucketDataBatch(checkpoint2,
+      bucketStorage.getBucketDataBatch(checkpoint2, bucketRequestMap(syncRules, [['global[]', checkpoint1]]))
     );

     const data = batch[0].chunkData.data.map((d) => {
@@ -518,20 +866,26 @@ bucket_definitions:
       ]);
     }
     await using factory = await generateStorageFactory();
-    const syncRules = await factory.updateSyncRules(
-
+    const syncRules = await factory.updateSyncRules(
+      updateSyncRulesFromYaml(
+        `
 bucket_definitions:
   global:
     data:
       - SELECT id, description FROM "test"
-
-
+`,
+        {
+          storageVersion
+        }
+      )
+    );
     const bucketStorage = factory.getInstance(syncRules);

-    const sourceTable = test_utils.makeTestTable('test', ['id', 'description']);
+    const sourceTable = test_utils.makeTestTable('test', ['id', 'description'], config);

     // Pre-setup
     const result1 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.markAllSnapshotDone('1/1');
       await batch.save({
         sourceTable,
         tag: storage.SaveOperationTag.INSERT,
@@ -580,7 +934,7 @@ bucket_definitions:
     const checkpoint3 = result3!.flushed_op;

     const batch = await test_utils.fromAsync(
-      bucketStorage.getBucketDataBatch(checkpoint3,
+      bucketStorage.getBucketDataBatch(checkpoint3, bucketRequestMap(syncRules, [['global[]', checkpoint1]]))
     );
     const data = batch[0].chunkData.data.map((d) => {
       return {
@@ -626,20 +980,26 @@ bucket_definitions:
     }

     await using factory = await generateStorageFactory();
-    const syncRules = await factory.updateSyncRules(
-
+    const syncRules = await factory.updateSyncRules(
+      updateSyncRulesFromYaml(
+        `
 bucket_definitions:
   global:
     data:
       - SELECT id, description FROM "test"
-
-
+`,
+        {
+          storageVersion
+        }
+      )
+    );
     const bucketStorage = factory.getInstance(syncRules);

-    const sourceTable = test_utils.makeTestTable('test', ['id', 'description']);
+    const sourceTable = test_utils.makeTestTable('test', ['id', 'description'], config);

     // Pre-setup
     const result1 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.markAllSnapshotDone('1/1');
       await batch.save({
         sourceTable,
         tag: storage.SaveOperationTag.INSERT,
@@ -688,7 +1048,7 @@ bucket_definitions:
     const checkpoint3 = result3!.flushed_op;

     const batch = await test_utils.fromAsync(
-      bucketStorage.getBucketDataBatch(checkpoint3,
+      bucketStorage.getBucketDataBatch(checkpoint3, bucketRequestMap(syncRules, [['global[]', checkpoint1]]))
     );
     const data = batch[0].chunkData.data.map((d) => {
       return {
@@ -724,17 +1084,23 @@ bucket_definitions:
     // The specific batch splits is an implementation detail of the storage driver,
     // and the test will have to updated when other implementations are added.
     await using factory = await generateStorageFactory();
-    const syncRules = await factory.updateSyncRules(
-
+    const syncRules = await factory.updateSyncRules(
+      updateSyncRulesFromYaml(
+        `
 bucket_definitions:
   global:
     data:
       - SELECT id, description FROM "%"
-
-
+`,
+        {
+          storageVersion
+        }
+      )
+    );
     const bucketStorage = factory.getInstance(syncRules);

     await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.markAllSnapshotDone('1/1');
       const sourceTable = TEST_TABLE;

       const largeDescription = '0123456789'.repeat(12_000_00);
@@ -790,7 +1156,7 @@ bucket_definitions:
     };

     const batch1 = await test_utils.fromAsync(
-      bucketStorage.getBucketDataBatch(checkpoint,
+      bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]]), options)
     );
     expect(test_utils.getBatchData(batch1)).toEqual([
       { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 },
@@ -805,7 +1171,7 @@ bucket_definitions:
     const batch2 = await test_utils.fromAsync(
       bucketStorage.getBucketDataBatch(
         checkpoint,
-
+        bucketRequestMap(syncRules, [['global[]', BigInt(batch1[0].chunkData.next_after)]]),
         options
       )
     );
@@ -822,7 +1188,7 @@ bucket_definitions:
     const batch3 = await test_utils.fromAsync(
       bucketStorage.getBucketDataBatch(
         checkpoint,
-
+        bucketRequestMap(syncRules, [['global[]', BigInt(batch2[0].chunkData.next_after)]]),
         options
       )
     );
@@ -833,17 +1199,23 @@ bucket_definitions:
   test('long batch', async () => {
     // Test syncing a batch of data that is limited by count.
     await using factory = await generateStorageFactory();
-    const syncRules = await factory.updateSyncRules(
-
+    const syncRules = await factory.updateSyncRules(
+      updateSyncRulesFromYaml(
+        `
 bucket_definitions:
   global:
     data:
       - SELECT id, description FROM "%"
-
-
+`,
+        {
+          storageVersion
+        }
+      )
+    );
     const bucketStorage = factory.getInstance(syncRules);

     await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.markAllSnapshotDone('1/1');
       const sourceTable = TEST_TABLE;

       for (let i = 1; i <= 6; i++) {
@@ -864,7 +1236,7 @@ bucket_definitions:
     const { checkpoint } = await bucketStorage.getCheckpoint();

     const batch1 = await test_utils.oneFromAsync(
-      bucketStorage.getBucketDataBatch(checkpoint,
+      bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]]), { limit: 4 })
     );

     expect(test_utils.getBatchData(batch1)).toEqual([
@@ -881,9 +1253,13 @@ bucket_definitions:
     });

     const batch2 = await test_utils.oneFromAsync(
-      bucketStorage.getBucketDataBatch(
-
-
+      bucketStorage.getBucketDataBatch(
+        checkpoint,
+        bucketRequestMap(syncRules, [['global[]', BigInt(batch1.chunkData.next_after)]]),
+        {
+          limit: 4
+        }
+      )
     );
     expect(test_utils.getBatchData(batch2)).toEqual([
       { op_id: '5', op: 'PUT', object_id: 'test5', checksum: 3686902721 },
@@ -897,9 +1273,13 @@ bucket_definitions:
     });

     const batch3 = await test_utils.fromAsync(
-      bucketStorage.getBucketDataBatch(
-
-
+      bucketStorage.getBucketDataBatch(
+        checkpoint,
+        bucketRequestMap(syncRules, [['global[]', BigInt(batch2.chunkData.next_after)]]),
+        {
+          limit: 4
+        }
+      )
     );
     expect(test_utils.getBatchData(batch3)).toEqual([]);

@@ -909,8 +1289,9 @@ bucket_definitions:
   describe('batch has_more', () => {
     const setup = async (options: BucketDataBatchOptions) => {
       await using factory = await generateStorageFactory();
-      const syncRules = await factory.updateSyncRules(
-
+      const syncRules = await factory.updateSyncRules(
+        updateSyncRulesFromYaml(
+          `
 bucket_definitions:
   global1:
     data:
@@ -918,11 +1299,14 @@ bucket_definitions:
   global2:
     data:
       - SELECT id, description FROM test WHERE bucket = 'global2'
-
-
+`,
+          { storageVersion }
+        )
+      );
       const bucketStorage = factory.getInstance(syncRules);

       await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+        await batch.markAllSnapshotDone('1/1');
         const sourceTable = TEST_TABLE;

         for (let i = 1; i <= 10; i++) {
@@ -942,24 +1326,26 @@ bucket_definitions:
       });

       const { checkpoint } = await bucketStorage.getCheckpoint();
-
+      const batch = await test_utils.fromAsync(
         bucketStorage.getBucketDataBatch(
           checkpoint,
-
+          bucketRequestMap(syncRules, [
             ['global1[]', 0n],
             ['global2[]', 0n]
           ]),
           options
         )
       );
+
+      return { syncRules, batch };
     };

     test('batch has_more (1)', async () => {
-      const batch = await setup({ limit: 5 });
+      const { batch, syncRules } = await setup({ limit: 5 });
       expect(batch.length).toEqual(2);

-      expect(batch[0].chunkData.bucket).toEqual('global1[]');
-      expect(batch[1].chunkData.bucket).toEqual('global2[]');
+      expect(batch[0].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global1[]').bucket);
+      expect(batch[1].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global2[]').bucket);

      expect(test_utils.getBatchData(batch[0])).toEqual([
        { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 }
@@ -986,11 +1372,11 @@ bucket_definitions:
     });

     test('batch has_more (2)', async () => {
-      const batch = await setup({ limit: 11 });
+      const { batch, syncRules } = await setup({ limit: 11 });
       expect(batch.length).toEqual(2);

-      expect(batch[0].chunkData.bucket).toEqual('global1[]');
-      expect(batch[1].chunkData.bucket).toEqual('global2[]');
+      expect(batch[0].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global1[]').bucket);
+      expect(batch[1].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global2[]').bucket);

       expect(test_utils.getBatchData(batch[0])).toEqual([
         { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 }
@@ -1023,12 +1409,12 @@ bucket_definitions:

     test('batch has_more (3)', async () => {
       // 50 bytes is more than 1 row, less than 2 rows
-      const batch = await setup({ limit: 3, chunkLimitBytes: 50 });
+      const { batch, syncRules } = await setup({ limit: 3, chunkLimitBytes: 50 });

       expect(batch.length).toEqual(3);
-      expect(batch[0].chunkData.bucket).toEqual('global1[]');
-      expect(batch[1].chunkData.bucket).toEqual('global2[]');
-      expect(batch[2].chunkData.bucket).toEqual('global2[]');
+      expect(batch[0].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global1[]').bucket);
+      expect(batch[1].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global2[]').bucket);
+      expect(batch[2].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global2[]').bucket);

       expect(test_utils.getBatchData(batch[0])).toEqual([
         { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 }
@@ -1070,14 +1456,15 @@ bucket_definitions:
       replication_size_bytes: 0
     });

-    const r = await f.configureSyncRules(
+    const r = await f.configureSyncRules(updateSyncRulesFromYaml('bucket_definitions: {}'));
     const storage = f.getInstance(r.persisted_sync_rules!);
     await storage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.markAllSnapshotDone('1/0');
       await batch.keepalive('1/0');
     });

-
-
+    await f.getStorageMetrics();
+    // We don't care about the specific values here
   });

   test('op_id initialization edge case', async () => {
@@ -1085,21 +1472,27 @@ bucket_definitions:
     // but large enough in size to be split over multiple returned chunks.
     // Similar to the above test, but splits over 1MB chunks.
     await using factory = await generateStorageFactory();
-    const syncRules = await factory.updateSyncRules(
-
+    const syncRules = await factory.updateSyncRules(
+      updateSyncRulesFromYaml(
+        `
 bucket_definitions:
   global:
     data:
       - SELECT id FROM test
       - SELECT id FROM test_ignore WHERE false
-
-
+`,
+        {
+          storageVersion
+        }
+      )
+    );
     const bucketStorage = factory.getInstance(syncRules);

-    const sourceTable = test_utils.makeTestTable('test', ['id']);
-    const sourceTableIgnore = test_utils.makeTestTable('test_ignore', ['id']);
+    const sourceTable = test_utils.makeTestTable('test', ['id'], config);
+    const sourceTableIgnore = test_utils.makeTestTable('test_ignore', ['id'], config);

     const result1 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.markAllSnapshotDone('1/1');
       // This saves a record to current_data, but not bucket_data.
       // This causes a checkpoint to be created without increasing the op_id sequence.
       await batch.save({
@@ -1132,18 +1525,24 @@ bucket_definitions:

   test('unchanged checksums', async () => {
     await using factory = await generateStorageFactory();
-    const syncRules = await factory.updateSyncRules(
-
+    const syncRules = await factory.updateSyncRules(
+      updateSyncRulesFromYaml(
+        `
 bucket_definitions:
   global:
     data:
       - SELECT client_id as id, description FROM "%"
-
-
+`,
+        {
+          storageVersion
+        }
+      )
+    );
     const bucketStorage = factory.getInstance(syncRules);

     const sourceTable = TEST_TABLE;
     await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.markAllSnapshotDone('1/1');
       await batch.save({
         sourceTable,
         tag: storage.SaveOperationTag.INSERT,
@@ -1157,13 +1556,229 @@ bucket_definitions:
     });
     const { checkpoint } = await bucketStorage.getCheckpoint();

-    const checksums = [
-
-
-    expect(
+    const checksums = [
+      ...(await bucketStorage.getChecksums(checkpoint, bucketRequests(syncRules, ['global[]']))).values()
+    ];
+    expect(checksums).toEqual([
+      { bucket: bucketRequest(syncRules, 'global[]').bucket, checksum: 1917136889, count: 1 }
+    ]);
+    const checksums2 = [
+      ...(await bucketStorage.getChecksums(checkpoint + 1n, bucketRequests(syncRules, ['global[]']))).values()
+    ];
+    expect(checksums2).toEqual([
+      { bucket: bucketRequest(syncRules, 'global[]').bucket, checksum: 1917136889, count: 1 }
+    ]);
   });

-  testChecksumBatching(
+  testChecksumBatching(config);
+
+  test('empty checkpoints (1)', async () => {
+    await using factory = await generateStorageFactory();
+    const syncRules = await factory.updateSyncRules(
+      updateSyncRulesFromYaml(
+        `
+bucket_definitions:
+  global:
+    data:
+      - SELECT id, description FROM "%"
+`,
+        { storageVersion }
+      )
+    );
+    const bucketStorage = factory.getInstance(syncRules);
+
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.markAllSnapshotDone('1/1');
+      await batch.commit('1/1');
+
+      const cp1 = await bucketStorage.getCheckpoint();
+      expect(cp1.lsn).toEqual('1/1');
+
+      await batch.commit('2/1', { createEmptyCheckpoints: true });
+      const cp2 = await bucketStorage.getCheckpoint();
+      expect(cp2.lsn).toEqual('2/1');
+
+      await batch.keepalive('3/1');
+      const cp3 = await bucketStorage.getCheckpoint();
+      expect(cp3.lsn).toEqual('3/1');
+
+      // For the last one, we skip creating empty checkpoints
+      // This means the LSN stays at 3/1.
+      await batch.commit('4/1', { createEmptyCheckpoints: false });
+      const cp4 = await bucketStorage.getCheckpoint();
+      expect(cp4.lsn).toEqual('3/1');
+    });
+  });
+
+  test('empty checkpoints (2)', async () => {
+    await using factory = await generateStorageFactory();
+    const syncRules = await factory.updateSyncRules(
+      updateSyncRulesFromYaml(
+        `
+bucket_definitions:
+  global:
+    data:
+      - SELECT id, description FROM "%"
+`,
+        {
+          storageVersion
+        }
+      )
+    );
+    const bucketStorage = factory.getInstance(syncRules);
+
+    const sourceTable = TEST_TABLE;
+    // We simulate two concurrent batches, but nesting is the easiest way to do this.
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch1) => {
+      await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch2) => {
+        await batch1.markAllSnapshotDone('1/1');
+        await batch1.commit('1/1');
+
+        await batch1.commit('2/1', { createEmptyCheckpoints: false });
+        const cp2 = await bucketStorage.getCheckpoint();
+        expect(cp2.lsn).toEqual('1/1'); // checkpoint 2/1 skipped
+
+        await batch2.save({
+          sourceTable,
+          tag: storage.SaveOperationTag.INSERT,
+          after: {
+            id: 'test1',
+            description: 'test1a'
+          },
+          afterReplicaId: test_utils.rid('test1')
+        });
+        // This simulates what happens on a snapshot processor.
+        // This may later change to a flush() rather than commit().
+        await batch2.commit(test_utils.BATCH_OPTIONS.zeroLSN);
+
+        const cp3 = await bucketStorage.getCheckpoint();
+        expect(cp3.lsn).toEqual('1/1'); // Still unchanged
+
+        // This now needs to advance the LSN, despite {createEmptyCheckpoints: false}
+        await batch1.commit('4/1', { createEmptyCheckpoints: false });
+        const cp4 = await bucketStorage.getCheckpoint();
+        expect(cp4.lsn).toEqual('4/1');
+      });
+    });
+  });
+
+  test('empty checkpoints (sync rule activation)', async () => {
+    await using factory = await generateStorageFactory();
+    const syncRules = await factory.updateSyncRules(
+      updateSyncRulesFromYaml(
+        `
+bucket_definitions:
+  global:
+    data:
+      - SELECT id, description FROM "%"
+`,
+        {
+          storageVersion
+        }
+      )
+    );
+    const bucketStorage = factory.getInstance(syncRules);
+
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      const result = await batch.commit('1/1', { createEmptyCheckpoints: false });
+      expect(result).toEqual({ checkpointBlocked: true, checkpointCreated: false });
+      // Snapshot is only valid once we reach 3/1
+      await batch.markAllSnapshotDone('3/1');
+    });
+
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      // 2/1 < 3/1 - snapshot not valid yet, block checkpoint
+      const result = await batch.commit('2/1', { createEmptyCheckpoints: false });
+      expect(result).toEqual({ checkpointBlocked: true, checkpointCreated: false });
+    });
+
+    // No empty checkpoint should be created by the commit above.
+    const cp1 = await bucketStorage.getCheckpoint();
+    expect(cp1.lsn).toEqual(null);
+
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      // After this commit, the snapshot should be valid.
+      // We specifically check that this is done even if createEmptyCheckpoints: false.
+      const result = await batch.commit('3/1', { createEmptyCheckpoints: false });
+      expect(result).toEqual({ checkpointBlocked: false, checkpointCreated: true });
+    });
+
+    // Now, the checkpoint should advance the sync rules active.
+    const cp2 = await bucketStorage.getCheckpoint();
+    expect(cp2.lsn).toEqual('3/1');
+
+    const activeSyncRules = await factory.getActiveSyncRulesContent();
+    expect(activeSyncRules?.id).toEqual(syncRules.id);
+
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      // At this point, it should be a truely empty checkpoint
+      const result = await batch.commit('4/1', { createEmptyCheckpoints: false });
+      expect(result).toEqual({ checkpointBlocked: false, checkpointCreated: false });
+    });
+
+    // Unchanged
+    const cp3 = await bucketStorage.getCheckpoint();
+    expect(cp3.lsn).toEqual('3/1');
+  });
+
+  test.runIf(storageVersion >= 3)('deleting while streaming', async () => {
+    await using factory = await generateStorageFactory();
+    const syncRules = await factory.updateSyncRules(
+      updateSyncRulesFromYaml(
+        `
+bucket_definitions:
+  global:
+    data:
+      - SELECT id, description FROM "%"
+`,
+        {
+          storageVersion
+        }
+      )
+    );
+    const bucketStorage = factory.getInstance(syncRules);
+
+    const sourceTable = TEST_TABLE;
+    // We simulate two concurrent batches, and nesting is the easiest way to do this.
+    // For this test, we assume that we start with a row "test1", which is picked up by a snapshot
+    // query, right before the delete is streamed. But the snapshot query is only persisted _after_
+    // the delete is streamed, and we need to ensure that the streamed delete takes precedence.
+    await bucketStorage.startBatch({ ...test_utils.BATCH_OPTIONS, skipExistingRows: true }, async (snapshotBatch) => {
+      await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (streamingBatch) => {
+        streamingBatch.save({
+          sourceTable,
+          tag: storage.SaveOperationTag.DELETE,
+          before: {
+            id: 'test1'
+          },
+          beforeReplicaId: test_utils.rid('test1')
+        });
+        await streamingBatch.commit('2/1');
+
+        await snapshotBatch.save({
+          sourceTable,
+          tag: storage.SaveOperationTag.INSERT,
+          after: {
+            id: 'test1',
+            description: 'test1a'
+          },
+          afterReplicaId: test_utils.rid('test1')
+        });
+        await snapshotBatch.markAllSnapshotDone('3/1');
+        await snapshotBatch.commit('1/1');
+
+        await streamingBatch.keepalive('3/1');
+      });
+    });
+
+    const cp = await bucketStorage.getCheckpoint();
+    expect(cp.lsn).toEqual('3/1');
+    const data = await test_utils.fromAsync(
+      bucketStorage.getBucketDataBatch(cp.checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]]))
+    );
+
+    expect(data).toEqual([]);
+  });
 }

 /**
@@ -1171,22 +1786,29 @@ bucket_definitions:
  *
  * Exposed as a separate test so we can test with more storage parameters.
  */
-export function testChecksumBatching(
+export function testChecksumBatching(config: storage.TestStorageConfig) {
+  const storageVersion = config.storageVersion ?? CURRENT_STORAGE_VERSION;
   test('checksums for multiple buckets', async () => {
-    await using factory = await
-    const syncRules = await factory.updateSyncRules(
-
+    await using factory = await config.factory();
+    const syncRules = await factory.updateSyncRules(
+      updateSyncRulesFromYaml(
+        `
 bucket_definitions:
   user:
     parameters: select request.user_id() as user_id
     data:
      - select id, description from test where user_id = bucket.user_id
-
-
+`,
+        {
+          storageVersion
+        }
+      )
+    );
     const bucketStorage = factory.getInstance(syncRules);

-    const sourceTable =
+    const sourceTable = test_utils.makeTestTable('test', ['id'], config);
     await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.markAllSnapshotDone('1/1');
      for (let u of ['u1', 'u2', 'u3', 'u4']) {
        for (let t of ['t1', 't2', 't3', 't4']) {
          const id = `${t}_${u}`;
@@ -1207,14 +1829,14 @@ bucket_definitions:
     const { checkpoint } = await bucketStorage.getCheckpoint();

     bucketStorage.clearChecksumCache();
-    const buckets = ['user["u1"]', 'user["u2"]', 'user["u3"]', 'user["u4"]'];
+    const buckets = bucketRequests(syncRules, ['user["u1"]', 'user["u2"]', 'user["u3"]', 'user["u4"]']);
     const checksums = [...(await bucketStorage.getChecksums(checkpoint, buckets)).values()];
     checksums.sort((a, b) => a.bucket.localeCompare(b.bucket));
     expect(checksums).toEqual([
-      { bucket: 'user["u1"]', count: 4, checksum: 346204588 },
-      { bucket: 'user["u2"]', count: 4, checksum: 5261081 },
-      { bucket: 'user["u3"]', count: 4, checksum: 134760718 },
-      { bucket: 'user["u4"]', count: 4, checksum: -302639724 }
+      { bucket: bucketRequest(syncRules, 'user["u1"]').bucket, count: 4, checksum: 346204588 },
+      { bucket: bucketRequest(syncRules, 'user["u2"]').bucket, count: 4, checksum: 5261081 },
+      { bucket: bucketRequest(syncRules, 'user["u3"]').bucket, count: 4, checksum: 134760718 },
+      { bucket: bucketRequest(syncRules, 'user["u4"]').bucket, count: 4, checksum: -302639724 }
     ]);
   });
 }
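A recurring pattern in the hunks above is the replacement of hard-coded bucket names such as 'global[]' with lookups through the new `bucketRequest`, `bucketRequests`, and `bucketRequestMap` helpers from this package's `util.js` and test-utils index. Their implementations are not included in this diff; the sketch below only mirrors the shape implied by the call sites, and every detail (names, types, the `resolveBucketId` helper) is an assumption:

// Sketch only: inferred from call sites in the diff, not the package's actual code.
// Assumed helper that maps a source bucket name (e.g. 'global[]') to the
// storage-version-specific bucket id used by the storage layer.
declare function resolveBucketId(syncRules: unknown, name: string): string;

// bucketRequest: call sites only read the `.bucket` field of the result.
export function bucketRequest(syncRules: unknown, name: string): { bucket: string } {
  return { bucket: resolveBucketId(syncRules, name) };
}

// bucketRequests: the form passed to bucketStorage.getChecksums(...).
export function bucketRequests(syncRules: unknown, names: string[]): { bucket: string }[] {
  return names.map((name) => bucketRequest(syncRules, name));
}

// bucketRequestMap: pairs each bucket with the op id to resume after,
// as passed to bucketStorage.getBucketDataBatch(...).
export function bucketRequestMap(syncRules: unknown, entries: [string, bigint][]): Map<string, bigint> {
  return new Map(entries.map(([name, after]) => [resolveBucketId(syncRules, name), after]));
}

The actual return types in the package may be richer than these sketches; treat them only as a reading aid for the diff.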