@powersync/service-core-tests 0.14.0 → 0.15.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +40 -0
- package/dist/test-utils/general-utils.d.ts +22 -3
- package/dist/test-utils/general-utils.js +56 -3
- package/dist/test-utils/general-utils.js.map +1 -1
- package/dist/test-utils/stream_utils.js +2 -2
- package/dist/test-utils/stream_utils.js.map +1 -1
- package/dist/tests/register-compacting-tests.d.ts +1 -1
- package/dist/tests/register-compacting-tests.js +360 -297
- package/dist/tests/register-compacting-tests.js.map +1 -1
- package/dist/tests/register-data-storage-checkpoint-tests.d.ts +1 -1
- package/dist/tests/register-data-storage-checkpoint-tests.js +59 -48
- package/dist/tests/register-data-storage-checkpoint-tests.js.map +1 -1
- package/dist/tests/register-data-storage-data-tests.d.ts +2 -2
- package/dist/tests/register-data-storage-data-tests.js +1112 -612
- package/dist/tests/register-data-storage-data-tests.js.map +1 -1
- package/dist/tests/register-data-storage-parameter-tests.d.ts +1 -1
- package/dist/tests/register-data-storage-parameter-tests.js +273 -254
- package/dist/tests/register-data-storage-parameter-tests.js.map +1 -1
- package/dist/tests/register-parameter-compacting-tests.d.ts +1 -1
- package/dist/tests/register-parameter-compacting-tests.js +83 -87
- package/dist/tests/register-parameter-compacting-tests.js.map +1 -1
- package/dist/tests/register-sync-tests.d.ts +2 -1
- package/dist/tests/register-sync-tests.js +479 -451
- package/dist/tests/register-sync-tests.js.map +1 -1
- package/dist/tests/util.d.ts +5 -4
- package/dist/tests/util.js +27 -12
- package/dist/tests/util.js.map +1 -1
- package/package.json +3 -3
- package/src/test-utils/general-utils.ts +81 -4
- package/src/test-utils/stream_utils.ts +2 -2
- package/src/tests/register-compacting-tests.ts +376 -322
- package/src/tests/register-data-storage-checkpoint-tests.ts +85 -53
- package/src/tests/register-data-storage-data-tests.ts +1050 -559
- package/src/tests/register-data-storage-parameter-tests.ts +330 -288
- package/src/tests/register-parameter-compacting-tests.ts +87 -90
- package/src/tests/register-sync-tests.ts +390 -380
- package/src/tests/util.ts +46 -17
- package/tsconfig.tsbuildinfo +1 -1
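Most of the churn in `register-compacting-tests` below comes from two mechanical migrations: the callback-style `startBatch`/`batch` harness is replaced by an explicitly disposed writer (`createWriter` → `save` → `commit` → `flush`), and bucket lookups move from raw bucket-name strings to request objects built by the new `bucketRequest`/`bucketRequests`/`bucketRequestMap` helpers. The sketch below condenses the new pattern as it appears in the compiled output; the helper signatures and the `config.factory` shape are inferred from this diff rather than from documented API, so treat it as illustrative only.

```ts
// Illustrative sketch of the 0.15.x test flow, assembled from the diff below.
// Identifiers come from the compiled output; exact types are assumptions.
import { storage, updateSyncRulesFromYaml } from '@powersync/service-core';
import * as test_utils from '../test-utils/test-utils-index.js';
import { bucketRequest } from '../test-utils/test-utils-index.js';

export async function exampleCompactFlow(config: { factory: () => Promise<any> }) {
  // Storage factories and writers are now async-disposable resources
  // (the compiled output manages them via __addDisposableResource).
  await using factory = await config.factory();
  const syncRules = await factory.updateSyncRules(
    updateSyncRulesFromYaml(`
bucket_definitions:
  global:
    data: [select * from test]
`)
  );
  const bucketStorage = factory.getInstance(syncRules);

  // 0.14.x wrapped this in bucketStorage.startBatch(..., async (batch) => { ... }).
  await using writer = await bucketStorage.createWriter(test_utils.BATCH_OPTIONS);
  const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
  await writer.markAllSnapshotDone('1/1');
  await writer.save({
    sourceTable: testTable,
    tag: storage.SaveOperationTag.INSERT,
    after: { id: 't1' },
    afterReplicaId: test_utils.rid('t1')
  });
  await writer.commit('1/1');
  await writer.flush();
  const checkpoint = writer.last_flushed_op;

  // Buckets are addressed through request objects instead of name strings.
  const request = bucketRequest(syncRules, 'global[]');
  const batch = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint, [request]));
  const checksums = await bucketStorage.getChecksums(checkpoint, [request]);
  return { data: batch.chunkData.data, checksum: checksums.get(request.bucket) };
}
```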
package/dist/tests/register-compacting-tests.js

```diff
@@ -50,12 +50,13 @@ var __disposeResources = (this && this.__disposeResources) || (function (SuppressedError) {
     var e = new Error(message);
     return e.name = "SuppressedError", e.error = error, e.suppressed = suppressed, e;
 });
-import { storage, updateSyncRulesFromYaml } from '@powersync/service-core';
+import { addChecksums, storage, updateSyncRulesFromYaml } from '@powersync/service-core';
 import { expect, test } from 'vitest';
 import * as test_utils from '../test-utils/test-utils-index.js';
-import { bucketRequest
-
-export function registerCompactTests(
+import { bucketRequest } from '../test-utils/test-utils-index.js';
+import { bucketRequestMap, bucketRequests } from './util.js';
+export function registerCompactTests(config) {
+    const generateStorageFactory = config.factory;
     test('compacting (1)', async () => {
         const env_1 = { stack: [], error: void 0, hasError: false };
         try {
@@ -66,52 +67,52 @@ bucket_definitions:
     data: [select * from test]
 `));
             const bucketStorage = factory.getInstance(syncRules);
-            const
-
-
-
-
-
-
-
-            }
-
-
-
-
-
-
-
-            }
-
-
-
-
-
-
-
-            }
-
+            const writer = __addDisposableResource(env_1, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
+            const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+            await writer.markAllSnapshotDone('1/1');
+            await writer.save({
+                sourceTable: testTable,
+                tag: storage.SaveOperationTag.INSERT,
+                after: {
+                    id: 't1'
+                },
+                afterReplicaId: test_utils.rid('t1')
+            });
+            await writer.save({
+                sourceTable: testTable,
+                tag: storage.SaveOperationTag.INSERT,
+                after: {
+                    id: 't2'
+                },
+                afterReplicaId: test_utils.rid('t2')
+            });
+            await writer.save({
+                sourceTable: testTable,
+                tag: storage.SaveOperationTag.UPDATE,
+                after: {
+                    id: 't2'
+                },
+                afterReplicaId: test_utils.rid('t2')
             });
-
-
+            await writer.commit('1/1');
+            await writer.flush();
+            const checkpoint = writer.last_flushed_op;
+            const request = bucketRequest(syncRules, 'global[]');
+            const batchBefore = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint, [request]));
             const dataBefore = batchBefore.chunkData.data;
-            const checksumBefore = await bucketStorage.getChecksums(checkpoint,
+            const checksumBefore = await bucketStorage.getChecksums(checkpoint, [request]);
             expect(dataBefore).toMatchObject([
                 {
-                    checksum: 2634521662,
                     object_id: 't1',
                     op: 'PUT',
                     op_id: '1'
                 },
                 {
-                    checksum: 4243212114,
                     object_id: 't2',
                     op: 'PUT',
                     op_id: '2'
                 },
                 {
-                    checksum: 4243212114,
                     object_id: 't2',
                     op: 'PUT',
                     op_id: '3'
@@ -125,33 +126,28 @@ bucket_definitions:
                 minBucketChanges: 1,
                 minChangeRatio: 0
             });
-            const batchAfter = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint,
+            const batchAfter = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint, [request]));
             const dataAfter = batchAfter.chunkData.data;
-            const checksumAfter = await bucketStorage.getChecksums(checkpoint,
+            const checksumAfter = await bucketStorage.getChecksums(checkpoint, [request]);
             bucketStorage.clearChecksumCache();
-            const checksumAfter2 = await bucketStorage.getChecksums(checkpoint,
+            const checksumAfter2 = await bucketStorage.getChecksums(checkpoint, [request]);
             expect(batchAfter.targetOp).toEqual(3n);
             expect(dataAfter).toMatchObject([
+                dataBefore[0],
                 {
-                    checksum:
-                    object_id: 't1',
-                    op: 'PUT',
-                    op_id: '1'
-                },
-                {
-                    checksum: 4243212114,
+                    checksum: dataBefore[1].checksum,
                     op: 'MOVE',
                     op_id: '2'
                 },
                 {
-                    checksum:
+                    checksum: dataBefore[2].checksum,
                     object_id: 't2',
                     op: 'PUT',
                     op_id: '3'
                 }
             ]);
-            expect(checksumAfter.get(
-            expect(checksumAfter2.get(
+            expect(checksumAfter.get(request.bucket)).toEqual(checksumBefore.get(request.bucket));
+            expect(checksumAfter2.get(request.bucket)).toEqual(checksumBefore.get(request.bucket));
             test_utils.validateCompactedBucket(dataBefore, dataAfter);
         }
         catch (e_1) {
```
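Note the change in assertion style through these hunks: 0.14.0 pinned literal checksum values (2634521662, 4243212114), which only hold for one checksum implementation, while 0.15.x asserts properties relative to the pre-compaction state, so the same suite can run against storage backends that produce different checksums. Distilled, the invariant under test is that compaction never changes a bucket's aggregate checksum, whether cached or recomputed; a sketch using the identifiers from the diff (compact options copied from it, everything else assumed from context):

```ts
// Invariant distilled from 'compacting (1)': compact() must preserve the
// bucket's aggregate checksum, both when served from the checksum cache
// and after a cold recompute.
const checksumBefore = await bucketStorage.getChecksums(checkpoint, [request]);
await bucketStorage.compact({ clearBatchLimit: 2, moveBatchLimit: 1, minBucketChanges: 1, minChangeRatio: 0 });
const checksumAfter = await bucketStorage.getChecksums(checkpoint, [request]); // cached
bucketStorage.clearChecksumCache();
const checksumAfter2 = await bucketStorage.getChecksums(checkpoint, [request]); // recomputed
expect(checksumAfter.get(request.bucket)).toEqual(checksumBefore.get(request.bucket));
expect(checksumAfter2.get(request.bucket)).toEqual(checksumBefore.get(request.bucket));
```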
```diff
@@ -174,69 +170,65 @@ bucket_definitions:
     data: [select * from test]
 `));
             const bucketStorage = factory.getInstance(syncRules);
-            const
-
-
-
-
-
-
-
-            }
-
-
-
-
-
-
-
-            }
-
-
-
-
-
-
-
-            }
-
-
-
-
-
-
-
-            }
-
+            const writer = __addDisposableResource(env_2, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
+            const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+            await writer.markAllSnapshotDone('1/1');
+            await writer.save({
+                sourceTable: testTable,
+                tag: storage.SaveOperationTag.INSERT,
+                after: {
+                    id: 't1'
+                },
+                afterReplicaId: test_utils.rid('t1')
+            });
+            await writer.save({
+                sourceTable: testTable,
+                tag: storage.SaveOperationTag.INSERT,
+                after: {
+                    id: 't2'
+                },
+                afterReplicaId: test_utils.rid('t2')
+            });
+            await writer.save({
+                sourceTable: testTable,
+                tag: storage.SaveOperationTag.DELETE,
+                before: {
+                    id: 't1'
+                },
+                beforeReplicaId: test_utils.rid('t1')
+            });
+            await writer.save({
+                sourceTable: testTable,
+                tag: storage.SaveOperationTag.UPDATE,
+                after: {
+                    id: 't2'
+                },
+                afterReplicaId: test_utils.rid('t2')
             });
-
-
+            await writer.commit('1/1');
+            await writer.flush();
+            const checkpoint = writer.last_flushed_op;
+            const request = bucketRequest(syncRules, 'global[]');
+            const batchBefore = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint, [request]));
             const dataBefore = batchBefore.chunkData.data;
-            const checksumBefore = await bucketStorage.getChecksums(checkpoint,
+            const checksumBefore = await bucketStorage.getChecksums(checkpoint, [request]);
+            // op_id sequence depends on the storage implementation
             expect(dataBefore).toMatchObject([
                 {
-                    checksum: 2634521662,
                     object_id: 't1',
-                    op: 'PUT',
-                    op_id: '1'
+                    op: 'PUT'
                 },
                 {
-                    checksum: 4243212114,
                     object_id: 't2',
-                    op: 'PUT',
-                    op_id: '2'
+                    op: 'PUT'
                 },
                 {
-                    checksum: 4228978084,
                     object_id: 't1',
-                    op: 'REMOVE',
-                    op_id: '3'
+                    op: 'REMOVE'
                 },
                 {
-                    checksum: 4243212114,
                     object_id: 't2',
-                    op: 'PUT',
-                    op_id: '4'
+                    op: 'PUT'
                 }
             ]);
             await bucketStorage.compact({
@@ -246,26 +238,24 @@ bucket_definitions:
                 minBucketChanges: 1,
                 minChangeRatio: 0
             });
-            const batchAfter = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint,
+            const batchAfter = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint, [request]));
             const dataAfter = batchAfter.chunkData.data;
             bucketStorage.clearChecksumCache();
-            const checksumAfter = await bucketStorage.getChecksums(checkpoint,
-            expect(batchAfter.targetOp).
+            const checksumAfter = await bucketStorage.getChecksums(checkpoint, [request]);
+            expect(batchAfter.targetOp).toBeLessThanOrEqual(checkpoint);
             expect(dataAfter).toMatchObject([
                 {
-                    checksum:
-                    op: 'CLEAR',
-                    op_id: '3'
+                    checksum: addChecksums(addChecksums(dataBefore[0].checksum, dataBefore[1].checksum), dataBefore[2].checksum),
+                    op: 'CLEAR'
                 },
                 {
-                    checksum:
+                    checksum: dataBefore[3].checksum,
                     object_id: 't2',
-                    op: 'PUT',
-                    op_id: '4'
+                    op: 'PUT'
                 }
             ]);
-            expect(checksumAfter.get(
-            ...checksumBefore.get(
+            expect(checksumAfter.get(request.bucket)).toEqual({
+                ...checksumBefore.get(request.bucket),
                 count: 2
             });
             test_utils.validateCompactedBucket(dataBefore, dataAfter);
```
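The `CLEAR` expectation above is where the new `addChecksums` import earns its keep: when compaction collapses operations 1–3 into a single `CLEAR`, that operation has to carry the combined checksum of everything it replaced, so the test folds the pre-compaction checksums instead of hard-coding a number. The diff only shows the call sites, not the implementation; assuming the conventional additive 32-bit checksum semantics of the sync protocol, the fold behaves roughly like this:

```ts
// Assumed semantics: operation checksums combine by wrapping 32-bit addition.
// (The real addChecksums is exported by @powersync/service-core; this is a stand-in.)
const addChecksums = (a: number, b: number): number => (a + b) >>> 0;

// The CLEAR op replaces dataBefore[0..2], so its expected checksum is the fold.
// With the 0.14.0 literals removed above (2634521662, 4243212114, 4228978084)
// this wraps past 2^32 twice, giving 2516777268.
const expectedClearChecksum = addChecksums(
  addChecksums(dataBefore[0].checksum, dataBefore[1].checksum),
  dataBefore[2].checksum
);
```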
```diff
@@ -290,47 +280,50 @@ bucket_definitions:
     data: [select * from test]
 `));
             const bucketStorage = factory.getInstance(syncRules);
-            const
-
-
-
-
-
-
-
-            }
-
-                sourceTable: TEST_TABLE,
-                tag: storage.SaveOperationTag.INSERT,
-                after: {
-                    id: 't2'
-                },
-                afterReplicaId: 't2'
-            });
-            await batch.save({
-                sourceTable: TEST_TABLE,
-                tag: storage.SaveOperationTag.DELETE,
-                before: {
-                    id: 't1'
-                },
-                beforeReplicaId: 't1'
-            });
-            await batch.commit('1/1');
+            const writer = __addDisposableResource(env_3, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
+            const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+            await writer.markAllSnapshotDone('1/1');
+            await writer.save({
+                sourceTable: testTable,
+                tag: storage.SaveOperationTag.INSERT,
+                after: {
+                    id: 't1'
+                },
+                afterReplicaId: 't1'
             });
-
-
-
-
-
-
-
-                    id: 't2'
-                },
-                beforeReplicaId: 't2'
-            });
-            await batch.commit('2/1');
+            await writer.save({
+                sourceTable: testTable,
+                tag: storage.SaveOperationTag.INSERT,
+                after: {
+                    id: 't2'
+                },
+                afterReplicaId: 't2'
             });
-
+            await writer.save({
+                sourceTable: testTable,
+                tag: storage.SaveOperationTag.DELETE,
+                before: {
+                    id: 't1'
+                },
+                beforeReplicaId: 't1'
+            });
+            await writer.commit('1/1');
+            await writer.flush();
+            const checkpoint1 = writer.last_flushed_op;
+            const request = bucketRequest(syncRules, 'global[]');
+            const writer2 = __addDisposableResource(env_3, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
+            const testTable2 = await test_utils.resolveTestTable(writer2, 'test', ['id'], config);
+            await writer2.save({
+                sourceTable: testTable2,
+                tag: storage.SaveOperationTag.DELETE,
+                before: {
+                    id: 't2'
+                },
+                beforeReplicaId: 't2'
+            });
+            await writer2.commit('2/1');
+            await writer2.flush();
+            const checkpoint2 = writer2.last_flushed_op;
             await bucketStorage.compact({
                 clearBatchLimit: 2,
                 moveBatchLimit: 1,
@@ -338,22 +331,19 @@ bucket_definitions:
                 minBucketChanges: 1,
                 minChangeRatio: 0
             });
-            const batchAfter = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint2,
+            const batchAfter = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint2, [request]));
             const dataAfter = batchAfter.chunkData.data;
             await bucketStorage.clearChecksumCache();
-            const checksumAfter = await bucketStorage.getChecksums(checkpoint2,
-            expect(batchAfter.targetOp).toEqual(4n);
+            const checksumAfter = await bucketStorage.getChecksums(checkpoint2, [request]);
             expect(dataAfter).toMatchObject([
                 {
-
-                    op: 'CLEAR',
-                    op_id: '4'
+                    op: 'CLEAR'
                 }
             ]);
-            expect(checksumAfter.get(
-            bucket:
+            expect(checksumAfter.get(request.bucket)).toEqual({
+                bucket: request.bucket,
                 count: 1,
-                checksum:
+                checksum: dataAfter[0].checksum
             });
         }
         catch (e_3) {
@@ -379,69 +369,71 @@ bucket_definitions:
     data:
       - select * from test where b = bucket.b`));
             const bucketStorage = factory.getInstance(syncRules);
-            const
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            }
-
-
-
-
-
-
-
-
-
-            }
-
-
-
-
-
-
-
-
-
-            }
-
-
-
-
-
-
-
-
-
-            }
-
-
-
-
-
-
-
-
-
-            }
-
-            }
-
-
+            const writer = __addDisposableResource(env_4, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
+            const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+            await writer.markAllSnapshotDone('1/1');
+            /**
+             * Repeatedly create operations which fall into different buckets.
+             * The bucket operations are purposely interleaved as the op_id increases.
+             * A large amount of operations are created here.
+             * The configured window of compacting operations is 100. This means the initial window will
+             * contain operations from multiple buckets.
+             */
+            for (let count = 0; count < 100; count++) {
+                await writer.save({
+                    sourceTable: testTable,
+                    tag: storage.SaveOperationTag.INSERT,
+                    after: {
+                        id: 't1',
+                        b: 'b1',
+                        value: 'start'
+                    },
+                    afterReplicaId: test_utils.rid('t1')
+                });
+                await writer.save({
+                    sourceTable: testTable,
+                    tag: storage.SaveOperationTag.UPDATE,
+                    after: {
+                        id: 't1',
+                        b: 'b1',
+                        value: 'intermediate'
+                    },
+                    afterReplicaId: test_utils.rid('t1')
+                });
+                await writer.save({
+                    sourceTable: testTable,
+                    tag: storage.SaveOperationTag.INSERT,
+                    after: {
+                        id: 't2',
+                        b: 'b2',
+                        value: 'start'
+                    },
+                    afterReplicaId: test_utils.rid('t2')
+                });
+                await writer.save({
+                    sourceTable: testTable,
+                    tag: storage.SaveOperationTag.UPDATE,
+                    after: {
+                        id: 't1',
+                        b: 'b1',
+                        value: 'final'
+                    },
+                    afterReplicaId: test_utils.rid('t1')
+                });
+                await writer.save({
+                    sourceTable: testTable,
+                    tag: storage.SaveOperationTag.UPDATE,
+                    after: {
+                        id: 't2',
+                        b: 'b2',
+                        value: 'final'
+                    },
+                    afterReplicaId: test_utils.rid('t2')
+                });
+                await writer.commit('1/1');
+            }
+            await writer.flush();
+            const checkpoint = writer.last_flushed_op;
             await bucketStorage.compact({
                 clearBatchLimit: 100,
                 moveBatchLimit: 100,
@@ -498,33 +490,35 @@ bucket_definitions:
     data: [select * from test]
 `));
             const bucketStorage = factory.getInstance(syncRules);
-            const
-
-
-
-
-
-
-
-            }
-
-
-
-
-
-
-
-            }
-
-
-
-
-
-
-
-            }
-
+            const writer = __addDisposableResource(env_5, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
+            const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+            await writer.markAllSnapshotDone('1/1');
+            await writer.save({
+                sourceTable: testTable,
+                tag: storage.SaveOperationTag.INSERT,
+                after: {
+                    id: 't1'
+                },
+                afterReplicaId: 't1'
+            });
+            await writer.save({
+                sourceTable: testTable,
+                tag: storage.SaveOperationTag.INSERT,
+                after: {
+                    id: 't2'
+                },
+                afterReplicaId: 't2'
+            });
+            await writer.save({
+                sourceTable: testTable,
+                tag: storage.SaveOperationTag.DELETE,
+                before: {
+                    id: 't1'
+                },
+                beforeReplicaId: 't1'
             });
+            await writer.commit('1/1');
+            await writer.flush();
             await bucketStorage.compact({
                 clearBatchLimit: 2,
                 moveBatchLimit: 1,
@@ -532,25 +526,29 @@ bucket_definitions:
                 minBucketChanges: 1,
                 minChangeRatio: 0
             });
-            const
-
-
-
-
-
-
-
-
-            await batch.commit('2/1');
+            const writer2 = __addDisposableResource(env_5, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
+            const testTable2 = await test_utils.resolveTestTable(writer2, 'test', ['id'], config);
+            await writer2.save({
+                sourceTable: testTable2,
+                tag: storage.SaveOperationTag.DELETE,
+                before: {
+                    id: 't2'
+                },
+                beforeReplicaId: 't2'
             });
-
+            await writer2.commit('2/1');
+            await writer2.flush();
+            const checkpoint2 = writer2.last_flushed_op;
+            const request = bucketRequest(syncRules, 'global[]');
             await bucketStorage.clearChecksumCache();
-            const checksumAfter = await bucketStorage.getChecksums(checkpoint2,
-
-
-
-
+            const checksumAfter = await bucketStorage.getChecksums(checkpoint2, [request]);
+            const globalChecksum = checksumAfter.get(request.bucket);
+            expect(globalChecksum).toMatchObject({
+                bucket: request.bucket,
+                count: 4
             });
+            // storage-specific checksum - just check that it does not change
+            expect(globalChecksum).toMatchSnapshot();
         }
         catch (e_5) {
             env_5.error = e_5;
```
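Where a checksum value cannot be derived from earlier data at all, the suites above and below fall back to a two-step assertion: `toMatchObject` on the portable fields (`bucket`, `count`) plus `toMatchSnapshot()` for the full value, since the checksum itself is storage-specific. The pattern, extracted from the surrounding hunks:

```ts
// Portable fields are asserted directly; the storage-specific checksum is
// pinned with a vitest snapshot so regressions still surface without
// hard-coding a backend-dependent number.
const globalChecksum = checksumAfter.get(request.bucket);
expect(globalChecksum).toMatchObject({ bucket: request.bucket, count: 4 });
expect(globalChecksum).toMatchSnapshot(); // just check that it does not change
```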
```diff
@@ -572,38 +570,41 @@ bucket_definitions:
     data: [select * from test]
 `));
             const bucketStorage = factory.getInstance(syncRules);
-            const
-
-
-
-
-
-
-
-            }
-
-
-
-
-
-
-
-            }
-
+            const writer = __addDisposableResource(env_6, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
+            const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+            await writer.markAllSnapshotDone('1/1');
+            await writer.save({
+                sourceTable: testTable,
+                tag: storage.SaveOperationTag.INSERT,
+                after: {
+                    id: 't1'
+                },
+                afterReplicaId: 't1'
+            });
+            await writer.save({
+                sourceTable: testTable,
+                tag: storage.SaveOperationTag.UPDATE,
+                after: {
+                    id: 't1'
+                },
+                afterReplicaId: 't1'
             });
+            await writer.commit('1/1');
+            await writer.flush();
             // Get checksums here just to populate the cache
-            await bucketStorage.getChecksums(
-            const
-
-
-
-
-
-
-
-            await batch.commit('2/1');
+            await bucketStorage.getChecksums(writer.last_flushed_op, bucketRequests(syncRules, ['global[]']));
+            const writer2 = __addDisposableResource(env_6, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
+            const testTable2 = await test_utils.resolveTestTable(writer2, 'test', ['id'], config);
+            await writer2.save({
+                sourceTable: testTable2,
+                tag: storage.SaveOperationTag.DELETE,
+                before: {
+                    id: 't1'
+                },
+                beforeReplicaId: 't1'
             });
+            await writer2.commit('2/1');
+            await writer2.flush();
             await bucketStorage.compact({
                 clearBatchLimit: 20,
                 moveBatchLimit: 10,
@@ -611,14 +612,17 @@ bucket_definitions:
                 minBucketChanges: 1,
                 minChangeRatio: 0
             });
-            const checkpoint2 =
+            const checkpoint2 = writer2.last_flushed_op;
+            const request = bucketRequest(syncRules, 'global[]');
             // Check that the checksum was correctly updated with the clear operation after having a cached checksum
-            const checksumAfter = await bucketStorage.getChecksums(checkpoint2,
-
-
-
-
+            const checksumAfter = await bucketStorage.getChecksums(checkpoint2, [request]);
+            const globalChecksum = checksumAfter.get(request.bucket);
+            expect(globalChecksum).toMatchObject({
+                bucket: request.bucket,
+                count: 1
            });
+            // storage-specific checksum - just check that it does not change
+            expect(globalChecksum).toMatchSnapshot();
         }
         catch (e_6) {
             env_6.error = e_6;
@@ -630,5 +634,64 @@ bucket_definitions:
                 await result_6;
         }
     });
+    test('defaults maxOpId to current checkpoint', async () => {
+        const env_7 = { stack: [], error: void 0, hasError: false };
+        try {
+            const factory = __addDisposableResource(env_7, await generateStorageFactory(), true);
+            const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
+bucket_definitions:
+  global:
+    data: [select * from test]
+`));
+            const bucketStorage = factory.getInstance(syncRules);
+            const writer = __addDisposableResource(env_7, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
+            const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+            await writer.markAllSnapshotDone('1/1');
+            await writer.save({
+                sourceTable: testTable,
+                tag: storage.SaveOperationTag.INSERT,
+                after: { id: 't1' },
+                afterReplicaId: test_utils.rid('t1')
+            });
+            await writer.commit('1/1');
+            await writer.flush();
+            const checkpoint1 = writer.last_flushed_op;
+            const writer2 = __addDisposableResource(env_7, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
+            const testTable2 = await test_utils.resolveTestTable(writer2, 'test', ['id'], config);
+            // This is flushed but not committed (does not advance the checkpoint)
+            await writer2.save({
+                sourceTable: testTable2,
+                tag: storage.SaveOperationTag.UPDATE,
+                after: { id: 't1' },
+                afterReplicaId: test_utils.rid('t1')
+            });
+            await writer2.flush();
+            const checkpoint2 = writer2.last_flushed_op;
+            const checkpointBeforeCompact = await bucketStorage.getCheckpoint();
+            expect(checkpointBeforeCompact.checkpoint).toEqual(checkpoint1);
+            // With default options, Postgres compaction should use the active checkpoint.
+            await bucketStorage.compact({
+                moveBatchLimit: 1,
+                moveBatchQueryLimit: 1,
+                minBucketChanges: 1,
+                minChangeRatio: 0
+            });
+            const batchAfterDefaultCompact = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint2, bucketRequestMap(syncRules, [['global[]', 0n]])));
+            // Operation 1 should remain a PUT because op_id=2 is above the default maxOpId checkpoint.
+            expect(batchAfterDefaultCompact.chunkData.data).toMatchObject([
+                { op_id: '1', op: 'PUT', object_id: 't1' },
+                { op_id: '2', op: 'PUT', object_id: 't1' }
+            ]);
+        }
+        catch (e_7) {
+            env_7.error = e_7;
+            env_7.hasError = true;
+        }
+        finally {
+            const result_7 = __disposeResources(env_7);
+            if (result_7)
+                await result_7;
+        }
+    });
 }
 //# sourceMappingURL=register-compacting-tests.js.map
```
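The one genuinely new test in this file, `defaults maxOpId to current checkpoint` at the end of the diff, pins down a subtle compaction contract: operations that are flushed but never committed sit above the active checkpoint, and `compact()` called without an explicit upper bound must default that bound to the active checkpoint rather than the highest flushed op. Restated compactly with the same identifiers as the compiled test (the `maxOpId` option itself is implied by the test name and not shown in this diff):

```ts
// writer2's UPDATE (op_id = 2) was flushed but never committed, so the
// active checkpoint still points at op_id = 1.
const { checkpoint } = await bucketStorage.getCheckpoint();
expect(checkpoint).toEqual(checkpoint1);

await bucketStorage.compact({ moveBatchLimit: 1, moveBatchQueryLimit: 1, minBucketChanges: 1, minChangeRatio: 0 });

// Default-bounded compaction must leave both ops as PUTs: op 2 lies beyond
// the defaulted bound, and op 1 is the latest op visible at that bound,
// so neither may be rewritten to MOVE or CLEAR.
expect(batchAfterDefaultCompact.chunkData.data).toMatchObject([
  { op_id: '1', op: 'PUT', object_id: 't1' },
  { op_id: '2', op: 'PUT', object_id: 't1' }
]);
```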