@powersync/service-core-tests 0.14.0 → 0.15.1

This diff reflects the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (38)
  1. package/CHANGELOG.md +40 -0
  2. package/dist/test-utils/general-utils.d.ts +22 -3
  3. package/dist/test-utils/general-utils.js +56 -3
  4. package/dist/test-utils/general-utils.js.map +1 -1
  5. package/dist/test-utils/stream_utils.js +2 -2
  6. package/dist/test-utils/stream_utils.js.map +1 -1
  7. package/dist/tests/register-compacting-tests.d.ts +1 -1
  8. package/dist/tests/register-compacting-tests.js +360 -297
  9. package/dist/tests/register-compacting-tests.js.map +1 -1
  10. package/dist/tests/register-data-storage-checkpoint-tests.d.ts +1 -1
  11. package/dist/tests/register-data-storage-checkpoint-tests.js +59 -48
  12. package/dist/tests/register-data-storage-checkpoint-tests.js.map +1 -1
  13. package/dist/tests/register-data-storage-data-tests.d.ts +2 -2
  14. package/dist/tests/register-data-storage-data-tests.js +1112 -612
  15. package/dist/tests/register-data-storage-data-tests.js.map +1 -1
  16. package/dist/tests/register-data-storage-parameter-tests.d.ts +1 -1
  17. package/dist/tests/register-data-storage-parameter-tests.js +273 -254
  18. package/dist/tests/register-data-storage-parameter-tests.js.map +1 -1
  19. package/dist/tests/register-parameter-compacting-tests.d.ts +1 -1
  20. package/dist/tests/register-parameter-compacting-tests.js +83 -87
  21. package/dist/tests/register-parameter-compacting-tests.js.map +1 -1
  22. package/dist/tests/register-sync-tests.d.ts +2 -1
  23. package/dist/tests/register-sync-tests.js +479 -451
  24. package/dist/tests/register-sync-tests.js.map +1 -1
  25. package/dist/tests/util.d.ts +5 -4
  26. package/dist/tests/util.js +27 -12
  27. package/dist/tests/util.js.map +1 -1
  28. package/package.json +3 -3
  29. package/src/test-utils/general-utils.ts +81 -4
  30. package/src/test-utils/stream_utils.ts +2 -2
  31. package/src/tests/register-compacting-tests.ts +376 -322
  32. package/src/tests/register-data-storage-checkpoint-tests.ts +85 -53
  33. package/src/tests/register-data-storage-data-tests.ts +1050 -559
  34. package/src/tests/register-data-storage-parameter-tests.ts +330 -288
  35. package/src/tests/register-parameter-compacting-tests.ts +87 -90
  36. package/src/tests/register-sync-tests.ts +390 -380
  37. package/src/tests/util.ts +46 -17
  38. package/tsconfig.tsbuildinfo +1 -1
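
Most of the churn in `register-compacting-tests.js` (and the other registered test suites) comes from two API changes visible in the hunks below: `registerCompactTests` now receives a `config` object carrying the storage factory (`config.factory`) instead of a bare factory function, and test writes move from the callback-based `bucketStorage.startBatch(...)` to an explicitly disposed writer created with `bucketStorage.createWriter(...)`. The following is a minimal sketch of that before/after pattern, with simplified `any` types and a trimmed `CompactTestConfig` shape standing in for the real interfaces in `@powersync/service-core`:

```ts
import { storage } from '@powersync/service-core';
import * as test_utils from '../test-utils/test-utils-index.js';

// Sketch only: the real config type lives in the test package;
// this trimmed shape just illustrates the calls used by the tests.
interface CompactTestConfig {
  factory: () => Promise<any>;
}

// 0.14.0 style: a callback-scoped batch, checkpoint taken from the result.
async function writeOldStyle(bucketStorage: any, table: any) {
  const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch: any) => {
    await batch.save({
      sourceTable: table,
      tag: storage.SaveOperationTag.INSERT,
      after: { id: 't1' },
      afterReplicaId: test_utils.rid('t1')
    });
    await batch.commit('1/1');
  });
  return result.flushed_op;
}

// 0.15.x style: an explicit writer disposed by the caller, with commit/flush
// issued directly and the checkpoint read from the writer afterwards.
async function writeNewStyle(bucketStorage: any, config: CompactTestConfig) {
  await using writer = await bucketStorage.createWriter(test_utils.BATCH_OPTIONS);
  const table = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
  await writer.markAllSnapshotDone('1/1');
  await writer.save({
    sourceTable: table,
    tag: storage.SaveOperationTag.INSERT,
    after: { id: 't1' },
    afterReplicaId: test_utils.rid('t1')
  });
  await writer.commit('1/1');
  await writer.flush();
  return writer.last_flushed_op;
}
```

The assertions follow the same shift: hard-coded checksums and `bucketRequestMap(...)`/`bucketRequests(...)` lookups are largely replaced by a single `bucketRequest(syncRules, 'global[]')` request, with results keyed by `request.bucket` and storage-specific checksums pinned via `toMatchSnapshot()`.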
@@ -50,12 +50,13 @@ var __disposeResources = (this && this.__disposeResources) || (function (Suppres
  var e = new Error(message);
  return e.name = "SuppressedError", e.error = error, e.suppressed = suppressed, e;
  });
- import { storage, updateSyncRulesFromYaml } from '@powersync/service-core';
+ import { addChecksums, storage, updateSyncRulesFromYaml } from '@powersync/service-core';
  import { expect, test } from 'vitest';
  import * as test_utils from '../test-utils/test-utils-index.js';
- import { bucketRequest, bucketRequestMap, bucketRequests } from './util.js';
- const TEST_TABLE = test_utils.makeTestTable('test', ['id']);
- export function registerCompactTests(generateStorageFactory) {
+ import { bucketRequest } from '../test-utils/test-utils-index.js';
+ import { bucketRequestMap, bucketRequests } from './util.js';
+ export function registerCompactTests(config) {
+ const generateStorageFactory = config.factory;
  test('compacting (1)', async () => {
  const env_1 = { stack: [], error: void 0, hasError: false };
  try {
@@ -66,52 +67,52 @@ bucket_definitions:
  data: [select * from test]
  `));
  const bucketStorage = factory.getInstance(syncRules);
- const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 't1'
- },
- afterReplicaId: test_utils.rid('t1')
- });
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 't2'
- },
- afterReplicaId: test_utils.rid('t2')
- });
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: storage.SaveOperationTag.UPDATE,
- after: {
- id: 't2'
- },
- afterReplicaId: test_utils.rid('t2')
- });
- await batch.commit('1/1');
+ const writer = __addDisposableResource(env_1, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
+ const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+ await writer.markAllSnapshotDone('1/1');
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 't1'
+ },
+ afterReplicaId: test_utils.rid('t1')
+ });
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 't2'
+ },
+ afterReplicaId: test_utils.rid('t2')
+ });
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.UPDATE,
+ after: {
+ id: 't2'
+ },
+ afterReplicaId: test_utils.rid('t2')
  });
- const checkpoint = result.flushed_op;
- const batchBefore = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]])));
+ await writer.commit('1/1');
+ await writer.flush();
+ const checkpoint = writer.last_flushed_op;
+ const request = bucketRequest(syncRules, 'global[]');
+ const batchBefore = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint, [request]));
  const dataBefore = batchBefore.chunkData.data;
- const checksumBefore = await bucketStorage.getChecksums(checkpoint, bucketRequests(syncRules, ['global[]']));
+ const checksumBefore = await bucketStorage.getChecksums(checkpoint, [request]);
  expect(dataBefore).toMatchObject([
  {
- checksum: 2634521662,
  object_id: 't1',
  op: 'PUT',
  op_id: '1'
  },
  {
- checksum: 4243212114,
  object_id: 't2',
  op: 'PUT',
  op_id: '2'
  },
  {
- checksum: 4243212114,
  object_id: 't2',
  op: 'PUT',
  op_id: '3'
@@ -125,33 +126,28 @@ bucket_definitions:
  minBucketChanges: 1,
  minChangeRatio: 0
  });
- const batchAfter = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]])));
+ const batchAfter = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint, [request]));
  const dataAfter = batchAfter.chunkData.data;
- const checksumAfter = await bucketStorage.getChecksums(checkpoint, bucketRequests(syncRules, ['global[]']));
+ const checksumAfter = await bucketStorage.getChecksums(checkpoint, [request]);
  bucketStorage.clearChecksumCache();
- const checksumAfter2 = await bucketStorage.getChecksums(checkpoint, bucketRequests(syncRules, ['global[]']));
+ const checksumAfter2 = await bucketStorage.getChecksums(checkpoint, [request]);
  expect(batchAfter.targetOp).toEqual(3n);
  expect(dataAfter).toMatchObject([
+ dataBefore[0],
  {
- checksum: 2634521662,
- object_id: 't1',
- op: 'PUT',
- op_id: '1'
- },
- {
- checksum: 4243212114,
+ checksum: dataBefore[1].checksum,
  op: 'MOVE',
  op_id: '2'
  },
  {
- checksum: 4243212114,
+ checksum: dataBefore[2].checksum,
  object_id: 't2',
  op: 'PUT',
  op_id: '3'
  }
  ]);
- expect(checksumAfter.get(bucketRequest(syncRules, 'global[]'))).toEqual(checksumBefore.get(bucketRequest(syncRules, 'global[]')));
- expect(checksumAfter2.get(bucketRequest(syncRules, 'global[]'))).toEqual(checksumBefore.get(bucketRequest(syncRules, 'global[]')));
+ expect(checksumAfter.get(request.bucket)).toEqual(checksumBefore.get(request.bucket));
+ expect(checksumAfter2.get(request.bucket)).toEqual(checksumBefore.get(request.bucket));
  test_utils.validateCompactedBucket(dataBefore, dataAfter);
  }
  catch (e_1) {
@@ -174,69 +170,65 @@ bucket_definitions:
  data: [select * from test]
  `));
  const bucketStorage = factory.getInstance(syncRules);
- const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 't1'
- },
- afterReplicaId: test_utils.rid('t1')
- });
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 't2'
- },
- afterReplicaId: test_utils.rid('t2')
- });
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: storage.SaveOperationTag.DELETE,
- before: {
- id: 't1'
- },
- beforeReplicaId: test_utils.rid('t1')
- });
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: storage.SaveOperationTag.UPDATE,
- after: {
- id: 't2'
- },
- afterReplicaId: test_utils.rid('t2')
- });
- await batch.commit('1/1');
+ const writer = __addDisposableResource(env_2, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
+ const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+ await writer.markAllSnapshotDone('1/1');
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 't1'
+ },
+ afterReplicaId: test_utils.rid('t1')
+ });
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 't2'
+ },
+ afterReplicaId: test_utils.rid('t2')
+ });
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.DELETE,
+ before: {
+ id: 't1'
+ },
+ beforeReplicaId: test_utils.rid('t1')
+ });
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.UPDATE,
+ after: {
+ id: 't2'
+ },
+ afterReplicaId: test_utils.rid('t2')
  });
- const checkpoint = result.flushed_op;
- const batchBefore = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]])));
+ await writer.commit('1/1');
+ await writer.flush();
+ const checkpoint = writer.last_flushed_op;
+ const request = bucketRequest(syncRules, 'global[]');
+ const batchBefore = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint, [request]));
  const dataBefore = batchBefore.chunkData.data;
- const checksumBefore = await bucketStorage.getChecksums(checkpoint, bucketRequests(syncRules, ['global[]']));
+ const checksumBefore = await bucketStorage.getChecksums(checkpoint, [request]);
+ // op_id sequence depends on the storage implementation
  expect(dataBefore).toMatchObject([
  {
- checksum: 2634521662,
  object_id: 't1',
- op: 'PUT',
- op_id: '1'
+ op: 'PUT'
  },
  {
- checksum: 4243212114,
  object_id: 't2',
- op: 'PUT',
- op_id: '2'
+ op: 'PUT'
  },
  {
- checksum: 4228978084,
  object_id: 't1',
- op: 'REMOVE',
- op_id: '3'
+ op: 'REMOVE'
  },
  {
- checksum: 4243212114,
  object_id: 't2',
- op: 'PUT',
- op_id: '4'
+ op: 'PUT'
  }
  ]);
  await bucketStorage.compact({
@@ -246,26 +238,24 @@ bucket_definitions:
  minBucketChanges: 1,
  minChangeRatio: 0
  });
- const batchAfter = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]])));
+ const batchAfter = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint, [request]));
  const dataAfter = batchAfter.chunkData.data;
  bucketStorage.clearChecksumCache();
- const checksumAfter = await bucketStorage.getChecksums(checkpoint, bucketRequests(syncRules, ['global[]']));
- expect(batchAfter.targetOp).toEqual(4n);
+ const checksumAfter = await bucketStorage.getChecksums(checkpoint, [request]);
+ expect(batchAfter.targetOp).toBeLessThanOrEqual(checkpoint);
  expect(dataAfter).toMatchObject([
  {
- checksum: -1778190028,
- op: 'CLEAR',
- op_id: '3'
+ checksum: addChecksums(addChecksums(dataBefore[0].checksum, dataBefore[1].checksum), dataBefore[2].checksum),
+ op: 'CLEAR'
  },
  {
- checksum: 4243212114,
+ checksum: dataBefore[3].checksum,
  object_id: 't2',
- op: 'PUT',
- op_id: '4'
+ op: 'PUT'
  }
  ]);
- expect(checksumAfter.get(bucketRequest(syncRules, 'global[]'))).toEqual({
- ...checksumBefore.get(bucketRequest(syncRules, 'global[]')),
+ expect(checksumAfter.get(request.bucket)).toEqual({
+ ...checksumBefore.get(request.bucket),
  count: 2
  });
  test_utils.validateCompactedBucket(dataBefore, dataAfter);
@@ -290,47 +280,50 @@ bucket_definitions:
  data: [select * from test]
  `));
  const bucketStorage = factory.getInstance(syncRules);
- const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 't1'
- },
- afterReplicaId: 't1'
- });
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 't2'
- },
- afterReplicaId: 't2'
- });
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: storage.SaveOperationTag.DELETE,
- before: {
- id: 't1'
- },
- beforeReplicaId: 't1'
- });
- await batch.commit('1/1');
+ const writer = __addDisposableResource(env_3, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
+ const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+ await writer.markAllSnapshotDone('1/1');
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 't1'
+ },
+ afterReplicaId: 't1'
  });
- const checkpoint1 = result.flushed_op;
- const checksumBefore = await bucketStorage.getChecksums(checkpoint1, bucketRequests(syncRules, ['global[]']));
- const result2 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: storage.SaveOperationTag.DELETE,
- before: {
- id: 't2'
- },
- beforeReplicaId: 't2'
- });
- await batch.commit('2/1');
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 't2'
+ },
+ afterReplicaId: 't2'
  });
- const checkpoint2 = result2.flushed_op;
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.DELETE,
+ before: {
+ id: 't1'
+ },
+ beforeReplicaId: 't1'
+ });
+ await writer.commit('1/1');
+ await writer.flush();
+ const checkpoint1 = writer.last_flushed_op;
+ const request = bucketRequest(syncRules, 'global[]');
+ const writer2 = __addDisposableResource(env_3, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
+ const testTable2 = await test_utils.resolveTestTable(writer2, 'test', ['id'], config);
+ await writer2.save({
+ sourceTable: testTable2,
+ tag: storage.SaveOperationTag.DELETE,
+ before: {
+ id: 't2'
+ },
+ beforeReplicaId: 't2'
+ });
+ await writer2.commit('2/1');
+ await writer2.flush();
+ const checkpoint2 = writer2.last_flushed_op;
  await bucketStorage.compact({
  clearBatchLimit: 2,
  moveBatchLimit: 1,
@@ -338,22 +331,19 @@ bucket_definitions:
  minBucketChanges: 1,
  minChangeRatio: 0
  });
- const batchAfter = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint2, bucketRequestMap(syncRules, [['global[]', 0n]])));
+ const batchAfter = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint2, [request]));
  const dataAfter = batchAfter.chunkData.data;
  await bucketStorage.clearChecksumCache();
- const checksumAfter = await bucketStorage.getChecksums(checkpoint2, bucketRequests(syncRules, ['global[]']));
- expect(batchAfter.targetOp).toEqual(4n);
+ const checksumAfter = await bucketStorage.getChecksums(checkpoint2, [request]);
  expect(dataAfter).toMatchObject([
  {
- checksum: 1874612650,
- op: 'CLEAR',
- op_id: '4'
+ op: 'CLEAR'
  }
  ]);
- expect(checksumAfter.get(bucketRequest(syncRules, 'global[]'))).toEqual({
- bucket: bucketRequest(syncRules, 'global[]'),
+ expect(checksumAfter.get(request.bucket)).toEqual({
+ bucket: request.bucket,
  count: 1,
- checksum: 1874612650
+ checksum: dataAfter[0].checksum
  });
  }
  catch (e_3) {
@@ -379,69 +369,71 @@ bucket_definitions:
  data:
  - select * from test where b = bucket.b`));
  const bucketStorage = factory.getInstance(syncRules);
- const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
- /**
- * Repeatedly create operations which fall into different buckets.
- * The bucket operations are purposely interleaved as the op_id increases.
- * A large amount of operations are created here.
- * The configured window of compacting operations is 100. This means the initial window will
- * contain operations from multiple buckets.
- */
- for (let count = 0; count < 100; count++) {
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 't1',
- b: 'b1',
- value: 'start'
- },
- afterReplicaId: test_utils.rid('t1')
- });
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: storage.SaveOperationTag.UPDATE,
- after: {
- id: 't1',
- b: 'b1',
- value: 'intermediate'
- },
- afterReplicaId: test_utils.rid('t1')
- });
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 't2',
- b: 'b2',
- value: 'start'
- },
- afterReplicaId: test_utils.rid('t2')
- });
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: storage.SaveOperationTag.UPDATE,
- after: {
- id: 't1',
- b: 'b1',
- value: 'final'
- },
- afterReplicaId: test_utils.rid('t1')
- });
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: storage.SaveOperationTag.UPDATE,
- after: {
- id: 't2',
- b: 'b2',
- value: 'final'
- },
- afterReplicaId: test_utils.rid('t2')
- });
- await batch.commit('1/1');
- }
- });
- const checkpoint = result.flushed_op;
+ const writer = __addDisposableResource(env_4, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
+ const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+ await writer.markAllSnapshotDone('1/1');
+ /**
+ * Repeatedly create operations which fall into different buckets.
+ * The bucket operations are purposely interleaved as the op_id increases.
+ * A large amount of operations are created here.
+ * The configured window of compacting operations is 100. This means the initial window will
+ * contain operations from multiple buckets.
+ */
+ for (let count = 0; count < 100; count++) {
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 't1',
+ b: 'b1',
+ value: 'start'
+ },
+ afterReplicaId: test_utils.rid('t1')
+ });
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.UPDATE,
+ after: {
+ id: 't1',
+ b: 'b1',
+ value: 'intermediate'
+ },
+ afterReplicaId: test_utils.rid('t1')
+ });
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 't2',
+ b: 'b2',
+ value: 'start'
+ },
+ afterReplicaId: test_utils.rid('t2')
+ });
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.UPDATE,
+ after: {
+ id: 't1',
+ b: 'b1',
+ value: 'final'
+ },
+ afterReplicaId: test_utils.rid('t1')
+ });
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.UPDATE,
+ after: {
+ id: 't2',
+ b: 'b2',
+ value: 'final'
+ },
+ afterReplicaId: test_utils.rid('t2')
+ });
+ await writer.commit('1/1');
+ }
+ await writer.flush();
+ const checkpoint = writer.last_flushed_op;
  await bucketStorage.compact({
  clearBatchLimit: 100,
  moveBatchLimit: 100,
@@ -498,33 +490,35 @@ bucket_definitions:
  data: [select * from test]
  `));
  const bucketStorage = factory.getInstance(syncRules);
- const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 't1'
- },
- afterReplicaId: 't1'
- });
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 't2'
- },
- afterReplicaId: 't2'
- });
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: storage.SaveOperationTag.DELETE,
- before: {
- id: 't1'
- },
- beforeReplicaId: 't1'
- });
- await batch.commit('1/1');
+ const writer = __addDisposableResource(env_5, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
+ const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+ await writer.markAllSnapshotDone('1/1');
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 't1'
+ },
+ afterReplicaId: 't1'
+ });
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 't2'
+ },
+ afterReplicaId: 't2'
+ });
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.DELETE,
+ before: {
+ id: 't1'
+ },
+ beforeReplicaId: 't1'
  });
+ await writer.commit('1/1');
+ await writer.flush();
  await bucketStorage.compact({
  clearBatchLimit: 2,
  moveBatchLimit: 1,
@@ -532,25 +526,29 @@ bucket_definitions:
  minBucketChanges: 1,
  minChangeRatio: 0
  });
- const result2 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: storage.SaveOperationTag.DELETE,
- before: {
- id: 't2'
- },
- beforeReplicaId: 't2'
- });
- await batch.commit('2/1');
+ const writer2 = __addDisposableResource(env_5, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
+ const testTable2 = await test_utils.resolveTestTable(writer2, 'test', ['id'], config);
+ await writer2.save({
+ sourceTable: testTable2,
+ tag: storage.SaveOperationTag.DELETE,
+ before: {
+ id: 't2'
+ },
+ beforeReplicaId: 't2'
  });
- const checkpoint2 = result2.flushed_op;
+ await writer2.commit('2/1');
+ await writer2.flush();
+ const checkpoint2 = writer2.last_flushed_op;
+ const request = bucketRequest(syncRules, 'global[]');
  await bucketStorage.clearChecksumCache();
- const checksumAfter = await bucketStorage.getChecksums(checkpoint2, bucketRequests(syncRules, ['global[]']));
- expect(checksumAfter.get(bucketRequest(syncRules, 'global[]'))).toEqual({
- bucket: bucketRequest(syncRules, 'global[]'),
- count: 4,
- checksum: 1874612650
+ const checksumAfter = await bucketStorage.getChecksums(checkpoint2, [request]);
+ const globalChecksum = checksumAfter.get(request.bucket);
+ expect(globalChecksum).toMatchObject({
+ bucket: request.bucket,
+ count: 4
  });
+ // storage-specific checksum - just check that it does not change
+ expect(globalChecksum).toMatchSnapshot();
  }
  catch (e_5) {
  env_5.error = e_5;
@@ -572,38 +570,41 @@ bucket_definitions:
  data: [select * from test]
  `));
  const bucketStorage = factory.getInstance(syncRules);
- const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 't1'
- },
- afterReplicaId: 't1'
- });
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: storage.SaveOperationTag.UPDATE,
- after: {
- id: 't1'
- },
- afterReplicaId: 't1'
- });
- await batch.commit('1/1');
+ const writer = __addDisposableResource(env_6, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
+ const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+ await writer.markAllSnapshotDone('1/1');
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 't1'
+ },
+ afterReplicaId: 't1'
+ });
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.UPDATE,
+ after: {
+ id: 't1'
+ },
+ afterReplicaId: 't1'
  });
+ await writer.commit('1/1');
+ await writer.flush();
  // Get checksums here just to populate the cache
- await bucketStorage.getChecksums(result.flushed_op, bucketRequests(syncRules, ['global[]']));
- const result2 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: storage.SaveOperationTag.DELETE,
- before: {
- id: 't1'
- },
- beforeReplicaId: 't1'
- });
- await batch.commit('2/1');
+ await bucketStorage.getChecksums(writer.last_flushed_op, bucketRequests(syncRules, ['global[]']));
+ const writer2 = __addDisposableResource(env_6, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
+ const testTable2 = await test_utils.resolveTestTable(writer2, 'test', ['id'], config);
+ await writer2.save({
+ sourceTable: testTable2,
+ tag: storage.SaveOperationTag.DELETE,
+ before: {
+ id: 't1'
+ },
+ beforeReplicaId: 't1'
  });
+ await writer2.commit('2/1');
+ await writer2.flush();
  await bucketStorage.compact({
  clearBatchLimit: 20,
  moveBatchLimit: 10,
@@ -611,14 +612,17 @@ bucket_definitions:
  minBucketChanges: 1,
  minChangeRatio: 0
  });
- const checkpoint2 = result2.flushed_op;
+ const checkpoint2 = writer2.last_flushed_op;
+ const request = bucketRequest(syncRules, 'global[]');
  // Check that the checksum was correctly updated with the clear operation after having a cached checksum
- const checksumAfter = await bucketStorage.getChecksums(checkpoint2, bucketRequests(syncRules, ['global[]']));
- expect(checksumAfter.get(bucketRequest(syncRules, 'global[]'))).toMatchObject({
- bucket: bucketRequest(syncRules, 'global[]'),
- count: 1,
- checksum: -1481659821
+ const checksumAfter = await bucketStorage.getChecksums(checkpoint2, [request]);
+ const globalChecksum = checksumAfter.get(request.bucket);
+ expect(globalChecksum).toMatchObject({
+ bucket: request.bucket,
+ count: 1
  });
+ // storage-specific checksum - just check that it does not change
+ expect(globalChecksum).toMatchSnapshot();
  }
  catch (e_6) {
  env_6.error = e_6;
@@ -630,5 +634,64 @@ bucket_definitions:
  await result_6;
  }
  });
+ test('defaults maxOpId to current checkpoint', async () => {
+ const env_7 = { stack: [], error: void 0, hasError: false };
+ try {
+ const factory = __addDisposableResource(env_7, await generateStorageFactory(), true);
+ const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
+ bucket_definitions:
+ global:
+ data: [select * from test]
+ `));
+ const bucketStorage = factory.getInstance(syncRules);
+ const writer = __addDisposableResource(env_7, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
+ const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+ await writer.markAllSnapshotDone('1/1');
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: { id: 't1' },
+ afterReplicaId: test_utils.rid('t1')
+ });
+ await writer.commit('1/1');
+ await writer.flush();
+ const checkpoint1 = writer.last_flushed_op;
+ const writer2 = __addDisposableResource(env_7, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
+ const testTable2 = await test_utils.resolveTestTable(writer2, 'test', ['id'], config);
+ // This is flushed but not committed (does not advance the checkpoint)
+ await writer2.save({
+ sourceTable: testTable2,
+ tag: storage.SaveOperationTag.UPDATE,
+ after: { id: 't1' },
+ afterReplicaId: test_utils.rid('t1')
+ });
+ await writer2.flush();
+ const checkpoint2 = writer2.last_flushed_op;
+ const checkpointBeforeCompact = await bucketStorage.getCheckpoint();
+ expect(checkpointBeforeCompact.checkpoint).toEqual(checkpoint1);
+ // With default options, Postgres compaction should use the active checkpoint.
+ await bucketStorage.compact({
+ moveBatchLimit: 1,
+ moveBatchQueryLimit: 1,
+ minBucketChanges: 1,
+ minChangeRatio: 0
+ });
+ const batchAfterDefaultCompact = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint2, bucketRequestMap(syncRules, [['global[]', 0n]])));
+ // Operation 1 should remain a PUT because op_id=2 is above the default maxOpId checkpoint.
+ expect(batchAfterDefaultCompact.chunkData.data).toMatchObject([
+ { op_id: '1', op: 'PUT', object_id: 't1' },
+ { op_id: '2', op: 'PUT', object_id: 't1' }
+ ]);
+ }
+ catch (e_7) {
+ env_7.error = e_7;
+ env_7.hasError = true;
+ }
+ finally {
+ const result_7 = __disposeResources(env_7);
+ if (result_7)
+ await result_7;
+ }
+ });
  }
  //# sourceMappingURL=register-compacting-tests.js.map