@powersync/service-core-tests 0.14.0 → 0.15.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/CHANGELOG.md +19 -0
  2. package/dist/test-utils/general-utils.d.ts +9 -2
  3. package/dist/test-utils/general-utils.js +26 -2
  4. package/dist/test-utils/general-utils.js.map +1 -1
  5. package/dist/tests/register-compacting-tests.d.ts +1 -1
  6. package/dist/tests/register-compacting-tests.js +122 -68
  7. package/dist/tests/register-compacting-tests.js.map +1 -1
  8. package/dist/tests/register-data-storage-checkpoint-tests.d.ts +1 -1
  9. package/dist/tests/register-data-storage-checkpoint-tests.js +38 -6
  10. package/dist/tests/register-data-storage-checkpoint-tests.js.map +1 -1
  11. package/dist/tests/register-data-storage-data-tests.d.ts +2 -2
  12. package/dist/tests/register-data-storage-data-tests.js +666 -142
  13. package/dist/tests/register-data-storage-data-tests.js.map +1 -1
  14. package/dist/tests/register-data-storage-parameter-tests.d.ts +1 -1
  15. package/dist/tests/register-data-storage-parameter-tests.js +60 -33
  16. package/dist/tests/register-data-storage-parameter-tests.js.map +1 -1
  17. package/dist/tests/register-parameter-compacting-tests.d.ts +1 -1
  18. package/dist/tests/register-parameter-compacting-tests.js +8 -4
  19. package/dist/tests/register-parameter-compacting-tests.js.map +1 -1
  20. package/dist/tests/register-sync-tests.d.ts +2 -1
  21. package/dist/tests/register-sync-tests.js +40 -18
  22. package/dist/tests/register-sync-tests.js.map +1 -1
  23. package/dist/tests/util.d.ts +5 -4
  24. package/dist/tests/util.js +27 -12
  25. package/dist/tests/util.js.map +1 -1
  26. package/package.json +3 -3
  27. package/src/test-utils/general-utils.ts +41 -3
  28. package/src/tests/register-compacting-tests.ts +127 -82
  29. package/src/tests/register-data-storage-checkpoint-tests.ts +64 -11
  30. package/src/tests/register-data-storage-data-tests.ts +640 -52
  31. package/src/tests/register-data-storage-parameter-tests.ts +101 -47
  32. package/src/tests/register-parameter-compacting-tests.ts +9 -4
  33. package/src/tests/register-sync-tests.ts +45 -19
  34. package/src/tests/util.ts +46 -17
  35. package/tsconfig.tsbuildinfo +1 -1
@@ -50,10 +50,11 @@ var __disposeResources = (this && this.__disposeResources) || (function (Suppres
50
50
  var e = new Error(message);
51
51
  return e.name = "SuppressedError", e.error = error, e.suppressed = suppressed, e;
52
52
  });
53
- import { getUuidReplicaIdentityBson, storage, updateSyncRulesFromYaml } from '@powersync/service-core';
53
+ import { CURRENT_STORAGE_VERSION, getUuidReplicaIdentityBson, reduceBucket, storage, updateSyncRulesFromYaml } from '@powersync/service-core';
54
54
  import { describe, expect, test } from 'vitest';
55
55
  import * as test_utils from '../test-utils/test-utils-index.js';
56
- import { bucketRequest, bucketRequestMap, bucketRequests, TEST_TABLE } from './util.js';
56
+ import { bucketRequest } from '../test-utils/test-utils-index.js';
57
+ import { bucketRequestMap, bucketRequests } from './util.js';
57
58
  /**
58
59
  * Normalize data from OplogEntries for comparison in tests.
59
60
  * Tests typically expect the stringified result
@@ -74,7 +75,10 @@ const normalizeOplogData = (data) => {
74
75
  *
75
76
  * ```
76
77
  */
77
- export function registerDataStorageDataTests(generateStorageFactory) {
78
+ export function registerDataStorageDataTests(config) {
79
+ const generateStorageFactory = config.factory;
80
+ const storageVersion = config.storageVersion ?? storage.CURRENT_STORAGE_VERSION;
81
+ const TEST_TABLE = test_utils.makeTestTable('test', ['id'], config);
78
82
  test('removing row', async () => {
79
83
  const env_1 = { stack: [], error: void 0, hasError: false };
80
84
  try {
@@ -84,10 +88,11 @@ bucket_definitions:
84
88
  global:
85
89
  data:
86
90
  - SELECT id, description FROM "%"
87
- `));
91
+ `, { storageVersion }));
88
92
  const bucketStorage = factory.getInstance(syncRules);
89
93
  await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
90
94
  const sourceTable = TEST_TABLE;
95
+ await batch.markAllSnapshotDone('1/1');
91
96
  await batch.save({
92
97
  sourceTable,
93
98
  tag: storage.SaveOperationTag.INSERT,
@@ -124,7 +129,7 @@ bucket_definitions:
124
129
  ];
125
130
  expect(checksums).toEqual([
126
131
  {
127
- bucket: bucketRequest(syncRules, 'global[]'),
132
+ bucket: bucketRequest(syncRules, 'global[]').bucket,
128
133
  checksum: (c1 + c2) & 0xffffffff,
129
134
  count: 2
130
135
  }
@@ -140,19 +145,296 @@ bucket_definitions:
140
145
  await result_1;
141
146
  }
142
147
  });
143
- test('changing client ids', async () => {
148
+ test('insert after delete in new batch', async () => {
144
149
  const env_2 = { stack: [], error: void 0, hasError: false };
145
150
  try {
146
151
  const factory = __addDisposableResource(env_2, await generateStorageFactory(), true);
147
152
  const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
153
+ bucket_definitions:
154
+ global:
155
+ data:
156
+ - SELECT id, description FROM "%"
157
+ `, { storageVersion }));
158
+ const bucketStorage = factory.getInstance(syncRules);
159
+ const sourceTable = TEST_TABLE;
160
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
161
+ await batch.markAllSnapshotDone('1/1');
162
+ await batch.save({
163
+ sourceTable,
164
+ tag: storage.SaveOperationTag.DELETE,
165
+ beforeReplicaId: test_utils.rid('test1')
166
+ });
167
+ await batch.commit('0/1');
168
+ });
169
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
170
+ const sourceTable = TEST_TABLE;
171
+ await batch.save({
172
+ sourceTable,
173
+ tag: storage.SaveOperationTag.INSERT,
174
+ after: {
175
+ id: 'test1',
176
+ description: 'test1'
177
+ },
178
+ afterReplicaId: test_utils.rid('test1')
179
+ });
180
+ await batch.commit('2/1');
181
+ });
182
+ const { checkpoint } = await bucketStorage.getCheckpoint();
183
+ const batch = await test_utils.fromAsync(bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]])));
184
+ const data = batch[0].chunkData.data.map((d) => {
185
+ return {
186
+ op: d.op,
187
+ object_id: d.object_id,
188
+ checksum: d.checksum
189
+ };
190
+ });
191
+ const c1 = 2871785649;
192
+ expect(data).toEqual([{ op: 'PUT', object_id: 'test1', checksum: c1 }]);
193
+ const checksums = [
194
+ ...(await bucketStorage.getChecksums(checkpoint, bucketRequests(syncRules, ['global[]']))).values()
195
+ ];
196
+ expect(checksums).toEqual([
197
+ {
198
+ bucket: bucketRequest(syncRules, 'global[]').bucket,
199
+ checksum: c1 & 0xffffffff,
200
+ count: 1
201
+ }
202
+ ]);
203
+ }
204
+ catch (e_2) {
205
+ env_2.error = e_2;
206
+ env_2.hasError = true;
207
+ }
208
+ finally {
209
+ const result_2 = __disposeResources(env_2);
210
+ if (result_2)
211
+ await result_2;
212
+ }
213
+ });
214
+ test('update after delete in new batch', async () => {
215
+ const env_3 = { stack: [], error: void 0, hasError: false };
216
+ try {
217
+ // Update after delete may not be common, but the storage layer should handle it in an eventually-consistent way.
218
+ const factory = __addDisposableResource(env_3, await generateStorageFactory(), true);
219
+ const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
220
+ bucket_definitions:
221
+ global:
222
+ data:
223
+ - SELECT id, description FROM "%"
224
+ `, { storageVersion }));
225
+ const bucketStorage = factory.getInstance(syncRules);
226
+ const sourceTable = TEST_TABLE;
227
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
228
+ await batch.markAllSnapshotDone('1/1');
229
+ await batch.save({
230
+ sourceTable,
231
+ tag: storage.SaveOperationTag.DELETE,
232
+ beforeReplicaId: test_utils.rid('test1')
233
+ });
234
+ await batch.commit('0/1');
235
+ });
236
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
237
+ const sourceTable = TEST_TABLE;
238
+ await batch.save({
239
+ sourceTable,
240
+ tag: storage.SaveOperationTag.UPDATE,
241
+ before: {
242
+ id: 'test1'
243
+ },
244
+ after: {
245
+ id: 'test1',
246
+ description: 'test1'
247
+ },
248
+ beforeReplicaId: test_utils.rid('test1'),
249
+ afterReplicaId: test_utils.rid('test1')
250
+ });
251
+ await batch.commit('2/1');
252
+ });
253
+ const { checkpoint } = await bucketStorage.getCheckpoint();
254
+ const batch = await test_utils.fromAsync(bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]])));
255
+ const data = batch[0].chunkData.data.map((d) => {
256
+ return {
257
+ op: d.op,
258
+ object_id: d.object_id,
259
+ checksum: d.checksum
260
+ };
261
+ });
262
+ const c1 = 2871785649;
263
+ expect(data).toEqual([{ op: 'PUT', object_id: 'test1', checksum: c1 }]);
264
+ const checksums = [
265
+ ...(await bucketStorage.getChecksums(checkpoint, bucketRequests(syncRules, ['global[]']))).values()
266
+ ];
267
+ expect(checksums).toEqual([
268
+ {
269
+ bucket: bucketRequest(syncRules, 'global[]').bucket,
270
+ checksum: c1 & 0xffffffff,
271
+ count: 1
272
+ }
273
+ ]);
274
+ }
275
+ catch (e_3) {
276
+ env_3.error = e_3;
277
+ env_3.hasError = true;
278
+ }
279
+ finally {
280
+ const result_3 = __disposeResources(env_3);
281
+ if (result_3)
282
+ await result_3;
283
+ }
284
+ });
285
+ test('insert after delete in same batch', async () => {
286
+ const env_4 = { stack: [], error: void 0, hasError: false };
287
+ try {
288
+ const factory = __addDisposableResource(env_4, await generateStorageFactory(), true);
289
+ const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
290
+ bucket_definitions:
291
+ global:
292
+ data:
293
+ - SELECT id, description FROM "%"
294
+ `, {
295
+ storageVersion
296
+ }));
297
+ const bucketStorage = factory.getInstance(syncRules);
298
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
299
+ const sourceTable = TEST_TABLE;
300
+ await batch.markAllSnapshotDone('1/1');
301
+ await batch.save({
302
+ sourceTable,
303
+ tag: storage.SaveOperationTag.DELETE,
304
+ beforeReplicaId: test_utils.rid('test1')
305
+ });
306
+ await batch.save({
307
+ sourceTable,
308
+ tag: storage.SaveOperationTag.INSERT,
309
+ after: {
310
+ id: 'test1',
311
+ description: 'test1'
312
+ },
313
+ afterReplicaId: test_utils.rid('test1')
314
+ });
315
+ await batch.commit('1/1');
316
+ });
317
+ const { checkpoint } = await bucketStorage.getCheckpoint();
318
+ const batch = await test_utils.fromAsync(bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]])));
319
+ const data = batch[0].chunkData.data.map((d) => {
320
+ return {
321
+ op: d.op,
322
+ object_id: d.object_id,
323
+ checksum: d.checksum
324
+ };
325
+ });
326
+ const c1 = 2871785649;
327
+ expect(data).toEqual([{ op: 'PUT', object_id: 'test1', checksum: c1 }]);
328
+ const checksums = [
329
+ ...(await bucketStorage.getChecksums(checkpoint, bucketRequests(syncRules, ['global[]']))).values()
330
+ ];
331
+ expect(checksums).toEqual([
332
+ {
333
+ bucket: bucketRequest(syncRules, 'global[]').bucket,
334
+ checksum: c1 & 0xffffffff,
335
+ count: 1
336
+ }
337
+ ]);
338
+ }
339
+ catch (e_4) {
340
+ env_4.error = e_4;
341
+ env_4.hasError = true;
342
+ }
343
+ finally {
344
+ const result_4 = __disposeResources(env_4);
345
+ if (result_4)
346
+ await result_4;
347
+ }
348
+ });
349
+ test('(insert, delete, insert), (delete)', async () => {
350
+ const env_5 = { stack: [], error: void 0, hasError: false };
351
+ try {
352
+ const factory = __addDisposableResource(env_5, await generateStorageFactory(), true);
353
+ const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
354
+ bucket_definitions:
355
+ global:
356
+ data:
357
+ - SELECT id, description FROM "%"
358
+ `, {
359
+ storageVersion
360
+ }));
361
+ const bucketStorage = factory.getInstance(syncRules);
362
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
363
+ const sourceTable = TEST_TABLE;
364
+ await batch.markAllSnapshotDone('1/1');
365
+ await batch.save({
366
+ sourceTable,
367
+ tag: storage.SaveOperationTag.INSERT,
368
+ after: {
369
+ id: 'test1',
370
+ description: 'test1'
371
+ },
372
+ afterReplicaId: test_utils.rid('test1')
373
+ });
374
+ await batch.save({
375
+ sourceTable,
376
+ tag: storage.SaveOperationTag.DELETE,
377
+ beforeReplicaId: test_utils.rid('test1')
378
+ });
379
+ await batch.save({
380
+ sourceTable,
381
+ tag: storage.SaveOperationTag.INSERT,
382
+ after: {
383
+ id: 'test1',
384
+ description: 'test1'
385
+ },
386
+ afterReplicaId: test_utils.rid('test1')
387
+ });
388
+ await batch.commit('1/1');
389
+ });
390
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
391
+ const sourceTable = TEST_TABLE;
392
+ await batch.markAllSnapshotDone('1/1');
393
+ await batch.save({
394
+ sourceTable,
395
+ tag: storage.SaveOperationTag.DELETE,
396
+ beforeReplicaId: test_utils.rid('test1')
397
+ });
398
+ await batch.commit('2/1');
399
+ });
400
+ const { checkpoint } = await bucketStorage.getCheckpoint();
401
+ const batch = await test_utils.fromAsync(bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]])));
402
+ expect(reduceBucket(batch[0].chunkData.data).slice(1)).toEqual([]);
403
+ const data = batch[0].chunkData.data.map((d) => {
404
+ return {
405
+ op: d.op,
406
+ object_id: d.object_id,
407
+ checksum: d.checksum
408
+ };
409
+ });
410
+ expect(data).toMatchSnapshot();
411
+ }
412
+ catch (e_5) {
413
+ env_5.error = e_5;
414
+ env_5.hasError = true;
415
+ }
416
+ finally {
417
+ const result_5 = __disposeResources(env_5);
418
+ if (result_5)
419
+ await result_5;
420
+ }
421
+ });
422
+ test('changing client ids', async () => {
423
+ const env_6 = { stack: [], error: void 0, hasError: false };
424
+ try {
425
+ const factory = __addDisposableResource(env_6, await generateStorageFactory(), true);
426
+ const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
148
427
  bucket_definitions:
149
428
  global:
150
429
  data:
151
430
  - SELECT client_id as id, description FROM "%"
152
- `));
431
+ `, {
432
+ storageVersion
433
+ }));
153
434
  const bucketStorage = factory.getInstance(syncRules);
154
435
  const sourceTable = TEST_TABLE;
155
436
  await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
437
+ await batch.markAllSnapshotDone('1/1');
156
438
  await batch.save({
157
439
  sourceTable,
158
440
  tag: storage.SaveOperationTag.INSERT,
@@ -200,29 +482,32 @@ bucket_definitions:
200
482
  { op: 'PUT', object_id: 'client2' }
201
483
  ]);
202
484
  }
203
- catch (e_2) {
204
- env_2.error = e_2;
205
- env_2.hasError = true;
485
+ catch (e_6) {
486
+ env_6.error = e_6;
487
+ env_6.hasError = true;
206
488
  }
207
489
  finally {
208
- const result_2 = __disposeResources(env_2);
209
- if (result_2)
210
- await result_2;
490
+ const result_6 = __disposeResources(env_6);
491
+ if (result_6)
492
+ await result_6;
211
493
  }
212
494
  });
213
495
  test('re-apply delete', async () => {
214
- const env_3 = { stack: [], error: void 0, hasError: false };
496
+ const env_7 = { stack: [], error: void 0, hasError: false };
215
497
  try {
216
- const factory = __addDisposableResource(env_3, await generateStorageFactory(), true);
498
+ const factory = __addDisposableResource(env_7, await generateStorageFactory(), true);
217
499
  const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
218
500
  bucket_definitions:
219
501
  global:
220
502
  data:
221
503
  - SELECT id, description FROM "%"
222
- `));
504
+ `, {
505
+ storageVersion
506
+ }));
223
507
  const bucketStorage = factory.getInstance(syncRules);
224
508
  await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
225
509
  const sourceTable = TEST_TABLE;
510
+ await batch.markAllSnapshotDone('1/1');
226
511
  await batch.save({
227
512
  sourceTable,
228
513
  tag: storage.SaveOperationTag.INSERT,
@@ -270,34 +555,35 @@ bucket_definitions:
270
555
  ];
271
556
  expect(checksums).toEqual([
272
557
  {
273
- bucket: bucketRequest(syncRules, 'global[]'),
558
+ bucket: bucketRequest(syncRules, 'global[]').bucket,
274
559
  checksum: (c1 + c2) & 0xffffffff,
275
560
  count: 2
276
561
  }
277
562
  ]);
278
563
  }
279
- catch (e_3) {
280
- env_3.error = e_3;
281
- env_3.hasError = true;
564
+ catch (e_7) {
565
+ env_7.error = e_7;
566
+ env_7.hasError = true;
282
567
  }
283
568
  finally {
284
- const result_3 = __disposeResources(env_3);
285
- if (result_3)
286
- await result_3;
569
+ const result_7 = __disposeResources(env_7);
570
+ if (result_7)
571
+ await result_7;
287
572
  }
288
573
  });
289
574
  test('re-apply update + delete', async () => {
290
- const env_4 = { stack: [], error: void 0, hasError: false };
575
+ const env_8 = { stack: [], error: void 0, hasError: false };
291
576
  try {
292
- const factory = __addDisposableResource(env_4, await generateStorageFactory(), true);
577
+ const factory = __addDisposableResource(env_8, await generateStorageFactory(), true);
293
578
  const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
294
579
  bucket_definitions:
295
580
  global:
296
581
  data:
297
582
  - SELECT id, description FROM "%"
298
- `));
583
+ `, { storageVersion }));
299
584
  const bucketStorage = factory.getInstance(syncRules);
300
585
  await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
586
+ await batch.markAllSnapshotDone('1/1');
301
587
  const sourceTable = TEST_TABLE;
302
588
  await batch.save({
303
589
  sourceTable,
@@ -310,6 +596,7 @@ bucket_definitions:
310
596
  });
311
597
  });
312
598
  await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
599
+ await batch.markAllSnapshotDone('1/1');
313
600
  const sourceTable = TEST_TABLE;
314
601
  await batch.save({
315
602
  sourceTable,
@@ -337,6 +624,7 @@ bucket_definitions:
337
624
  await batch.commit('1/1');
338
625
  });
339
626
  await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
627
+ await batch.markAllSnapshotDone('1/1');
340
628
  const sourceTable = TEST_TABLE;
341
629
  await batch.save({
342
630
  sourceTable,
@@ -385,24 +673,24 @@ bucket_definitions:
385
673
  ];
386
674
  expect(checksums).toEqual([
387
675
  {
388
- bucket: bucketRequest(syncRules, 'global[]'),
676
+ bucket: bucketRequest(syncRules, 'global[]').bucket,
389
677
  checksum: (c1 + c1 + c1 + c2) & 0xffffffff,
390
678
  count: 4
391
679
  }
392
680
  ]);
393
681
  }
394
- catch (e_4) {
395
- env_4.error = e_4;
396
- env_4.hasError = true;
682
+ catch (e_8) {
683
+ env_8.error = e_8;
684
+ env_8.hasError = true;
397
685
  }
398
686
  finally {
399
- const result_4 = __disposeResources(env_4);
400
- if (result_4)
401
- await result_4;
687
+ const result_8 = __disposeResources(env_8);
688
+ if (result_8)
689
+ await result_8;
402
690
  }
403
691
  });
404
692
  test('batch with overlapping replica ids', async () => {
405
- const env_5 = { stack: [], error: void 0, hasError: false };
693
+ const env_9 = { stack: [], error: void 0, hasError: false };
406
694
  try {
407
695
  // This test checks that we get the correct output when processing rows with:
408
696
  // 1. changing replica ids
@@ -411,16 +699,17 @@ bucket_definitions:
411
699
  // It can break at two places:
412
700
  // 1. Not getting the correct "current_data" state for each operation.
413
701
  // 2. Output order not being correct.
414
- const factory = __addDisposableResource(env_5, await generateStorageFactory(), true);
702
+ const factory = __addDisposableResource(env_9, await generateStorageFactory(), true);
415
703
  const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
416
704
  bucket_definitions:
417
705
  global:
418
706
  data:
419
707
  - SELECT id, description FROM "test"
420
- `));
708
+ `, { storageVersion }));
421
709
  const bucketStorage = factory.getInstance(syncRules);
422
710
  // Pre-setup
423
711
  const result1 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
712
+ await batch.markAllSnapshotDone('1/1');
424
713
  const sourceTable = TEST_TABLE;
425
714
  await batch.save({
426
715
  sourceTable,
@@ -540,18 +829,18 @@ bucket_definitions:
540
829
  { op: 'PUT', object_id: 'test5', data: JSON.stringify({ id: 'test5', description: 'test5d' }) }
541
830
  ]);
542
831
  }
543
- catch (e_5) {
544
- env_5.error = e_5;
545
- env_5.hasError = true;
832
+ catch (e_9) {
833
+ env_9.error = e_9;
834
+ env_9.hasError = true;
546
835
  }
547
836
  finally {
548
- const result_5 = __disposeResources(env_5);
549
- if (result_5)
550
- await result_5;
837
+ const result_9 = __disposeResources(env_9);
838
+ if (result_9)
839
+ await result_9;
551
840
  }
552
841
  });
553
842
  test('changed data with replica identity full', async () => {
554
- const env_6 = { stack: [], error: void 0, hasError: false };
843
+ const env_10 = { stack: [], error: void 0, hasError: false };
555
844
  try {
556
845
  function rid2(id, description) {
557
846
  return getUuidReplicaIdentityBson({ id, description }, [
@@ -559,17 +848,20 @@ bucket_definitions:
559
848
  { name: 'description', type: 'VARCHAR', typeId: 25 }
560
849
  ]);
561
850
  }
562
- const factory = __addDisposableResource(env_6, await generateStorageFactory(), true);
851
+ const factory = __addDisposableResource(env_10, await generateStorageFactory(), true);
563
852
  const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
564
853
  bucket_definitions:
565
854
  global:
566
855
  data:
567
856
  - SELECT id, description FROM "test"
568
- `));
857
+ `, {
858
+ storageVersion
859
+ }));
569
860
  const bucketStorage = factory.getInstance(syncRules);
570
- const sourceTable = test_utils.makeTestTable('test', ['id', 'description']);
861
+ const sourceTable = test_utils.makeTestTable('test', ['id', 'description'], config);
571
862
  // Pre-setup
572
863
  const result1 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
864
+ await batch.markAllSnapshotDone('1/1');
573
865
  await batch.save({
574
866
  sourceTable,
575
867
  tag: storage.SaveOperationTag.INSERT,
@@ -646,18 +938,18 @@ bucket_definitions:
646
938
  }
647
939
  ]);
648
940
  }
649
- catch (e_6) {
650
- env_6.error = e_6;
651
- env_6.hasError = true;
941
+ catch (e_10) {
942
+ env_10.error = e_10;
943
+ env_10.hasError = true;
652
944
  }
653
945
  finally {
654
- const result_6 = __disposeResources(env_6);
655
- if (result_6)
656
- await result_6;
946
+ const result_10 = __disposeResources(env_10);
947
+ if (result_10)
948
+ await result_10;
657
949
  }
658
950
  });
659
951
  test('unchanged data with replica identity full', async () => {
660
- const env_7 = { stack: [], error: void 0, hasError: false };
952
+ const env_11 = { stack: [], error: void 0, hasError: false };
661
953
  try {
662
954
  function rid2(id, description) {
663
955
  return getUuidReplicaIdentityBson({ id, description }, [
@@ -665,17 +957,20 @@ bucket_definitions:
665
957
  { name: 'description', type: 'VARCHAR', typeId: 25 }
666
958
  ]);
667
959
  }
668
- const factory = __addDisposableResource(env_7, await generateStorageFactory(), true);
960
+ const factory = __addDisposableResource(env_11, await generateStorageFactory(), true);
669
961
  const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
670
962
  bucket_definitions:
671
963
  global:
672
964
  data:
673
965
  - SELECT id, description FROM "test"
674
- `));
966
+ `, {
967
+ storageVersion
968
+ }));
675
969
  const bucketStorage = factory.getInstance(syncRules);
676
- const sourceTable = test_utils.makeTestTable('test', ['id', 'description']);
970
+ const sourceTable = test_utils.makeTestTable('test', ['id', 'description'], config);
677
971
  // Pre-setup
678
972
  const result1 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
973
+ await batch.markAllSnapshotDone('1/1');
679
974
  await batch.save({
680
975
  sourceTable,
681
976
  tag: storage.SaveOperationTag.INSERT,
@@ -745,32 +1040,35 @@ bucket_definitions:
745
1040
  }
746
1041
  ]);
747
1042
  }
748
- catch (e_7) {
749
- env_7.error = e_7;
750
- env_7.hasError = true;
1043
+ catch (e_11) {
1044
+ env_11.error = e_11;
1045
+ env_11.hasError = true;
751
1046
  }
752
1047
  finally {
753
- const result_7 = __disposeResources(env_7);
754
- if (result_7)
755
- await result_7;
1048
+ const result_11 = __disposeResources(env_11);
1049
+ if (result_11)
1050
+ await result_11;
756
1051
  }
757
1052
  });
758
1053
  test('large batch', async () => {
759
- const env_8 = { stack: [], error: void 0, hasError: false };
1054
+ const env_12 = { stack: [], error: void 0, hasError: false };
760
1055
  try {
761
1056
  // Test syncing a batch of data that is small in count,
762
1057
  // but large enough in size to be split over multiple returned batches.
763
1058
  // The specific batch splits is an implementation detail of the storage driver,
764
1059
  // and the test will have to updated when other implementations are added.
765
- const factory = __addDisposableResource(env_8, await generateStorageFactory(), true);
1060
+ const factory = __addDisposableResource(env_12, await generateStorageFactory(), true);
766
1061
  const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
767
1062
  bucket_definitions:
768
1063
  global:
769
1064
  data:
770
1065
  - SELECT id, description FROM "%"
771
- `));
1066
+ `, {
1067
+ storageVersion
1068
+ }));
772
1069
  const bucketStorage = factory.getInstance(syncRules);
773
1070
  await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
1071
+ await batch.markAllSnapshotDone('1/1');
774
1072
  const sourceTable = TEST_TABLE;
775
1073
  const largeDescription = '0123456789'.repeat(12_000_00);
776
1074
  await batch.save({
@@ -840,29 +1138,32 @@ bucket_definitions:
840
1138
  expect(test_utils.getBatchData(batch3)).toEqual([]);
841
1139
  expect(test_utils.getBatchMeta(batch3)).toEqual(null);
842
1140
  }
843
- catch (e_8) {
844
- env_8.error = e_8;
845
- env_8.hasError = true;
1141
+ catch (e_12) {
1142
+ env_12.error = e_12;
1143
+ env_12.hasError = true;
846
1144
  }
847
1145
  finally {
848
- const result_8 = __disposeResources(env_8);
849
- if (result_8)
850
- await result_8;
1146
+ const result_12 = __disposeResources(env_12);
1147
+ if (result_12)
1148
+ await result_12;
851
1149
  }
852
1150
  });
853
1151
  test('long batch', async () => {
854
- const env_9 = { stack: [], error: void 0, hasError: false };
1152
+ const env_13 = { stack: [], error: void 0, hasError: false };
855
1153
  try {
856
1154
  // Test syncing a batch of data that is limited by count.
857
- const factory = __addDisposableResource(env_9, await generateStorageFactory(), true);
1155
+ const factory = __addDisposableResource(env_13, await generateStorageFactory(), true);
858
1156
  const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
859
1157
  bucket_definitions:
860
1158
  global:
861
1159
  data:
862
1160
  - SELECT id, description FROM "%"
863
- `));
1161
+ `, {
1162
+ storageVersion
1163
+ }));
864
1164
  const bucketStorage = factory.getInstance(syncRules);
865
1165
  await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
1166
+ await batch.markAllSnapshotDone('1/1');
866
1167
  const sourceTable = TEST_TABLE;
867
1168
  for (let i = 1; i <= 6; i++) {
868
1169
  await batch.save({
@@ -908,21 +1209,21 @@ bucket_definitions:
908
1209
  expect(test_utils.getBatchData(batch3)).toEqual([]);
909
1210
  expect(test_utils.getBatchMeta(batch3)).toEqual(null);
910
1211
  }
911
- catch (e_9) {
912
- env_9.error = e_9;
913
- env_9.hasError = true;
1212
+ catch (e_13) {
1213
+ env_13.error = e_13;
1214
+ env_13.hasError = true;
914
1215
  }
915
1216
  finally {
916
- const result_9 = __disposeResources(env_9);
917
- if (result_9)
918
- await result_9;
1217
+ const result_13 = __disposeResources(env_13);
1218
+ if (result_13)
1219
+ await result_13;
919
1220
  }
920
1221
  });
921
1222
  describe('batch has_more', () => {
922
1223
  const setup = async (options) => {
923
- const env_10 = { stack: [], error: void 0, hasError: false };
1224
+ const env_14 = { stack: [], error: void 0, hasError: false };
924
1225
  try {
925
- const factory = __addDisposableResource(env_10, await generateStorageFactory(), true);
1226
+ const factory = __addDisposableResource(env_14, await generateStorageFactory(), true);
926
1227
  const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
927
1228
  bucket_definitions:
928
1229
  global1:
@@ -931,9 +1232,10 @@ bucket_definitions:
931
1232
  global2:
932
1233
  data:
933
1234
  - SELECT id, description FROM test WHERE bucket = 'global2'
934
- `));
1235
+ `, { storageVersion }));
935
1236
  const bucketStorage = factory.getInstance(syncRules);
936
1237
  await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
1238
+ await batch.markAllSnapshotDone('1/1');
937
1239
  const sourceTable = TEST_TABLE;
938
1240
  for (let i = 1; i <= 10; i++) {
939
1241
  await batch.save({
@@ -956,21 +1258,21 @@ bucket_definitions:
956
1258
  ]), options));
957
1259
  return { syncRules, batch };
958
1260
  }
959
- catch (e_10) {
960
- env_10.error = e_10;
961
- env_10.hasError = true;
1261
+ catch (e_14) {
1262
+ env_14.error = e_14;
1263
+ env_14.hasError = true;
962
1264
  }
963
1265
  finally {
964
- const result_10 = __disposeResources(env_10);
965
- if (result_10)
966
- await result_10;
1266
+ const result_14 = __disposeResources(env_14);
1267
+ if (result_14)
1268
+ await result_14;
967
1269
  }
968
1270
  };
969
1271
  test('batch has_more (1)', async () => {
970
1272
  const { batch, syncRules } = await setup({ limit: 5 });
971
1273
  expect(batch.length).toEqual(2);
972
- expect(batch[0].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global1[]'));
973
- expect(batch[1].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global2[]'));
1274
+ expect(batch[0].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global1[]').bucket);
1275
+ expect(batch[1].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global2[]').bucket);
974
1276
  expect(test_utils.getBatchData(batch[0])).toEqual([
975
1277
  { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 }
976
1278
  ]);
@@ -994,8 +1296,8 @@ bucket_definitions:
994
1296
  test('batch has_more (2)', async () => {
995
1297
  const { batch, syncRules } = await setup({ limit: 11 });
996
1298
  expect(batch.length).toEqual(2);
997
- expect(batch[0].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global1[]'));
998
- expect(batch[1].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global2[]'));
1299
+ expect(batch[0].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global1[]').bucket);
1300
+ expect(batch[1].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global2[]').bucket);
999
1301
  expect(test_utils.getBatchData(batch[0])).toEqual([
1000
1302
  { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 }
1001
1303
  ]);
@@ -1025,9 +1327,9 @@ bucket_definitions:
1025
1327
  // 50 bytes is more than 1 row, less than 2 rows
1026
1328
  const { batch, syncRules } = await setup({ limit: 3, chunkLimitBytes: 50 });
1027
1329
  expect(batch.length).toEqual(3);
1028
- expect(batch[0].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global1[]'));
1029
- expect(batch[1].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global2[]'));
1030
- expect(batch[2].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global2[]'));
1330
+ expect(batch[0].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global1[]').bucket);
1331
+ expect(batch[1].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global2[]').bucket);
1332
+ expect(batch[2].chunkData.bucket).toEqual(bucketRequest(syncRules, 'global2[]').bucket);
1031
1333
  expect(test_utils.getBatchData(batch[0])).toEqual([
1032
1334
  { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 }
1033
1335
  ]);
@@ -1055,9 +1357,9 @@ bucket_definitions:
1055
1357
  });
1056
1358
  });
1057
1359
  test('empty storage metrics', async () => {
1058
- const env_11 = { stack: [], error: void 0, hasError: false };
1360
+ const env_15 = { stack: [], error: void 0, hasError: false };
1059
1361
  try {
1060
- const f = __addDisposableResource(env_11, await generateStorageFactory({ dropAll: true }), true);
1362
+ const f = __addDisposableResource(env_15, await generateStorageFactory({ dropAll: true }), true);
1061
1363
  const metrics = await f.getStorageMetrics();
1062
1364
  expect(metrics).toEqual({
1063
1365
  operations_size_bytes: 0,
@@ -1067,38 +1369,42 @@ bucket_definitions:
1067
1369
  const r = await f.configureSyncRules(updateSyncRulesFromYaml('bucket_definitions: {}'));
1068
1370
  const storage = f.getInstance(r.persisted_sync_rules);
1069
1371
  await storage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
1372
+ await batch.markAllSnapshotDone('1/0');
1070
1373
  await batch.keepalive('1/0');
1071
1374
  });
1072
1375
  await f.getStorageMetrics();
1073
1376
  }
1074
- catch (e_11) {
1075
- env_11.error = e_11;
1076
- env_11.hasError = true;
1377
+ catch (e_15) {
1378
+ env_15.error = e_15;
1379
+ env_15.hasError = true;
1077
1380
  }
1078
1381
  finally {
1079
- const result_11 = __disposeResources(env_11);
1080
- if (result_11)
1081
- await result_11;
1382
+ const result_15 = __disposeResources(env_15);
1383
+ if (result_15)
1384
+ await result_15;
1082
1385
  }
1083
1386
  });
1084
1387
  test('op_id initialization edge case', async () => {
1085
- const env_12 = { stack: [], error: void 0, hasError: false };
1388
+ const env_16 = { stack: [], error: void 0, hasError: false };
1086
1389
  try {
1087
1390
  // Test syncing a batch of data that is small in count,
1088
1391
  // but large enough in size to be split over multiple returned chunks.
1089
1392
  // Similar to the above test, but splits over 1MB chunks.
1090
- const factory = __addDisposableResource(env_12, await generateStorageFactory(), true);
1393
+ const factory = __addDisposableResource(env_16, await generateStorageFactory(), true);
1091
1394
  const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
1092
1395
  bucket_definitions:
1093
1396
  global:
1094
1397
  data:
1095
1398
  - SELECT id FROM test
1096
1399
  - SELECT id FROM test_ignore WHERE false
1097
- `));
1400
+ `, {
1401
+ storageVersion
1402
+ }));
1098
1403
  const bucketStorage = factory.getInstance(syncRules);
1099
- const sourceTable = test_utils.makeTestTable('test', ['id']);
1100
- const sourceTableIgnore = test_utils.makeTestTable('test_ignore', ['id']);
1404
+ const sourceTable = test_utils.makeTestTable('test', ['id'], config);
1405
+ const sourceTableIgnore = test_utils.makeTestTable('test_ignore', ['id'], config);
1101
1406
  const result1 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
1407
+ await batch.markAllSnapshotDone('1/1');
1102
1408
  // This saves a record to current_data, but not bucket_data.
1103
1409
  // This causes a checkpoint to be created without increasing the op_id sequence.
1104
1410
  await batch.save({
@@ -1125,29 +1431,32 @@ bucket_definitions:
1125
1431
  // we expect 0n and 1n, or 1n and 2n.
1126
1432
  expect(checkpoint2).toBeGreaterThan(checkpoint1);
1127
1433
  }
1128
- catch (e_12) {
1129
- env_12.error = e_12;
1130
- env_12.hasError = true;
1434
+ catch (e_16) {
1435
+ env_16.error = e_16;
1436
+ env_16.hasError = true;
1131
1437
  }
1132
1438
  finally {
1133
- const result_12 = __disposeResources(env_12);
1134
- if (result_12)
1135
- await result_12;
1439
+ const result_16 = __disposeResources(env_16);
1440
+ if (result_16)
1441
+ await result_16;
1136
1442
  }
1137
1443
  });
1138
1444
  test('unchanged checksums', async () => {
1139
- const env_13 = { stack: [], error: void 0, hasError: false };
1445
+ const env_17 = { stack: [], error: void 0, hasError: false };
1140
1446
  try {
1141
- const factory = __addDisposableResource(env_13, await generateStorageFactory(), true);
1447
+ const factory = __addDisposableResource(env_17, await generateStorageFactory(), true);
1142
1448
  const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
1143
1449
  bucket_definitions:
1144
1450
  global:
1145
1451
  data:
1146
1452
  - SELECT client_id as id, description FROM "%"
1147
- `));
1453
+ `, {
1454
+ storageVersion
1455
+ }));
1148
1456
  const bucketStorage = factory.getInstance(syncRules);
1149
1457
  const sourceTable = TEST_TABLE;
1150
1458
  await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
1459
+ await batch.markAllSnapshotDone('1/1');
1151
1460
  await batch.save({
1152
1461
  sourceTable,
1153
1462
  tag: storage.SaveOperationTag.INSERT,
@@ -1163,44 +1472,259 @@ bucket_definitions:
1163
1472
  const checksums = [
1164
1473
  ...(await bucketStorage.getChecksums(checkpoint, bucketRequests(syncRules, ['global[]']))).values()
1165
1474
  ];
1166
- expect(checksums).toEqual([{ bucket: bucketRequest(syncRules, 'global[]'), checksum: 1917136889, count: 1 }]);
1475
+ expect(checksums).toEqual([
1476
+ { bucket: bucketRequest(syncRules, 'global[]').bucket, checksum: 1917136889, count: 1 }
1477
+ ]);
1167
1478
  const checksums2 = [
1168
1479
  ...(await bucketStorage.getChecksums(checkpoint + 1n, bucketRequests(syncRules, ['global[]']))).values()
1169
1480
  ];
1170
- expect(checksums2).toEqual([{ bucket: bucketRequest(syncRules, 'global[]'), checksum: 1917136889, count: 1 }]);
1481
+ expect(checksums2).toEqual([
1482
+ { bucket: bucketRequest(syncRules, 'global[]').bucket, checksum: 1917136889, count: 1 }
1483
+ ]);
1171
1484
  }
1172
- catch (e_13) {
1173
- env_13.error = e_13;
1174
- env_13.hasError = true;
1485
+ catch (e_17) {
1486
+ env_17.error = e_17;
1487
+ env_17.hasError = true;
1175
1488
  }
1176
1489
  finally {
1177
- const result_13 = __disposeResources(env_13);
1178
- if (result_13)
1179
- await result_13;
1490
+ const result_17 = __disposeResources(env_17);
1491
+ if (result_17)
1492
+ await result_17;
1493
+ }
1494
+ });
1495
+ testChecksumBatching(config);
1496
+ test('empty checkpoints (1)', async () => {
1497
+ const env_18 = { stack: [], error: void 0, hasError: false };
1498
+ try {
1499
+ const factory = __addDisposableResource(env_18, await generateStorageFactory(), true);
1500
+ const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
1501
+ bucket_definitions:
1502
+ global:
1503
+ data:
1504
+ - SELECT id, description FROM "%"
1505
+ `, { storageVersion }));
1506
+ const bucketStorage = factory.getInstance(syncRules);
1507
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
1508
+ await batch.markAllSnapshotDone('1/1');
1509
+ await batch.commit('1/1');
1510
+ const cp1 = await bucketStorage.getCheckpoint();
1511
+ expect(cp1.lsn).toEqual('1/1');
1512
+ await batch.commit('2/1', { createEmptyCheckpoints: true });
1513
+ const cp2 = await bucketStorage.getCheckpoint();
1514
+ expect(cp2.lsn).toEqual('2/1');
1515
+ await batch.keepalive('3/1');
1516
+ const cp3 = await bucketStorage.getCheckpoint();
1517
+ expect(cp3.lsn).toEqual('3/1');
1518
+ // For the last one, we skip creating empty checkpoints
1519
+ // This means the LSN stays at 3/1.
1520
+ await batch.commit('4/1', { createEmptyCheckpoints: false });
1521
+ const cp4 = await bucketStorage.getCheckpoint();
1522
+ expect(cp4.lsn).toEqual('3/1');
1523
+ });
1524
+ }
1525
+ catch (e_18) {
1526
+ env_18.error = e_18;
1527
+ env_18.hasError = true;
1528
+ }
1529
+ finally {
1530
+ const result_18 = __disposeResources(env_18);
1531
+ if (result_18)
1532
+ await result_18;
1533
+ }
1534
+ });
1535
+ test('empty checkpoints (2)', async () => {
1536
+ const env_19 = { stack: [], error: void 0, hasError: false };
1537
+ try {
1538
+ const factory = __addDisposableResource(env_19, await generateStorageFactory(), true);
1539
+ const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
1540
+ bucket_definitions:
1541
+ global:
1542
+ data:
1543
+ - SELECT id, description FROM "%"
1544
+ `, {
1545
+ storageVersion
1546
+ }));
1547
+ const bucketStorage = factory.getInstance(syncRules);
1548
+ const sourceTable = TEST_TABLE;
1549
+ // We simulate two concurrent batches, but nesting is the easiest way to do this.
1550
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch1) => {
1551
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch2) => {
1552
+ await batch1.markAllSnapshotDone('1/1');
1553
+ await batch1.commit('1/1');
1554
+ await batch1.commit('2/1', { createEmptyCheckpoints: false });
1555
+ const cp2 = await bucketStorage.getCheckpoint();
1556
+ expect(cp2.lsn).toEqual('1/1'); // checkpoint 2/1 skipped
1557
+ await batch2.save({
1558
+ sourceTable,
1559
+ tag: storage.SaveOperationTag.INSERT,
1560
+ after: {
1561
+ id: 'test1',
1562
+ description: 'test1a'
1563
+ },
1564
+ afterReplicaId: test_utils.rid('test1')
1565
+ });
1566
+ // This simulates what happens on a snapshot processor.
1567
+ // This may later change to a flush() rather than commit().
1568
+ await batch2.commit(test_utils.BATCH_OPTIONS.zeroLSN);
1569
+ const cp3 = await bucketStorage.getCheckpoint();
1570
+ expect(cp3.lsn).toEqual('1/1'); // Still unchanged
1571
+ // This now needs to advance the LSN, despite {createEmptyCheckpoints: false}
1572
+ await batch1.commit('4/1', { createEmptyCheckpoints: false });
1573
+ const cp4 = await bucketStorage.getCheckpoint();
1574
+ expect(cp4.lsn).toEqual('4/1');
1575
+ });
1576
+ });
1577
+ }
1578
+ catch (e_19) {
1579
+ env_19.error = e_19;
1580
+ env_19.hasError = true;
1581
+ }
1582
+ finally {
1583
+ const result_19 = __disposeResources(env_19);
1584
+ if (result_19)
1585
+ await result_19;
1586
+ }
1587
+ });
1588
+ test('empty checkpoints (sync rule activation)', async () => {
1589
+ const env_20 = { stack: [], error: void 0, hasError: false };
1590
+ try {
1591
+ const factory = __addDisposableResource(env_20, await generateStorageFactory(), true);
1592
+ const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
1593
+ bucket_definitions:
1594
+ global:
1595
+ data:
1596
+ - SELECT id, description FROM "%"
1597
+ `, {
1598
+ storageVersion
1599
+ }));
1600
+ const bucketStorage = factory.getInstance(syncRules);
1601
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
1602
+ const result = await batch.commit('1/1', { createEmptyCheckpoints: false });
1603
+ expect(result).toEqual({ checkpointBlocked: true, checkpointCreated: false });
1604
+ // Snapshot is only valid once we reach 3/1
1605
+ await batch.markAllSnapshotDone('3/1');
1606
+ });
1607
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
1608
+ // 2/1 < 3/1 - snapshot not valid yet, block checkpoint
1609
+ const result = await batch.commit('2/1', { createEmptyCheckpoints: false });
1610
+ expect(result).toEqual({ checkpointBlocked: true, checkpointCreated: false });
1611
+ });
1612
+ // No empty checkpoint should be created by the commit above.
1613
+ const cp1 = await bucketStorage.getCheckpoint();
1614
+ expect(cp1.lsn).toEqual(null);
1615
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
1616
+ // After this commit, the snapshot should be valid.
1617
+ // We specifically check that this is done even if createEmptyCheckpoints: false.
1618
+ const result = await batch.commit('3/1', { createEmptyCheckpoints: false });
1619
+ expect(result).toEqual({ checkpointBlocked: false, checkpointCreated: true });
1620
+ });
1621
+ // Now, the checkpoint should advance the sync rules active.
1622
+ const cp2 = await bucketStorage.getCheckpoint();
1623
+ expect(cp2.lsn).toEqual('3/1');
1624
+ const activeSyncRules = await factory.getActiveSyncRulesContent();
1625
+ expect(activeSyncRules?.id).toEqual(syncRules.id);
1626
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
1627
+ // At this point, it should be a truely empty checkpoint
1628
+ const result = await batch.commit('4/1', { createEmptyCheckpoints: false });
1629
+ expect(result).toEqual({ checkpointBlocked: false, checkpointCreated: false });
1630
+ });
1631
+ // Unchanged
1632
+ const cp3 = await bucketStorage.getCheckpoint();
1633
+ expect(cp3.lsn).toEqual('3/1');
1634
+ }
1635
+ catch (e_20) {
1636
+ env_20.error = e_20;
1637
+ env_20.hasError = true;
1638
+ }
1639
+ finally {
1640
+ const result_20 = __disposeResources(env_20);
1641
+ if (result_20)
1642
+ await result_20;
1643
+ }
1644
+ });
1645
+ test.runIf(storageVersion >= 3)('deleting while streaming', async () => {
1646
+ const env_21 = { stack: [], error: void 0, hasError: false };
1647
+ try {
1648
+ const factory = __addDisposableResource(env_21, await generateStorageFactory(), true);
1649
+ const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
1650
+ bucket_definitions:
1651
+ global:
1652
+ data:
1653
+ - SELECT id, description FROM "%"
1654
+ `, {
1655
+ storageVersion
1656
+ }));
1657
+ const bucketStorage = factory.getInstance(syncRules);
1658
+ const sourceTable = TEST_TABLE;
1659
+ // We simulate two concurrent batches, and nesting is the easiest way to do this.
1660
+ // For this test, we assume that we start with a row "test1", which is picked up by a snapshot
1661
+ // query, right before the delete is streamed. But the snapshot query is only persisted _after_
1662
+ // the delete is streamed, and we need to ensure that the streamed delete takes precedence.
1663
+ await bucketStorage.startBatch({ ...test_utils.BATCH_OPTIONS, skipExistingRows: true }, async (snapshotBatch) => {
1664
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (streamingBatch) => {
1665
+ streamingBatch.save({
1666
+ sourceTable,
1667
+ tag: storage.SaveOperationTag.DELETE,
1668
+ before: {
1669
+ id: 'test1'
1670
+ },
1671
+ beforeReplicaId: test_utils.rid('test1')
1672
+ });
1673
+ await streamingBatch.commit('2/1');
1674
+ await snapshotBatch.save({
1675
+ sourceTable,
1676
+ tag: storage.SaveOperationTag.INSERT,
1677
+ after: {
1678
+ id: 'test1',
1679
+ description: 'test1a'
1680
+ },
1681
+ afterReplicaId: test_utils.rid('test1')
1682
+ });
1683
+ await snapshotBatch.markAllSnapshotDone('3/1');
1684
+ await snapshotBatch.commit('1/1');
1685
+ await streamingBatch.keepalive('3/1');
1686
+ });
1687
+ });
1688
+ const cp = await bucketStorage.getCheckpoint();
1689
+ expect(cp.lsn).toEqual('3/1');
1690
+ const data = await test_utils.fromAsync(bucketStorage.getBucketDataBatch(cp.checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]])));
1691
+ expect(data).toEqual([]);
1692
+ }
1693
+ catch (e_21) {
1694
+ env_21.error = e_21;
1695
+ env_21.hasError = true;
1696
+ }
1697
+ finally {
1698
+ const result_21 = __disposeResources(env_21);
1699
+ if (result_21)
1700
+ await result_21;
1180
1701
  }
1181
1702
  });
1182
- testChecksumBatching(generateStorageFactory);
1183
1703
  }
1184
1704
  /**
1185
1705
  * This specifically tests an issue we ran into with MongoDB storage.
1186
1706
  *
1187
1707
  * Exposed as a separate test so we can test with more storage parameters.
1188
1708
  */
1189
- export function testChecksumBatching(generateStorageFactory) {
1709
+ export function testChecksumBatching(config) {
1710
+ const storageVersion = config.storageVersion ?? CURRENT_STORAGE_VERSION;
1190
1711
  test('checksums for multiple buckets', async () => {
1191
- const env_14 = { stack: [], error: void 0, hasError: false };
1712
+ const env_22 = { stack: [], error: void 0, hasError: false };
1192
1713
  try {
1193
- const factory = __addDisposableResource(env_14, await generateStorageFactory(), true);
1714
+ const factory = __addDisposableResource(env_22, await config.factory(), true);
1194
1715
  const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
1195
1716
  bucket_definitions:
1196
1717
  user:
1197
1718
  parameters: select request.user_id() as user_id
1198
1719
  data:
1199
1720
  - select id, description from test where user_id = bucket.user_id
1200
- `));
1721
+ `, {
1722
+ storageVersion
1723
+ }));
1201
1724
  const bucketStorage = factory.getInstance(syncRules);
1202
- const sourceTable = TEST_TABLE;
1725
+ const sourceTable = test_utils.makeTestTable('test', ['id'], config);
1203
1726
  await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
1727
+ await batch.markAllSnapshotDone('1/1');
1204
1728
  for (let u of ['u1', 'u2', 'u3', 'u4']) {
1205
1729
  for (let t of ['t1', 't2', 't3', 't4']) {
1206
1730
  const id = `${t}_${u}`;
@@ -1224,20 +1748,20 @@ bucket_definitions:
1224
1748
  const checksums = [...(await bucketStorage.getChecksums(checkpoint, buckets)).values()];
1225
1749
  checksums.sort((a, b) => a.bucket.localeCompare(b.bucket));
1226
1750
  expect(checksums).toEqual([
1227
- { bucket: bucketRequest(syncRules, 'user["u1"]'), count: 4, checksum: 346204588 },
1228
- { bucket: bucketRequest(syncRules, 'user["u2"]'), count: 4, checksum: 5261081 },
1229
- { bucket: bucketRequest(syncRules, 'user["u3"]'), count: 4, checksum: 134760718 },
1230
- { bucket: bucketRequest(syncRules, 'user["u4"]'), count: 4, checksum: -302639724 }
1751
+ { bucket: bucketRequest(syncRules, 'user["u1"]').bucket, count: 4, checksum: 346204588 },
1752
+ { bucket: bucketRequest(syncRules, 'user["u2"]').bucket, count: 4, checksum: 5261081 },
1753
+ { bucket: bucketRequest(syncRules, 'user["u3"]').bucket, count: 4, checksum: 134760718 },
1754
+ { bucket: bucketRequest(syncRules, 'user["u4"]').bucket, count: 4, checksum: -302639724 }
1231
1755
  ]);
1232
1756
  }
1233
- catch (e_14) {
1234
- env_14.error = e_14;
1235
- env_14.hasError = true;
1757
+ catch (e_22) {
1758
+ env_22.error = e_22;
1759
+ env_22.hasError = true;
1236
1760
  }
1237
1761
  finally {
1238
- const result_14 = __disposeResources(env_14);
1239
- if (result_14)
1240
- await result_14;
1762
+ const result_22 = __disposeResources(env_22);
1763
+ if (result_22)
1764
+ await result_22;
1241
1765
  }
1242
1766
  });
1243
1767
  }