@powersync/service-core-tests 0.9.3 → 0.9.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -207,8 +207,7 @@ bucket_definitions:
  await result_2;
  }
  });
- // Temporarily skipped - interruption disabled
- test.skip('sync interrupts low-priority buckets on new checkpoints', async () => {
+ test('sync interrupts low-priority buckets on new checkpoints', async () => {
  const env_3 = { stack: [], error: void 0, hasError: false };
  try {
  const f = __addDisposableResource(env_3, await factory(), true);
@@ -319,10 +318,290 @@ bucket_definitions:
  await result_3;
  }
  });
- test('sends checkpoint complete line for empty checkpoint', async () => {
+ test('sync interruptions with unrelated data', async () => {
  const env_4 = { stack: [], error: void 0, hasError: false };
  try {
  const f = __addDisposableResource(env_4, await factory(), true);
+ const syncRules = await f.updateSyncRules({
+ content: `
+ bucket_definitions:
+ b0:
+ priority: 2
+ data:
+ - SELECT * FROM test WHERE LENGTH(id) <= 5;
+ b1:
+ priority: 1
+ parameters: SELECT request.user_id() as user_id
+ data:
+ - SELECT * FROM test WHERE LENGTH(id) > 5 AND description = bucket.user_id;
+ `
+ });
+ const bucketStorage = f.getInstance(syncRules);
+ await bucketStorage.autoActivate();
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ // Initial data: Add one priority row and 10k low-priority rows.
+ await batch.save({
+ sourceTable: TEST_TABLE,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'highprio',
+ description: 'user_one'
+ },
+ afterReplicaId: 'highprio'
+ });
+ for (let i = 0; i < 10_000; i++) {
+ await batch.save({
+ sourceTable: TEST_TABLE,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: `${i}`,
+ description: 'low prio'
+ },
+ afterReplicaId: `${i}`
+ });
+ }
+ await batch.commit('0/1');
+ });
+ const stream = sync.streamResponse({
+ syncContext,
+ bucketStorage,
+ syncRules: bucketStorage.getParsedSyncRules(test_utils.PARSE_OPTIONS),
+ params: {
+ buckets: [],
+ include_checksum: true,
+ raw_data: true
+ },
+ tracker,
+ syncParams: new RequestParameters({ sub: 'user_one' }, {}),
+ token: { sub: 'user_one', exp: Date.now() / 1000 + 100000 }
+ });
+ let sentCheckpoints = 0;
+ let completedCheckpoints = 0;
+ let sentRows = 0;
+ // Expected flow:
+ // 1. Stream starts, we receive a checkpoint followed by the one high-prio row and a partial completion.
+ // 2. We insert a new row that is not part of a bucket relevant to this stream.
+ // 3. This means that no interruption happens and we receive all the low-priority data, followed by a checkpoint.
+ // 4. After the checkpoint, add a new row that _is_ relevant for this sync, which should trigger a new iteration.
+ for await (let next of stream) {
+ if (typeof next == 'string') {
+ next = JSON.parse(next);
+ }
+ if (typeof next === 'object' && next !== null) {
+ if ('partial_checkpoint_complete' in next) {
+ if (sentCheckpoints == 1) {
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ // Add a high-priority row that doesn't affect this sync stream.
+ await batch.save({
+ sourceTable: TEST_TABLE,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'highprio2',
+ description: 'user_two'
+ },
+ afterReplicaId: 'highprio2'
+ });
+ await batch.commit('0/2');
+ });
+ }
+ else {
+ expect(sentCheckpoints).toBe(2);
+ expect(sentRows).toBe(10002);
+ }
+ }
+ if ('checkpoint' in next || 'checkpoint_diff' in next) {
+ sentCheckpoints += 1;
+ }
+ if ('data' in next) {
+ sentRows += next.data.data.length;
+ }
+ if ('checkpoint_complete' in next) {
+ completedCheckpoints++;
+ if (completedCheckpoints == 2) {
+ break;
+ }
+ if (completedCheckpoints == 1) {
+ expect(sentRows).toBe(10001);
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ // Add a high-priority row that affects this sync stream.
+ await batch.save({
+ sourceTable: TEST_TABLE,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'highprio3',
+ description: 'user_one'
+ },
+ afterReplicaId: 'highprio3'
+ });
+ await batch.commit('0/3');
+ });
+ }
+ }
+ }
+ }
+ expect(sentCheckpoints).toBe(2);
+ expect(sentRows).toBe(10002);
+ }
+ catch (e_4) {
+ env_4.error = e_4;
+ env_4.hasError = true;
+ }
+ finally {
+ const result_4 = __disposeResources(env_4);
+ if (result_4)
+ await result_4;
+ }
+ });
+ test('sync interrupts low-priority buckets on new checkpoints (2)', async () => {
+ const env_5 = { stack: [], error: void 0, hasError: false };
+ try {
+ const f = __addDisposableResource(env_5, await factory(), true);
+ // bucket0a -> send all data
+ // then interrupt checkpoint with new data for all buckets
+ // -> data for all buckets should be sent in the new checkpoint
+ const syncRules = await f.updateSyncRules({
+ content: `
+ bucket_definitions:
+ b0a:
+ priority: 2
+ data:
+ - SELECT * FROM test WHERE LENGTH(id) <= 5;
+ b0b:
+ priority: 2
+ data:
+ - SELECT * FROM test WHERE LENGTH(id) <= 5;
+ b1:
+ priority: 1
+ data:
+ - SELECT * FROM test WHERE LENGTH(id) > 5;
+ `
+ });
+ const bucketStorage = f.getInstance(syncRules);
+ await bucketStorage.autoActivate();
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ // Initial data: Add one priority row and 10k low-priority rows.
+ await batch.save({
+ sourceTable: TEST_TABLE,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'highprio',
+ description: 'High priority row'
+ },
+ afterReplicaId: 'highprio'
+ });
+ for (let i = 0; i < 2_000; i++) {
+ await batch.save({
+ sourceTable: TEST_TABLE,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: `${i}`,
+ description: 'low prio'
+ },
+ afterReplicaId: `${i}`
+ });
+ }
+ await batch.commit('0/1');
+ });
+ const stream = sync.streamResponse({
+ syncContext,
+ bucketStorage,
+ syncRules: bucketStorage.getParsedSyncRules(test_utils.PARSE_OPTIONS),
+ params: {
+ buckets: [],
+ include_checksum: true,
+ raw_data: true
+ },
+ tracker,
+ syncParams: new RequestParameters({ sub: '' }, {}),
+ token: { exp: Date.now() / 1000 + 10 }
+ });
+ let sentRows = 0;
+ let lines = [];
+ for await (let next of stream) {
+ if (typeof next == 'string') {
+ next = JSON.parse(next);
+ }
+ if (typeof next === 'object' && next !== null) {
+ if ('partial_checkpoint_complete' in next) {
+ lines.push(next);
+ }
+ if ('checkpoint' in next || 'checkpoint_diff' in next) {
+ lines.push(next);
+ }
+ if ('data' in next) {
+ lines.push({ data: { ...next.data, data: undefined } });
+ sentRows += next.data.data.length;
+ if (sentRows == 1001) {
+ // Save new data to interrupt the low-priority sync.
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ // Add another high-priority row. This should interrupt the long-running low-priority sync.
+ await batch.save({
+ sourceTable: TEST_TABLE,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'highprio2',
+ description: 'Another high-priority row'
+ },
+ afterReplicaId: 'highprio2'
+ });
+ // Also add a low-priority row
+ await batch.save({
+ sourceTable: TEST_TABLE,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: '2001',
+ description: 'Another low-priority row'
+ },
+ afterReplicaId: '2001'
+ });
+ await batch.commit('0/2');
+ });
+ if (sentRows >= 1000 && sentRows <= 2001) {
+ // pause for a bit to give the stream time to process interruptions.
+ // This covers the data batch above and the next one.
+ await timers.setTimeout(50);
+ }
+ }
+ }
+ if ('checkpoint_complete' in next) {
+ lines.push(next);
+ break;
+ }
+ }
+ }
+ // Expected lines (full details in snapshot):
+ //
+ // checkpoint (4001)
+ // data (b1[] 0 -> 1)
+ // partial_checkpoint_complete (4001, priority 1)
+ // data (b0a[], 0 -> 2000)
+ // ## adds new data, interrupting the checkpoint
+ // data (b0a[], 2000 -> 4000) # expected - stream is already busy with this by the time it receives the interruption
+ // checkpoint_diff (4004)
+ // data (b1[], 1 -> 4002)
+ // partial_checkpoint_complete (4004, priority 1)
+ // data (b0a[], 4000 -> 4003)
+ // data (b0b[], 0 -> 1999)
+ // data (b0b[], 1999 -> 3999)
+ // data (b0b[], 3999 -> 4004)
+ // checkpoint_complete (4004)
+ expect(lines).toMatchSnapshot();
+ expect(sentRows).toBe(4004);
+ }
+ catch (e_5) {
+ env_5.error = e_5;
+ env_5.hasError = true;
+ }
+ finally {
+ const result_5 = __disposeResources(env_5);
+ if (result_5)
+ await result_5;
+ }
+ });
+ test('sends checkpoint complete line for empty checkpoint', async () => {
+ const env_6 = { stack: [], error: void 0, hasError: false };
+ try {
+ const f = __addDisposableResource(env_6, await factory(), true);
  const syncRules = await f.updateSyncRules({
  content: BASIC_SYNC_RULES
  });
@@ -378,14 +657,14 @@ bucket_definitions:
  }
  expect(lines).toMatchSnapshot();
  }
- catch (e_4) {
- env_4.error = e_4;
- env_4.hasError = true;
+ catch (e_6) {
+ env_6.error = e_6;
+ env_6.hasError = true;
  }
  finally {
- const result_4 = __disposeResources(env_4);
- if (result_4)
- await result_4;
+ const result_6 = __disposeResources(env_6);
+ if (result_6)
+ await result_6;
  }
  });
  test('sync legacy non-raw data', async () => {
@@ -427,9 +706,9 @@ bucket_definitions:
  expect(lines[1].data.data[0].data.large_num).toEqual(12345678901234567890n);
  });
  test('expired token', async () => {
- const env_5 = { stack: [], error: void 0, hasError: false };
+ const env_7 = { stack: [], error: void 0, hasError: false };
  try {
- const f = __addDisposableResource(env_5, await factory(), true);
+ const f = __addDisposableResource(env_7, await factory(), true);
  const syncRules = await f.updateSyncRules({
  content: BASIC_SYNC_RULES
  });
@@ -451,20 +730,20 @@ bucket_definitions:
  const lines = await consumeCheckpointLines(stream);
  expect(lines).toMatchSnapshot();
  }
- catch (e_5) {
- env_5.error = e_5;
- env_5.hasError = true;
+ catch (e_7) {
+ env_7.error = e_7;
+ env_7.hasError = true;
  }
  finally {
- const result_5 = __disposeResources(env_5);
- if (result_5)
- await result_5;
+ const result_7 = __disposeResources(env_7);
+ if (result_7)
+ await result_7;
  }
  });
  test('sync updates to global data', async (context) => {
- const env_6 = { stack: [], error: void 0, hasError: false };
+ const env_8 = { stack: [], error: void 0, hasError: false };
  try {
- const f = __addDisposableResource(env_6, await factory(), true);
+ const f = __addDisposableResource(env_8, await factory(), true);
  const syncRules = await f.updateSyncRules({
  content: BASIC_SYNC_RULES
  });
@@ -515,20 +794,20 @@ bucket_definitions:
  });
  expect(await getCheckpointLines(iter)).toMatchSnapshot();
  }
- catch (e_6) {
- env_6.error = e_6;
- env_6.hasError = true;
+ catch (e_8) {
+ env_8.error = e_8;
+ env_8.hasError = true;
  }
  finally {
- const result_6 = __disposeResources(env_6);
- if (result_6)
- await result_6;
+ const result_8 = __disposeResources(env_8);
+ if (result_8)
+ await result_8;
  }
  });
  test('sync updates to parameter query only', async (context) => {
- const env_7 = { stack: [], error: void 0, hasError: false };
+ const env_9 = { stack: [], error: void 0, hasError: false };
  try {
- const f = __addDisposableResource(env_7, await factory(), true);
+ const f = __addDisposableResource(env_9, await factory(), true);
  const syncRules = await f.updateSyncRules({
  content: `bucket_definitions:
  by_user:
@@ -579,20 +858,20 @@ bucket_definitions:
  expect(checkpoint2[0].checkpoint_diff?.updated_buckets?.map((b) => b.bucket)).toEqual(['by_user["user1"]']);
  expect(checkpoint2).toMatchSnapshot();
  }
- catch (e_7) {
- env_7.error = e_7;
- env_7.hasError = true;
+ catch (e_9) {
+ env_9.error = e_9;
+ env_9.hasError = true;
  }
  finally {
- const result_7 = __disposeResources(env_7);
- if (result_7)
- await result_7;
+ const result_9 = __disposeResources(env_9);
+ if (result_9)
+ await result_9;
  }
  });
  test('sync updates to data query only', async (context) => {
- const env_8 = { stack: [], error: void 0, hasError: false };
+ const env_10 = { stack: [], error: void 0, hasError: false };
  try {
- const f = __addDisposableResource(env_8, await factory(), true);
+ const f = __addDisposableResource(env_10, await factory(), true);
  const syncRules = await f.updateSyncRules({
  content: `bucket_definitions:
  by_user:
@@ -656,20 +935,20 @@ bucket_definitions:
  expect(checkpoint2[0].checkpoint_diff?.updated_buckets?.map((b) => b.bucket)).toEqual(['by_user["user1"]']);
  expect(checkpoint2).toMatchSnapshot();
  }
- catch (e_8) {
- env_8.error = e_8;
- env_8.hasError = true;
+ catch (e_10) {
+ env_10.error = e_10;
+ env_10.hasError = true;
  }
  finally {
- const result_8 = __disposeResources(env_8);
- if (result_8)
- await result_8;
+ const result_10 = __disposeResources(env_10);
+ if (result_10)
+ await result_10;
  }
  });
  test('sync updates to parameter query + data', async (context) => {
- const env_9 = { stack: [], error: void 0, hasError: false };
+ const env_11 = { stack: [], error: void 0, hasError: false };
  try {
- const f = __addDisposableResource(env_9, await factory(), true);
+ const f = __addDisposableResource(env_11, await factory(), true);
  const syncRules = await f.updateSyncRules({
  content: `bucket_definitions:
  by_user:
@@ -727,20 +1006,20 @@ bucket_definitions:
  expect(checkpoint2[0].checkpoint_diff?.updated_buckets?.map((b) => b.bucket)).toEqual(['by_user["user1"]']);
  expect(checkpoint2).toMatchSnapshot();
  }
- catch (e_9) {
- env_9.error = e_9;
- env_9.hasError = true;
+ catch (e_11) {
+ env_11.error = e_11;
+ env_11.hasError = true;
  }
  finally {
- const result_9 = __disposeResources(env_9);
- if (result_9)
- await result_9;
+ const result_11 = __disposeResources(env_11);
+ if (result_11)
+ await result_11;
  }
  });
  test('expiring token', async (context) => {
- const env_10 = { stack: [], error: void 0, hasError: false };
+ const env_12 = { stack: [], error: void 0, hasError: false };
  try {
- const f = __addDisposableResource(env_10, await factory(), true);
+ const f = __addDisposableResource(env_12, await factory(), true);
  const syncRules = await f.updateSyncRules({
  content: BASIC_SYNC_RULES
  });
@@ -769,24 +1048,24 @@ bucket_definitions:
  const expLines = await getCheckpointLines(iter);
  expect(expLines).toMatchSnapshot();
  }
- catch (e_10) {
- env_10.error = e_10;
- env_10.hasError = true;
+ catch (e_12) {
+ env_12.error = e_12;
+ env_12.hasError = true;
  }
  finally {
- const result_10 = __disposeResources(env_10);
- if (result_10)
- await result_10;
+ const result_12 = __disposeResources(env_12);
+ if (result_12)
+ await result_12;
  }
  });
  test('compacting data - invalidate checkpoint', async (context) => {
- const env_11 = { stack: [], error: void 0, hasError: false };
+ const env_13 = { stack: [], error: void 0, hasError: false };
  try {
  // This tests a case of a compact operation invalidating a checkpoint in the
  // middle of syncing data.
  // This is expected to be rare in practice, but it is important to handle
  // this case correctly to maintain consistency on the client.
- const f = __addDisposableResource(env_11, await factory(), true);
+ const f = __addDisposableResource(env_13, await factory(), true);
  const syncRules = await f.updateSyncRules({
  content: BASIC_SYNC_RULES
  });
@@ -905,20 +1184,20 @@ bucket_definitions:
  })
  });
  }
- catch (e_11) {
- env_11.error = e_11;
- env_11.hasError = true;
+ catch (e_13) {
+ env_13.error = e_13;
+ env_13.hasError = true;
  }
  finally {
- const result_11 = __disposeResources(env_11);
- if (result_11)
- await result_11;
+ const result_13 = __disposeResources(env_13);
+ if (result_13)
+ await result_13;
  }
  });
  test('write checkpoint', async () => {
- const env_12 = { stack: [], error: void 0, hasError: false };
+ const env_14 = { stack: [], error: void 0, hasError: false };
  try {
- const f = __addDisposableResource(env_12, await factory(), true);
+ const f = __addDisposableResource(env_14, await factory(), true);
  const syncRules = await f.updateSyncRules({
  content: BASIC_SYNC_RULES
  });
@@ -970,14 +1249,14 @@ bucket_definitions:
  })
  });
  }
- catch (e_12) {
- env_12.error = e_12;
- env_12.hasError = true;
+ catch (e_14) {
+ env_14.error = e_14;
+ env_14.hasError = true;
  }
  finally {
- const result_12 = __disposeResources(env_12);
- if (result_12)
- await result_12;
+ const result_14 = __disposeResources(env_14);
+ if (result_14)
+ await result_14;
  }
  });
  }