@powersync/service-core-tests 0.9.4 → 0.9.5

This diff reflects the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
@@ -405,6 +405,155 @@ bucket_definitions:
    expect(sentRows).toBe(10002);
  });

+ test('sync interrupts low-priority buckets on new checkpoints (2)', async () => {
+   await using f = await factory();
+
+   // bucket0a -> send all data
+   // then interrupt checkpoint with new data for all buckets
+   // -> data for all buckets should be sent in the new checkpoint
+
+   const syncRules = await f.updateSyncRules({
+     content: `
+ bucket_definitions:
+   b0a:
+     priority: 2
+     data:
+       - SELECT * FROM test WHERE LENGTH(id) <= 5;
+   b0b:
+     priority: 2
+     data:
+       - SELECT * FROM test WHERE LENGTH(id) <= 5;
+   b1:
+     priority: 1
+     data:
+       - SELECT * FROM test WHERE LENGTH(id) > 5;
+ `
+   });
+
+   const bucketStorage = f.getInstance(syncRules);
+   await bucketStorage.autoActivate();
+
+   await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+     // Initial data: Add one high-priority row and 2k low-priority rows.
+     await batch.save({
+       sourceTable: TEST_TABLE,
+       tag: storage.SaveOperationTag.INSERT,
+       after: {
+         id: 'highprio',
+         description: 'High priority row'
+       },
+       afterReplicaId: 'highprio'
+     });
+     for (let i = 0; i < 2_000; i++) {
+       await batch.save({
+         sourceTable: TEST_TABLE,
+         tag: storage.SaveOperationTag.INSERT,
+         after: {
+           id: `${i}`,
+           description: 'low prio'
+         },
+         afterReplicaId: `${i}`
+       });
+     }
+
+     await batch.commit('0/1');
+   });
+
+   const stream = sync.streamResponse({
+     syncContext,
+     bucketStorage,
+     syncRules: bucketStorage.getParsedSyncRules(test_utils.PARSE_OPTIONS),
+     params: {
+       buckets: [],
+       include_checksum: true,
+       raw_data: true
+     },
+     tracker,
+     syncParams: new RequestParameters({ sub: '' }, {}),
+     token: { exp: Date.now() / 1000 + 10 } as any
+   });
+
+   let sentRows = 0;
+   let lines: any[] = [];
+
+   for await (let next of stream) {
+     if (typeof next == 'string') {
+       next = JSON.parse(next);
+     }
+     if (typeof next === 'object' && next !== null) {
+       if ('partial_checkpoint_complete' in next) {
+         lines.push(next);
+       }
+       if ('checkpoint' in next || 'checkpoint_diff' in next) {
+         lines.push(next);
+       }
+
+       if ('data' in next) {
+         lines.push({ data: { ...next.data, data: undefined } });
+         sentRows += next.data.data.length;
+
+         if (sentRows == 1001) {
+           // Save new data to interrupt the low-priority sync.
+           await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+             // Add another high-priority row. This should interrupt the long-running low-priority sync.
+             await batch.save({
+               sourceTable: TEST_TABLE,
+               tag: storage.SaveOperationTag.INSERT,
+               after: {
+                 id: 'highprio2',
+                 description: 'Another high-priority row'
+               },
+               afterReplicaId: 'highprio2'
+             });
+
+             // Also add a low-priority row
+             await batch.save({
+               sourceTable: TEST_TABLE,
+               tag: storage.SaveOperationTag.INSERT,
+               after: {
+                 id: '2001',
+                 description: 'Another low-priority row'
+               },
+               afterReplicaId: '2001'
+             });
+
+             await batch.commit('0/2');
+           });
+
+           if (sentRows >= 1000 && sentRows <= 2001) {
+             // pause for a bit to give the stream time to process interruptions.
+             // This covers the data batch above and the next one.
+             await timers.setTimeout(50);
+           }
+         }
+       }
+       if ('checkpoint_complete' in next) {
+         lines.push(next);
+         break;
+       }
+     }
+   }
+
+   // Expected lines (full details in snapshot):
+   //
+   // checkpoint (4001)
+   // data (b1[], 0 -> 1)
+   // partial_checkpoint_complete (4001, priority 1)
+   // data (b0a[], 0 -> 2000)
+   // ## adds new data, interrupting the checkpoint
+   // data (b0a[], 2000 -> 4000) # expected - stream is already busy with this by the time it receives the interruption
+   // checkpoint_diff (4004)
+   // data (b1[], 1 -> 4002)
+   // partial_checkpoint_complete (4004, priority 1)
+   // data (b0a[], 4000 -> 4003)
+   // data (b0b[], 0 -> 1999)
+   // data (b0b[], 1999 -> 3999)
+   // data (b0b[], 3999 -> 4004)
+   // checkpoint_complete (4004)
+   expect(lines).toMatchSnapshot();
+   expect(sentRows).toBe(4004);
+ });
+
  test('sends checkpoint complete line for empty checkpoint', async () => {
    await using f = await factory();