@powersync/service-core-tests 0.15.0 → 0.15.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. package/CHANGELOG.md +27 -0
  2. package/dist/test-utils/general-utils.d.ts +13 -1
  3. package/dist/test-utils/general-utils.js +30 -1
  4. package/dist/test-utils/general-utils.js.map +1 -1
  5. package/dist/test-utils/stream_utils.js +2 -2
  6. package/dist/test-utils/stream_utils.js.map +1 -1
  7. package/dist/tests/register-compacting-tests.js +266 -257
  8. package/dist/tests/register-compacting-tests.js.map +1 -1
  9. package/dist/tests/register-data-storage-checkpoint-tests.js +36 -57
  10. package/dist/tests/register-data-storage-checkpoint-tests.js.map +1 -1
  11. package/dist/tests/register-data-storage-data-tests.js +839 -863
  12. package/dist/tests/register-data-storage-data-tests.js.map +1 -1
  13. package/dist/tests/register-data-storage-parameter-tests.js +228 -236
  14. package/dist/tests/register-data-storage-parameter-tests.js.map +1 -1
  15. package/dist/tests/register-parameter-compacting-tests.js +81 -89
  16. package/dist/tests/register-parameter-compacting-tests.js.map +1 -1
  17. package/dist/tests/register-sync-tests.js +468 -462
  18. package/dist/tests/register-sync-tests.js.map +1 -1
  19. package/package.json +3 -3
  20. package/src/test-utils/general-utils.ts +41 -2
  21. package/src/test-utils/stream_utils.ts +2 -2
  22. package/src/tests/register-compacting-tests.ts +279 -270
  23. package/src/tests/register-data-storage-checkpoint-tests.ts +36 -57
  24. package/src/tests/register-data-storage-data-tests.ts +673 -770
  25. package/src/tests/register-data-storage-parameter-tests.ts +245 -257
  26. package/src/tests/register-parameter-compacting-tests.ts +84 -92
  27. package/src/tests/register-sync-tests.ts +375 -391
  28. package/tsconfig.tsbuildinfo +1 -1
@@ -69,7 +69,6 @@ import { parameterLookupScope } from './util.js';
69
69
  export function registerDataStorageParameterTests(config) {
70
70
  const generateStorageFactory = config.factory;
71
71
  const storageVersion = config.storageVersion ?? CURRENT_STORAGE_VERSION;
72
- const TEST_TABLE = test_utils.makeTestTable('test', ['id'], config);
73
72
  const MYBUCKET_1 = parameterLookupScope('mybucket', '1');
74
73
  test('save and load parameters', async () => {
75
74
  const env_1 = { stack: [], error: void 0, hasError: false };
@@ -85,32 +84,32 @@ bucket_definitions:
85
84
  storageVersion
86
85
  }));
87
86
  const bucketStorage = factory.getInstance(syncRules);
88
- await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
89
- await batch.markAllSnapshotDone('1/1');
90
- await batch.save({
91
- sourceTable: TEST_TABLE,
92
- tag: storage.SaveOperationTag.INSERT,
93
- after: {
94
- id: 't2',
95
- id1: 'user3',
96
- id2: 'user4',
97
- group_id: 'group2a'
98
- },
99
- afterReplicaId: test_utils.rid('t2')
100
- });
101
- await batch.save({
102
- sourceTable: TEST_TABLE,
103
- tag: storage.SaveOperationTag.INSERT,
104
- after: {
105
- id: 't1',
106
- id1: 'user1',
107
- id2: 'user2',
108
- group_id: 'group1a'
109
- },
110
- afterReplicaId: test_utils.rid('t1')
111
- });
112
- await batch.commit('1/1');
87
+ const writer = __addDisposableResource(env_1, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
88
+ const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
89
+ await writer.markAllSnapshotDone('1/1');
90
+ await writer.save({
91
+ sourceTable: testTable,
92
+ tag: storage.SaveOperationTag.INSERT,
93
+ after: {
94
+ id: 't2',
95
+ id1: 'user3',
96
+ id2: 'user4',
97
+ group_id: 'group2a'
98
+ },
99
+ afterReplicaId: test_utils.rid('t2')
100
+ });
101
+ await writer.save({
102
+ sourceTable: testTable,
103
+ tag: storage.SaveOperationTag.INSERT,
104
+ after: {
105
+ id: 't1',
106
+ id1: 'user1',
107
+ id2: 'user2',
108
+ group_id: 'group1a'
109
+ },
110
+ afterReplicaId: test_utils.rid('t1')
113
111
  });
112
+ await writer.commit('1/1');
114
113
  const checkpoint = await bucketStorage.getCheckpoint();
115
114
  const parameters = await checkpoint.getParameterSets([ScopedParameterLookup.direct(MYBUCKET_1, ['user1'])]);
116
115
  expect(parameters).toEqual([
@@ -143,32 +142,30 @@ bucket_definitions:
143
142
  storageVersion
144
143
  }));
145
144
  const bucketStorage = factory.getInstance(syncRules);
146
- await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
147
- await batch.markAllSnapshotDone('1/1');
148
- await batch.save({
149
- sourceTable: TEST_TABLE,
150
- tag: storage.SaveOperationTag.INSERT,
151
- after: {
152
- id: 'user1',
153
- group_id: 'group1'
154
- },
155
- afterReplicaId: test_utils.rid('user1')
156
- });
157
- await batch.commit('1/1');
145
+ const writer = __addDisposableResource(env_2, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
146
+ const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
147
+ await writer.markAllSnapshotDone('1/1');
148
+ await writer.save({
149
+ sourceTable: testTable,
150
+ tag: storage.SaveOperationTag.INSERT,
151
+ after: {
152
+ id: 'user1',
153
+ group_id: 'group1'
154
+ },
155
+ afterReplicaId: test_utils.rid('user1')
158
156
  });
157
+ await writer.commit('1/1');
159
158
  const checkpoint1 = await bucketStorage.getCheckpoint();
160
- await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
161
- await batch.save({
162
- sourceTable: TEST_TABLE,
163
- tag: storage.SaveOperationTag.INSERT,
164
- after: {
165
- id: 'user1',
166
- group_id: 'group2'
167
- },
168
- afterReplicaId: test_utils.rid('user1')
169
- });
170
- await batch.commit('1/2');
159
+ await writer.save({
160
+ sourceTable: testTable,
161
+ tag: storage.SaveOperationTag.INSERT,
162
+ after: {
163
+ id: 'user1',
164
+ group_id: 'group2'
165
+ },
166
+ afterReplicaId: test_utils.rid('user1')
171
167
  });
168
+ await writer.commit('1/2');
172
169
  const checkpoint2 = await bucketStorage.getCheckpoint();
173
170
  const parameters = await checkpoint2.getParameterSets([ScopedParameterLookup.direct(MYBUCKET_1, ['user1'])]);
174
171
  expect(parameters).toEqual([
@@ -208,43 +205,40 @@ bucket_definitions:
208
205
  data: []
209
206
  `, { storageVersion }));
210
207
  const bucketStorage = factory.getInstance(syncRules);
211
- const table = test_utils.makeTestTable('todos', ['id', 'list_id'], config);
212
- await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
213
- await batch.markAllSnapshotDone('1/1');
214
- // Create two todos which initially belong to different lists
215
- await batch.save({
216
- sourceTable: table,
217
- tag: storage.SaveOperationTag.INSERT,
218
- after: {
219
- id: 'todo1',
220
- list_id: 'list1'
221
- },
222
- afterReplicaId: test_utils.rid('todo1')
223
- });
224
- await batch.save({
225
- sourceTable: table,
226
- tag: storage.SaveOperationTag.INSERT,
227
- after: {
228
- id: 'todo2',
229
- list_id: 'list2'
230
- },
231
- afterReplicaId: test_utils.rid('todo2')
232
- });
233
- await batch.commit('1/1');
208
+ const writer = __addDisposableResource(env_3, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
209
+ const table = await test_utils.resolveTestTable(writer, 'todos', ['id', 'list_id'], config);
210
+ await writer.markAllSnapshotDone('1/1');
211
+ // Create two todos which initially belong to different lists
212
+ await writer.save({
213
+ sourceTable: table,
214
+ tag: storage.SaveOperationTag.INSERT,
215
+ after: {
216
+ id: 'todo1',
217
+ list_id: 'list1'
218
+ },
219
+ afterReplicaId: test_utils.rid('todo1')
220
+ });
221
+ await writer.save({
222
+ sourceTable: table,
223
+ tag: storage.SaveOperationTag.INSERT,
224
+ after: {
225
+ id: 'todo2',
226
+ list_id: 'list2'
227
+ },
228
+ afterReplicaId: test_utils.rid('todo2')
234
229
  });
235
- await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
236
- // Update the second todo item to now belong to list 1
237
- await batch.save({
238
- sourceTable: table,
239
- tag: storage.SaveOperationTag.UPDATE,
240
- after: {
241
- id: 'todo2',
242
- list_id: 'list1'
243
- },
244
- afterReplicaId: test_utils.rid('todo2')
245
- });
246
- await batch.commit('1/1');
230
+ await writer.commit('1/1');
231
+ // Update the second todo item to now belong to list 1
232
+ await writer.save({
233
+ sourceTable: table,
234
+ tag: storage.SaveOperationTag.UPDATE,
235
+ after: {
236
+ id: 'todo2',
237
+ list_id: 'list1'
238
+ },
239
+ afterReplicaId: test_utils.rid('todo2')
247
240
  });
241
+ await writer.commit('1/1');
248
242
  // We specifically request the todo_ids for both lists.
249
243
  // The removal operation for the association of `list2`::`todo2` should not interfere with the new
250
244
  // association of `list1`::`todo2`
@@ -286,22 +280,22 @@ bucket_definitions:
286
280
  storageVersion
287
281
  }));
288
282
  const bucketStorage = factory.getInstance(syncRules);
289
- await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
290
- await batch.markAllSnapshotDone('1/1');
291
- await batch.save({
292
- sourceTable: TEST_TABLE,
293
- tag: storage.SaveOperationTag.INSERT,
294
- after: {
295
- id: 't1',
296
- group_id: 'group1',
297
- n1: 314n,
298
- f2: 314,
299
- f3: 3.14
300
- },
301
- afterReplicaId: test_utils.rid('t1')
302
- });
303
- await batch.commit('1/1');
283
+ const writer = __addDisposableResource(env_4, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
284
+ const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
285
+ await writer.markAllSnapshotDone('1/1');
286
+ await writer.save({
287
+ sourceTable: testTable,
288
+ tag: storage.SaveOperationTag.INSERT,
289
+ after: {
290
+ id: 't1',
291
+ group_id: 'group1',
292
+ n1: 314n,
293
+ f2: 314,
294
+ f3: 3.14
295
+ },
296
+ afterReplicaId: test_utils.rid('t1')
304
297
  });
298
+ await writer.commit('1/1');
305
299
  const TEST_PARAMS = { group_id: 'group1' };
306
300
  const checkpoint = await bucketStorage.getCheckpoint();
307
301
  const parameters1 = await checkpoint.getParameterSets([
@@ -342,32 +336,32 @@ bucket_definitions:
342
336
  storageVersion
343
337
  }));
344
338
  const bucketStorage = factory.getInstance(syncRules);
345
- await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
346
- await batch.markAllSnapshotDone('1/1');
347
- await batch.save({
348
- sourceTable: TEST_TABLE,
349
- tag: storage.SaveOperationTag.INSERT,
350
- after: {
351
- id: 't1',
352
- group_id: 'group1',
353
- n1: 1152921504606846976n // 2^60
354
- },
355
- afterReplicaId: test_utils.rid('t1')
356
- });
357
- await batch.save({
358
- sourceTable: TEST_TABLE,
359
- tag: storage.SaveOperationTag.UPDATE,
360
- after: {
361
- id: 't1',
362
- group_id: 'group1',
363
- // Simulate a TOAST value, even though it can't happen for values like this
364
- // in practice.
365
- n1: undefined
366
- },
367
- afterReplicaId: test_utils.rid('t1')
368
- });
369
- await batch.commit('1/1');
339
+ const writer = __addDisposableResource(env_5, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
340
+ const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
341
+ await writer.markAllSnapshotDone('1/1');
342
+ await writer.save({
343
+ sourceTable: testTable,
344
+ tag: storage.SaveOperationTag.INSERT,
345
+ after: {
346
+ id: 't1',
347
+ group_id: 'group1',
348
+ n1: 1152921504606846976n // 2^60
349
+ },
350
+ afterReplicaId: test_utils.rid('t1')
351
+ });
352
+ await writer.save({
353
+ sourceTable: testTable,
354
+ tag: storage.SaveOperationTag.UPDATE,
355
+ after: {
356
+ id: 't1',
357
+ group_id: 'group1',
358
+ // Simulate a TOAST value, even though it can't happen for values like this
359
+ // in practice.
360
+ n1: undefined
361
+ },
362
+ afterReplicaId: test_utils.rid('t1')
370
363
  });
364
+ await writer.commit('1/1');
371
365
  const TEST_PARAMS = { group_id: 'group1' };
372
366
  const checkpoint = await bucketStorage.getCheckpoint();
373
367
  const parameters1 = await checkpoint.getParameterSets([
@@ -388,7 +382,6 @@ bucket_definitions:
388
382
  test('save and load parameters with workspaceId', async () => {
389
383
  const env_6 = { stack: [], error: void 0, hasError: false };
390
384
  try {
391
- const WORKSPACE_TABLE = test_utils.makeTestTable('workspace', ['id'], config);
392
385
  const factory = __addDisposableResource(env_6, await generateStorageFactory(), true);
393
386
  const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
394
387
  bucket_definitions:
@@ -402,19 +395,19 @@ bucket_definitions:
402
395
  }));
403
396
  const sync_rules = syncRules.parsed(test_utils.PARSE_OPTIONS).hydratedSyncRules();
404
397
  const bucketStorage = factory.getInstance(syncRules);
405
- await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
406
- await batch.markAllSnapshotDone('1/1');
407
- await batch.save({
408
- sourceTable: WORKSPACE_TABLE,
409
- tag: storage.SaveOperationTag.INSERT,
410
- after: {
411
- id: 'workspace1',
412
- userId: 'u1'
413
- },
414
- afterReplicaId: test_utils.rid('workspace1')
415
- });
416
- await batch.commit('1/1');
398
+ const writer = __addDisposableResource(env_6, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
399
+ const workspaceTable = await test_utils.resolveTestTable(writer, 'workspace', ['id'], config);
400
+ await writer.markAllSnapshotDone('1/1');
401
+ await writer.save({
402
+ sourceTable: workspaceTable,
403
+ tag: storage.SaveOperationTag.INSERT,
404
+ after: {
405
+ id: 'workspace1',
406
+ userId: 'u1'
407
+ },
408
+ afterReplicaId: test_utils.rid('workspace1')
417
409
  });
410
+ await writer.commit('1/1');
418
411
  const checkpoint = await bucketStorage.getCheckpoint();
419
412
  const parameters = new RequestParameters(new JwtPayload({ sub: 'u1' }), {});
420
413
  const querier = sync_rules.getBucketParameterQuerier(test_utils.querierOptions(parameters)).querier;
@@ -448,7 +441,6 @@ bucket_definitions:
448
441
  test('save and load parameters with dynamic global buckets', async () => {
449
442
  const env_7 = { stack: [], error: void 0, hasError: false };
450
443
  try {
451
- const WORKSPACE_TABLE = test_utils.makeTestTable('workspace', undefined, config);
452
444
  const factory = __addDisposableResource(env_7, await generateStorageFactory(), true);
453
445
  const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
454
446
  bucket_definitions:
@@ -462,37 +454,37 @@ bucket_definitions:
462
454
  }));
463
455
  const sync_rules = syncRules.parsed(test_utils.PARSE_OPTIONS).hydratedSyncRules();
464
456
  const bucketStorage = factory.getInstance(syncRules);
465
- await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
466
- await batch.markAllSnapshotDone('1/1');
467
- await batch.save({
468
- sourceTable: WORKSPACE_TABLE,
469
- tag: storage.SaveOperationTag.INSERT,
470
- after: {
471
- id: 'workspace1',
472
- visibility: 'public'
473
- },
474
- afterReplicaId: test_utils.rid('workspace1')
475
- });
476
- await batch.save({
477
- sourceTable: WORKSPACE_TABLE,
478
- tag: storage.SaveOperationTag.INSERT,
479
- after: {
480
- id: 'workspace2',
481
- visibility: 'private'
482
- },
483
- afterReplicaId: test_utils.rid('workspace2')
484
- });
485
- await batch.save({
486
- sourceTable: WORKSPACE_TABLE,
487
- tag: storage.SaveOperationTag.INSERT,
488
- after: {
489
- id: 'workspace3',
490
- visibility: 'public'
491
- },
492
- afterReplicaId: test_utils.rid('workspace3')
493
- });
494
- await batch.commit('1/1');
457
+ const writer = __addDisposableResource(env_7, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
458
+ const workspaceTable = await test_utils.resolveTestTable(writer, 'workspace', undefined, config);
459
+ await writer.markAllSnapshotDone('1/1');
460
+ await writer.save({
461
+ sourceTable: workspaceTable,
462
+ tag: storage.SaveOperationTag.INSERT,
463
+ after: {
464
+ id: 'workspace1',
465
+ visibility: 'public'
466
+ },
467
+ afterReplicaId: test_utils.rid('workspace1')
468
+ });
469
+ await writer.save({
470
+ sourceTable: workspaceTable,
471
+ tag: storage.SaveOperationTag.INSERT,
472
+ after: {
473
+ id: 'workspace2',
474
+ visibility: 'private'
475
+ },
476
+ afterReplicaId: test_utils.rid('workspace2')
495
477
  });
478
+ await writer.save({
479
+ sourceTable: workspaceTable,
480
+ tag: storage.SaveOperationTag.INSERT,
481
+ after: {
482
+ id: 'workspace3',
483
+ visibility: 'public'
484
+ },
485
+ afterReplicaId: test_utils.rid('workspace3')
486
+ });
487
+ await writer.commit('1/1');
496
488
  const checkpoint = await bucketStorage.getCheckpoint();
497
489
  const parameters = new RequestParameters(new JwtPayload({ sub: 'unknown' }), {});
498
490
  const querier = sync_rules.getBucketParameterQuerier(test_utils.querierOptions(parameters)).querier;
@@ -534,7 +526,6 @@ bucket_definitions:
534
526
  test('multiple parameter queries', async () => {
535
527
  const env_8 = { stack: [], error: void 0, hasError: false };
536
528
  try {
537
- const WORKSPACE_TABLE = test_utils.makeTestTable('workspace', undefined, config);
538
529
  const factory = __addDisposableResource(env_8, await generateStorageFactory(), true);
539
530
  const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
540
531
  bucket_definitions:
@@ -550,48 +541,48 @@ bucket_definitions:
550
541
  }));
551
542
  const sync_rules = syncRules.parsed(test_utils.PARSE_OPTIONS).hydratedSyncRules();
552
543
  const bucketStorage = factory.getInstance(syncRules);
553
- await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
554
- await batch.markAllSnapshotDone('1/1');
555
- await batch.save({
556
- sourceTable: WORKSPACE_TABLE,
557
- tag: storage.SaveOperationTag.INSERT,
558
- after: {
559
- id: 'workspace1',
560
- visibility: 'public'
561
- },
562
- afterReplicaId: test_utils.rid('workspace1')
563
- });
564
- await batch.save({
565
- sourceTable: WORKSPACE_TABLE,
566
- tag: storage.SaveOperationTag.INSERT,
567
- after: {
568
- id: 'workspace2',
569
- visibility: 'private'
570
- },
571
- afterReplicaId: test_utils.rid('workspace2')
572
- });
573
- await batch.save({
574
- sourceTable: WORKSPACE_TABLE,
575
- tag: storage.SaveOperationTag.INSERT,
576
- after: {
577
- id: 'workspace3',
578
- user_id: 'u1',
579
- visibility: 'private'
580
- },
581
- afterReplicaId: test_utils.rid('workspace3')
582
- });
583
- await batch.save({
584
- sourceTable: WORKSPACE_TABLE,
585
- tag: storage.SaveOperationTag.INSERT,
586
- after: {
587
- id: 'workspace4',
588
- user_id: 'u2',
589
- visibility: 'private'
590
- },
591
- afterReplicaId: test_utils.rid('workspace4')
592
- });
593
- await batch.commit('1/1');
544
+ const writer = __addDisposableResource(env_8, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
545
+ const workspaceTable = await test_utils.resolveTestTable(writer, 'workspace', undefined, config);
546
+ await writer.markAllSnapshotDone('1/1');
547
+ await writer.save({
548
+ sourceTable: workspaceTable,
549
+ tag: storage.SaveOperationTag.INSERT,
550
+ after: {
551
+ id: 'workspace1',
552
+ visibility: 'public'
553
+ },
554
+ afterReplicaId: test_utils.rid('workspace1')
594
555
  });
556
+ await writer.save({
557
+ sourceTable: workspaceTable,
558
+ tag: storage.SaveOperationTag.INSERT,
559
+ after: {
560
+ id: 'workspace2',
561
+ visibility: 'private'
562
+ },
563
+ afterReplicaId: test_utils.rid('workspace2')
564
+ });
565
+ await writer.save({
566
+ sourceTable: workspaceTable,
567
+ tag: storage.SaveOperationTag.INSERT,
568
+ after: {
569
+ id: 'workspace3',
570
+ user_id: 'u1',
571
+ visibility: 'private'
572
+ },
573
+ afterReplicaId: test_utils.rid('workspace3')
574
+ });
575
+ await writer.save({
576
+ sourceTable: workspaceTable,
577
+ tag: storage.SaveOperationTag.INSERT,
578
+ after: {
579
+ id: 'workspace4',
580
+ user_id: 'u2',
581
+ visibility: 'private'
582
+ },
583
+ afterReplicaId: test_utils.rid('workspace4')
584
+ });
585
+ await writer.commit('1/1');
595
586
  const checkpoint = await bucketStorage.getCheckpoint();
596
587
  const parameters = new RequestParameters(new JwtPayload({ sub: 'u1' }), {});
597
588
  // Test intermediate values - could be moved to sync_rules.test.ts
@@ -643,21 +634,22 @@ bucket_definitions:
643
634
  storageVersion
644
635
  }));
645
636
  const bucketStorage = factory.getInstance(syncRules);
646
- await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
647
- await batch.markAllSnapshotDone('1/1');
648
- await batch.save({
649
- sourceTable: TEST_TABLE,
650
- tag: storage.SaveOperationTag.INSERT,
651
- after: {
652
- id: 't2',
653
- id1: 'user3',
654
- id2: 'user4',
655
- group_id: 'group2a'
656
- },
657
- afterReplicaId: test_utils.rid('t2')
658
- });
659
- await batch.truncate([TEST_TABLE]);
637
+ const writer = __addDisposableResource(env_9, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
638
+ const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
639
+ await writer.markAllSnapshotDone('1/1');
640
+ await writer.save({
641
+ sourceTable: testTable,
642
+ tag: storage.SaveOperationTag.INSERT,
643
+ after: {
644
+ id: 't2',
645
+ id1: 'user3',
646
+ id2: 'user4',
647
+ group_id: 'group2a'
648
+ },
649
+ afterReplicaId: test_utils.rid('t2')
660
650
  });
651
+ await writer.truncate([testTable]);
652
+ await writer.flush();
661
653
  const checkpoint = await bucketStorage.getCheckpoint();
662
654
  const parameters = await checkpoint.getParameterSets([ScopedParameterLookup.direct(MYBUCKET_1, ['user1'])]);
663
655
  expect(parameters).toEqual([]);
@@ -728,19 +720,19 @@ streams:
728
720
  WHERE data.foo = param.bar AND param.baz = auth.user_id()
729
721
  `));
730
722
  const bucketStorage = factory.getInstance(syncRules);
731
- await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
732
- await batch.markAllSnapshotDone('1/1');
733
- await batch.save({
734
- sourceTable: TEST_TABLE,
735
- tag: storage.SaveOperationTag.INSERT,
736
- after: {
737
- baz: 'baz',
738
- bar: 'bar'
739
- },
740
- afterReplicaId: test_utils.rid('t1')
741
- });
742
- await batch.commit('1/1');
723
+ const writer = __addDisposableResource(env_11, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
724
+ const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
725
+ await writer.markAllSnapshotDone('1/1');
726
+ await writer.save({
727
+ sourceTable: testTable,
728
+ tag: storage.SaveOperationTag.INSERT,
729
+ after: {
730
+ baz: 'baz',
731
+ bar: 'bar'
732
+ },
733
+ afterReplicaId: test_utils.rid('t1')
743
734
  });
735
+ await writer.commit('1/1');
744
736
  const checkpoint = await bucketStorage.getCheckpoint();
745
737
  const parameters = await checkpoint.getParameterSets([
746
738
  ScopedParameterLookup.direct(parameterLookupScope('lookup', '0'), ['baz'])