@powersync/service-core-tests 0.12.1 → 0.12.2

This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package versions exactly as they appear in their public registries.
Files changed (30)
  1. package/CHANGELOG.md +8 -0
  2. package/dist/tests/register-compacting-tests.js +68 -0
  3. package/dist/tests/register-compacting-tests.js.map +1 -1
  4. package/dist/tests/register-data-storage-checkpoint-tests.d.ts +12 -0
  5. package/dist/tests/register-data-storage-checkpoint-tests.js +357 -0
  6. package/dist/tests/register-data-storage-checkpoint-tests.js.map +1 -0
  7. package/dist/tests/register-data-storage-data-tests.d.ts +12 -0
  8. package/dist/tests/{register-data-storage-tests.js → register-data-storage-data-tests.js} +151 -1035
  9. package/dist/tests/register-data-storage-data-tests.js.map +1 -0
  10. package/dist/tests/{register-data-storage-tests.d.ts → register-data-storage-parameter-tests.d.ts} +1 -2
  11. package/dist/tests/register-data-storage-parameter-tests.js +707 -0
  12. package/dist/tests/register-data-storage-parameter-tests.js.map +1 -0
  13. package/dist/tests/register-sync-tests.js +2 -1
  14. package/dist/tests/register-sync-tests.js.map +1 -1
  15. package/dist/tests/tests-index.d.ts +4 -1
  16. package/dist/tests/tests-index.js +4 -1
  17. package/dist/tests/tests-index.js.map +1 -1
  18. package/dist/tests/util.d.ts +1 -0
  19. package/dist/tests/util.js +3 -0
  20. package/dist/tests/util.js.map +1 -0
  21. package/package.json +2 -2
  22. package/src/tests/register-compacting-tests.ts +63 -0
  23. package/src/tests/register-data-storage-checkpoint-tests.ts +277 -0
  24. package/src/tests/{register-data-storage-tests.ts → register-data-storage-data-tests.ts} +38 -865
  25. package/src/tests/register-data-storage-parameter-tests.ts +613 -0
  26. package/src/tests/register-sync-tests.ts +2 -1
  27. package/src/tests/tests-index.ts +4 -1
  28. package/src/tests/util.ts +3 -0
  29. package/tsconfig.tsbuildinfo +1 -1
  30. package/dist/tests/register-data-storage-tests.js.map +0 -1
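
The single file diff reproduced below is item 24: the rename of package/src/tests/register-data-storage-tests.ts to register-data-storage-data-tests.ts (+38 -865). The 865 removed lines were not dropped from the package. The parameter-lookup tests moved to the new register-data-storage-parameter-tests.ts (+613), and the sync-rules-cache and write-checkpoint tests moved to the new register-data-storage-checkpoint-tests.ts (+277). Storage implementations that consumed the old single entry point now register three suites. A migration sketch follows; only `registerDataStorageDataTests` is confirmed by this diff, while the other two names are inferred from the new file names, and the suites are assumed to be re-exported from the package root like the old one was:

```ts
import {
  registerDataStorageDataTests, // confirmed by this diff
  registerDataStorageCheckpointTests, // inferred from the new file names
  registerDataStorageParameterTests // inferred from the new file names
} from '@powersync/service-core-tests';
import { describe } from 'vitest';
import { MONGO_STORAGE_FACTORY } from './util.js'; // hypothetical test-local factory

describe('store - mongodb', () => {
  // 0.12.1: registerDataStorageTests(MONGO_STORAGE_FACTORY);
  // 0.12.2: the suite is split into three registrations:
  registerDataStorageDataTests(MONGO_STORAGE_FACTORY);
  registerDataStorageParameterTests(MONGO_STORAGE_FACTORY);
  registerDataStorageCheckpointTests(MONGO_STORAGE_FACTORY);
});
```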
@@ -1,17 +1,14 @@
 import {
   BucketDataBatchOptions,
   getUuidReplicaIdentityBson,
-  InternalOpId,
   OplogEntry,
   SaveOptions,
   storage
 } from '@powersync/service-core';
-import { DateTimeValue, ParameterLookup, RequestParameters } from '@powersync/service-sync-rules';
-import { expect, test, describe, beforeEach } from 'vitest';
+import { DateTimeValue } from '@powersync/service-sync-rules';
+import { describe, expect, test } from 'vitest';
 import * as test_utils from '../test-utils/test-utils-index.js';
-import { SqlBucketDescriptor } from '@powersync/service-sync-rules/src/SqlBucketDescriptor.js';
-
-export const TEST_TABLE = test_utils.makeTestTable('test', ['id']);
+import { TEST_TABLE } from './util.js';
 
 /**
  * Normalize data from OplogEntries for comparison in tests.
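
The TEST_TABLE constant that was exported from this file in 0.12.1 now comes from the shared ./util.js module instead (package/src/tests/util.ts gains 3 lines in this release). A minimal sketch of the shared module, presumed from the export removed above; the actual util.ts contents are not shown in this diff:

```ts
// src/tests/util.ts (presumed): the constant removed from
// register-data-storage-tests.ts, now shared between the split suites.
import * as test_utils from '../test-utils/test-utils-index.js';

export const TEST_TABLE = test_utils.makeTestTable('test', ['id']);
```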
@@ -29,296 +26,12 @@ const normalizeOplogData = (data: OplogEntry['data']) => {
  * ```TypeScript
  *
  * describe('store - mongodb', function () {
- *   registerDataStorageTests(MONGO_STORAGE_FACTORY);
+ *   registerDataStorageDataTests(MONGO_STORAGE_FACTORY);
  * });
  *
  * ```
  */
-export function registerDataStorageTests(generateStorageFactory: storage.TestStorageFactory) {
-  test('save and load parameters', async () => {
-    await using factory = await generateStorageFactory();
-    const syncRules = await factory.updateSyncRules({
-      content: `
-bucket_definitions:
-  mybucket:
-    parameters:
-      - SELECT group_id FROM test WHERE id1 = token_parameters.user_id OR id2 = token_parameters.user_id
-    data: []
-`
-    });
-    const bucketStorage = factory.getInstance(syncRules);
-
-    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-      await batch.save({
-        sourceTable: TEST_TABLE,
-        tag: storage.SaveOperationTag.INSERT,
-        after: {
-          id: 't2',
-          id1: 'user3',
-          id2: 'user4',
-          group_id: 'group2a'
-        },
-        afterReplicaId: test_utils.rid('t2')
-      });
-
-      await batch.save({
-        sourceTable: TEST_TABLE,
-        tag: storage.SaveOperationTag.INSERT,
-        after: {
-          id: 't1',
-          id1: 'user1',
-          id2: 'user2',
-          group_id: 'group1a'
-        },
-        afterReplicaId: test_utils.rid('t1')
-      });
-
-      await batch.commit('1/1');
-    });
-
-    const checkpoint = await bucketStorage.getCheckpoint();
-    const parameters = await checkpoint.getParameterSets([ParameterLookup.normalized('mybucket', '1', ['user1'])]);
-    expect(parameters).toEqual([
-      {
-        group_id: 'group1a'
-      }
-    ]);
-  });
-
-  test('it should use the latest version', async () => {
-    await using factory = await generateStorageFactory();
-    const syncRules = await factory.updateSyncRules({
-      content: `
-bucket_definitions:
-  mybucket:
-    parameters:
-      - SELECT group_id FROM test WHERE id = token_parameters.user_id
-    data: []
-`
-    });
-    const bucketStorage = factory.getInstance(syncRules);
-
-    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-      await batch.save({
-        sourceTable: TEST_TABLE,
-        tag: storage.SaveOperationTag.INSERT,
-        after: {
-          id: 'user1',
-          group_id: 'group1'
-        },
-        afterReplicaId: test_utils.rid('user1')
-      });
-      await batch.commit('1/1');
-    });
-    const checkpoint1 = await bucketStorage.getCheckpoint();
-    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-      await batch.save({
-        sourceTable: TEST_TABLE,
-        tag: storage.SaveOperationTag.INSERT,
-        after: {
-          id: 'user1',
-          group_id: 'group2'
-        },
-        afterReplicaId: test_utils.rid('user1')
-      });
-      await batch.commit('1/2');
-    });
-    const checkpoint2 = await bucketStorage.getCheckpoint();
-
-    const parameters = await checkpoint2.getParameterSets([ParameterLookup.normalized('mybucket', '1', ['user1'])]);
-    expect(parameters).toEqual([
-      {
-        group_id: 'group2'
-      }
-    ]);
-
-    // Use the checkpoint to get older data if relevant
-    const parameters2 = await checkpoint1.getParameterSets([ParameterLookup.normalized('mybucket', '1', ['user1'])]);
-    expect(parameters2).toEqual([
-      {
-        group_id: 'group1'
-      }
-    ]);
-  });
-
-  test('it should use the latest version after updates', async () => {
-    await using factory = await generateStorageFactory();
-    const syncRules = await factory.updateSyncRules({
-      content: `
-bucket_definitions:
-  mybucket:
-    parameters:
-      - SELECT id AS todo_id
-        FROM todos
-        WHERE list_id IN token_parameters.list_id
-    data: []
-`
-    });
-    const bucketStorage = factory.getInstance(syncRules);
-
-    const table = test_utils.makeTestTable('todos', ['id', 'list_id']);
-
-    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-      // Create two todos which initially belong to different lists
-      await batch.save({
-        sourceTable: table,
-        tag: storage.SaveOperationTag.INSERT,
-        after: {
-          id: 'todo1',
-          list_id: 'list1'
-        },
-        afterReplicaId: test_utils.rid('todo1')
-      });
-      await batch.save({
-        sourceTable: table,
-        tag: storage.SaveOperationTag.INSERT,
-        after: {
-          id: 'todo2',
-          list_id: 'list2'
-        },
-        afterReplicaId: test_utils.rid('todo2')
-      });
-
-      await batch.commit('1/1');
-    });
-
-    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-      // Update the second todo item to now belong to list 1
-      await batch.save({
-        sourceTable: table,
-        tag: storage.SaveOperationTag.UPDATE,
-        after: {
-          id: 'todo2',
-          list_id: 'list1'
-        },
-        afterReplicaId: test_utils.rid('todo2')
-      });
-
-      await batch.commit('1/1');
-    });
-
-    // We specifically request the todo_ids for both lists.
-    // The removal operation for the association of `list2`::`todo2` should not interfere with the new
-    // association of `list1`::`todo2`
-    const checkpoint = await bucketStorage.getCheckpoint();
-    const parameters = await checkpoint.getParameterSets([
-      ParameterLookup.normalized('mybucket', '1', ['list1']),
-      ParameterLookup.normalized('mybucket', '1', ['list2'])
-    ]);
-
-    expect(parameters.sort((a, b) => (a.todo_id as string).localeCompare(b.todo_id as string))).toEqual([
-      {
-        todo_id: 'todo1'
-      },
-      {
-        todo_id: 'todo2'
-      }
-    ]);
-  });
-
-  test('save and load parameters with different number types', async () => {
-    await using factory = await generateStorageFactory();
-    const syncRules = await factory.updateSyncRules({
-      content: `
-bucket_definitions:
-  mybucket:
-    parameters:
-      - SELECT group_id FROM test WHERE n1 = token_parameters.n1 and f2 = token_parameters.f2 and f3 = token_parameters.f3
-    data: []
-`
-    });
-    const bucketStorage = factory.getInstance(syncRules);
-
-    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-      await batch.save({
-        sourceTable: TEST_TABLE,
-        tag: storage.SaveOperationTag.INSERT,
-        after: {
-          id: 't1',
-          group_id: 'group1',
-          n1: 314n,
-          f2: 314,
-          f3: 3.14
-        },
-        afterReplicaId: test_utils.rid('t1')
-      });
-
-      await batch.commit('1/1');
-    });
-
-    const TEST_PARAMS = { group_id: 'group1' };
-
-    const checkpoint = await bucketStorage.getCheckpoint();
-
-    const parameters1 = await checkpoint.getParameterSets([
-      ParameterLookup.normalized('mybucket', '1', [314n, 314, 3.14])
-    ]);
-    expect(parameters1).toEqual([TEST_PARAMS]);
-    const parameters2 = await checkpoint.getParameterSets([
-      ParameterLookup.normalized('mybucket', '1', [314, 314n, 3.14])
-    ]);
-    expect(parameters2).toEqual([TEST_PARAMS]);
-    const parameters3 = await checkpoint.getParameterSets([
-      ParameterLookup.normalized('mybucket', '1', [314n, 314, 3])
-    ]);
-    expect(parameters3).toEqual([]);
-  });
-
-  test('save and load parameters with large numbers', async () => {
-    // This ensures serialization / deserialization of "current_data" is done correctly.
-    // This specific case tested here cannot happen with postgres in practice, but we still
-    // test this to ensure correct deserialization.
-
-    await using factory = await generateStorageFactory();
-    const syncRules = await factory.updateSyncRules({
-      content: `
-bucket_definitions:
-  mybucket:
-    parameters:
-      - SELECT group_id FROM test WHERE n1 = token_parameters.n1
-    data: []
-`
-    });
-    const bucketStorage = factory.getInstance(syncRules);
-
-    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-      await batch.save({
-        sourceTable: TEST_TABLE,
-        tag: storage.SaveOperationTag.INSERT,
-        after: {
-          id: 't1',
-          group_id: 'group1',
-          n1: 1152921504606846976n // 2^60
-        },
-        afterReplicaId: test_utils.rid('t1')
-      });
-
-      await batch.save({
-        sourceTable: TEST_TABLE,
-        tag: storage.SaveOperationTag.UPDATE,
-        after: {
-          id: 't1',
-          group_id: 'group1',
-          // Simulate a TOAST value, even though it can't happen for values like this
-          // in practice.
-          n1: undefined
-        },
-        afterReplicaId: test_utils.rid('t1')
-      });
-
-      await batch.commit('1/1');
-    });
-
-    const TEST_PARAMS = { group_id: 'group1' };
-
-    const checkpoint = await bucketStorage.getCheckpoint();
-
-    const parameters1 = await checkpoint.getParameterSets([
-      ParameterLookup.normalized('mybucket', '1', [1152921504606846976n])
-    ]);
-    expect(parameters1).toEqual([TEST_PARAMS]);
-  });
-
+export function registerDataStorageDataTests(generateStorageFactory: storage.TestStorageFactory) {
   test('removing row', async () => {
     await using factory = await generateStorageFactory();
     const syncRules = await factory.updateSyncRules({
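
All five tests removed in the hunk above exercise the same storage contract: parameter rows written through a batch become queryable on a checkpoint via getParameterSets, keyed by ParameterLookup.normalized. They now live in register-data-storage-parameter-tests.ts, together with the workspace-parameter and 'truncate parameters' tests removed in the hunks below. A condensed sketch of that contract, assuming the moved tests keep the same API as the removed ones:

```ts
import { ParameterLookup } from '@powersync/service-sync-rules';

// After replicating { id: 't1', id1: 'user1', group_id: 'group1a' } under a
// parameter query such as
//   SELECT group_id FROM test WHERE id1 = token_parameters.user_id ...
// the lookup key is (bucket definition, 1-based query index, parameter values):
const lookup = ParameterLookup.normalized('mybucket', '1', ['user1']);
// const sets = await checkpoint.getParameterSets([lookup]);
// => [{ group_id: 'group1a' }]
```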
@@ -380,247 +93,6 @@ bucket_definitions:
     ]);
   });
 
-  test('save and load parameters with workspaceId', async () => {
-    const WORKSPACE_TABLE = test_utils.makeTestTable('workspace', ['id']);
-
-    await using factory = await generateStorageFactory();
-    const syncRules = await factory.updateSyncRules({
-      content: `
-bucket_definitions:
-  by_workspace:
-    parameters:
-      - SELECT id as workspace_id FROM workspace WHERE
-        workspace."userId" = token_parameters.user_id
-    data: []
-`
-    });
-    const sync_rules = syncRules.parsed(test_utils.PARSE_OPTIONS).sync_rules;
-    const bucketStorage = factory.getInstance(syncRules);
-
-    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-      await batch.save({
-        sourceTable: WORKSPACE_TABLE,
-        tag: storage.SaveOperationTag.INSERT,
-        after: {
-          id: 'workspace1',
-          userId: 'u1'
-        },
-        afterReplicaId: test_utils.rid('workspace1')
-      });
-      await batch.commit('1/1');
-    });
-    const checkpoint = await bucketStorage.getCheckpoint();
-
-    const parameters = new RequestParameters({ sub: 'u1' }, {});
-
-    const q1 = (sync_rules.bucketSources[0] as SqlBucketDescriptor).parameterQueries[0];
-
-    const lookups = q1.getLookups(parameters);
-    expect(lookups).toEqual([ParameterLookup.normalized('by_workspace', '1', ['u1'])]);
-
-    const parameter_sets = await checkpoint.getParameterSets(lookups);
-    expect(parameter_sets).toEqual([{ workspace_id: 'workspace1' }]);
-
-    const buckets = await sync_rules
-      .getBucketParameterQuerier(test_utils.querierOptions(parameters))
-      .querier.queryDynamicBucketDescriptions({
-        getParameterSets(lookups) {
-          return checkpoint.getParameterSets(lookups);
-        }
-      });
-    expect(buckets).toEqual([
-      { bucket: 'by_workspace["workspace1"]', priority: 3, definition: 'by_workspace', inclusion_reasons: ['default'] }
-    ]);
-  });
-
-  test('save and load parameters with dynamic global buckets', async () => {
-    const WORKSPACE_TABLE = test_utils.makeTestTable('workspace');
-
-    await using factory = await generateStorageFactory();
-    const syncRules = await factory.updateSyncRules({
-      content: `
-bucket_definitions:
-  by_public_workspace:
-    parameters:
-      - SELECT id as workspace_id FROM workspace WHERE
-        workspace.visibility = 'public'
-    data: []
-`
-    });
-    const sync_rules = syncRules.parsed(test_utils.PARSE_OPTIONS).sync_rules;
-    const bucketStorage = factory.getInstance(syncRules);
-
-    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-      await batch.save({
-        sourceTable: WORKSPACE_TABLE,
-        tag: storage.SaveOperationTag.INSERT,
-        after: {
-          id: 'workspace1',
-          visibility: 'public'
-        },
-        afterReplicaId: test_utils.rid('workspace1')
-      });
-
-      await batch.save({
-        sourceTable: WORKSPACE_TABLE,
-        tag: storage.SaveOperationTag.INSERT,
-        after: {
-          id: 'workspace2',
-          visibility: 'private'
-        },
-        afterReplicaId: test_utils.rid('workspace2')
-      });
-
-      await batch.save({
-        sourceTable: WORKSPACE_TABLE,
-        tag: storage.SaveOperationTag.INSERT,
-        after: {
-          id: 'workspace3',
-          visibility: 'public'
-        },
-        afterReplicaId: test_utils.rid('workspace3')
-      });
-
-      await batch.commit('1/1');
-    });
-
-    const checkpoint = await bucketStorage.getCheckpoint();
-
-    const parameters = new RequestParameters({ sub: 'unknown' }, {});
-
-    const q1 = (sync_rules.bucketSources[0] as SqlBucketDescriptor).parameterQueries[0];
-
-    const lookups = q1.getLookups(parameters);
-    expect(lookups).toEqual([ParameterLookup.normalized('by_public_workspace', '1', [])]);
-
-    const parameter_sets = await checkpoint.getParameterSets(lookups);
-    parameter_sets.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
-    expect(parameter_sets).toEqual([{ workspace_id: 'workspace1' }, { workspace_id: 'workspace3' }]);
-
-    const buckets = await sync_rules
-      .getBucketParameterQuerier(test_utils.querierOptions(parameters))
-      .querier.queryDynamicBucketDescriptions({
-        getParameterSets(lookups) {
-          return checkpoint.getParameterSets(lookups);
-        }
-      });
-    buckets.sort((a, b) => a.bucket.localeCompare(b.bucket));
-    expect(buckets).toEqual([
-      {
-        bucket: 'by_public_workspace["workspace1"]',
-        priority: 3,
-        definition: 'by_public_workspace',
-        inclusion_reasons: ['default']
-      },
-      {
-        bucket: 'by_public_workspace["workspace3"]',
-        priority: 3,
-        definition: 'by_public_workspace',
-        inclusion_reasons: ['default']
-      }
-    ]);
-  });
-
-  test('multiple parameter queries', async () => {
-    const WORKSPACE_TABLE = test_utils.makeTestTable('workspace');
-
-    await using factory = await generateStorageFactory();
-    const syncRules = await factory.updateSyncRules({
-      content: `
-bucket_definitions:
-  by_workspace:
-    parameters:
-      - SELECT id as workspace_id FROM workspace WHERE
-        workspace.visibility = 'public'
-      - SELECT id as workspace_id FROM workspace WHERE
-        workspace.user_id = token_parameters.user_id
-    data: []
-`
-    });
-    const sync_rules = syncRules.parsed(test_utils.PARSE_OPTIONS).sync_rules;
-    const bucketStorage = factory.getInstance(syncRules);
-
-    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-      await batch.save({
-        sourceTable: WORKSPACE_TABLE,
-        tag: storage.SaveOperationTag.INSERT,
-        after: {
-          id: 'workspace1',
-          visibility: 'public'
-        },
-        afterReplicaId: test_utils.rid('workspace1')
-      });
-
-      await batch.save({
-        sourceTable: WORKSPACE_TABLE,
-        tag: storage.SaveOperationTag.INSERT,
-        after: {
-          id: 'workspace2',
-          visibility: 'private'
-        },
-        afterReplicaId: test_utils.rid('workspace2')
-      });
-
-      await batch.save({
-        sourceTable: WORKSPACE_TABLE,
-        tag: storage.SaveOperationTag.INSERT,
-        after: {
-          id: 'workspace3',
-          user_id: 'u1',
-          visibility: 'private'
-        },
-        afterReplicaId: test_utils.rid('workspace3')
-      });
-
-      await batch.save({
-        sourceTable: WORKSPACE_TABLE,
-        tag: storage.SaveOperationTag.INSERT,
-        after: {
-          id: 'workspace4',
-          user_id: 'u2',
-          visibility: 'private'
-        },
-        afterReplicaId: test_utils.rid('workspace4')
-      });
-
-      await batch.commit('1/1');
-    });
-
-    const checkpoint = await bucketStorage.getCheckpoint();
-
-    const parameters = new RequestParameters({ sub: 'u1' }, {});
-
-    // Test intermediate values - could be moved to sync_rules.test.ts
-    const q1 = (sync_rules.bucketSources[0] as SqlBucketDescriptor).parameterQueries[0];
-    const lookups1 = q1.getLookups(parameters);
-    expect(lookups1).toEqual([ParameterLookup.normalized('by_workspace', '1', [])]);
-
-    const parameter_sets1 = await checkpoint.getParameterSets(lookups1);
-    parameter_sets1.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
-    expect(parameter_sets1).toEqual([{ workspace_id: 'workspace1' }]);
-
-    const q2 = (sync_rules.bucketSources[0] as SqlBucketDescriptor).parameterQueries[1];
-    const lookups2 = q2.getLookups(parameters);
-    expect(lookups2).toEqual([ParameterLookup.normalized('by_workspace', '2', ['u1'])]);
-
-    const parameter_sets2 = await checkpoint.getParameterSets(lookups2);
-    parameter_sets2.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
-    expect(parameter_sets2).toEqual([{ workspace_id: 'workspace3' }]);
-
-    // Test final values - the important part
-    const buckets = (
-      await sync_rules
-        .getBucketParameterQuerier(test_utils.querierOptions(parameters))
-        .querier.queryDynamicBucketDescriptions({
-          getParameterSets(lookups) {
-            return checkpoint.getParameterSets(lookups);
-          }
-        })
-    ).map((e) => e.bucket);
-    buckets.sort();
-    expect(buckets).toEqual(['by_workspace["workspace1"]', 'by_workspace["workspace3"]']);
-  });
-
   test('changing client ids', async () => {
     await using factory = await generateStorageFactory();
     const syncRules = await factory.updateSyncRules({
@@ -885,41 +357,6 @@ bucket_definitions:
     ]);
   });
 
-  test('truncate parameters', async () => {
-    await using factory = await generateStorageFactory();
-    const syncRules = await factory.updateSyncRules({
-      content: `
-bucket_definitions:
-  mybucket:
-    parameters:
-      - SELECT group_id FROM test WHERE id1 = token_parameters.user_id OR id2 = token_parameters.user_id
-    data: []
-`
-    });
-    const bucketStorage = factory.getInstance(syncRules);
-
-    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-      await batch.save({
-        sourceTable: TEST_TABLE,
-        tag: storage.SaveOperationTag.INSERT,
-        after: {
-          id: 't2',
-          id1: 'user3',
-          id2: 'user4',
-          group_id: 'group2a'
-        },
-        afterReplicaId: test_utils.rid('t2')
-      });
-
-      await batch.truncate([TEST_TABLE]);
-    });
-
-    const checkpoint = await bucketStorage.getCheckpoint();
-
-    const parameters = await checkpoint.getParameterSets([ParameterLookup.normalized('mybucket', '1', ['user1'])]);
-    expect(parameters).toEqual([]);
-  });
-
 
   test('batch with overlapping replica ids', async () => {
     // This test checks that we get the correct output when processing rows with:
@@ -1650,303 +1087,6 @@ bucket_definitions:
     expect(metrics2).toMatchSnapshot();
   });
 
-  test('invalidate cached parsed sync rules', async () => {
-    await using bucketStorageFactory = await generateStorageFactory();
-    const syncRules = await bucketStorageFactory.updateSyncRules({
-      content: `
-bucket_definitions:
-  by_workspace:
-    parameters:
-      - SELECT id as workspace_id FROM workspace WHERE
-        workspace."userId" = token_parameters.user_id
-    data: []
-`
-    });
-    const syncBucketStorage = bucketStorageFactory.getInstance(syncRules);
-
-    const parsedSchema1 = syncBucketStorage.getParsedSyncRules({
-      defaultSchema: 'public'
-    });
-
-    const parsedSchema2 = syncBucketStorage.getParsedSyncRules({
-      defaultSchema: 'public'
-    });
-
-    // These should be cached, this will be the same instance
-    expect(parsedSchema2).equals(parsedSchema1);
-    expect(parsedSchema1.getSourceTables()[0].schema).equals('public');
-
-    const parsedSchema3 = syncBucketStorage.getParsedSyncRules({
-      defaultSchema: 'databasename'
-    });
-
-    // The cache should not be used
-    expect(parsedSchema3).not.equals(parsedSchema2);
-    expect(parsedSchema3.getSourceTables()[0].schema).equals('databasename');
-  });
-
-  test('managed write checkpoints - checkpoint after write', async (context) => {
-    await using factory = await generateStorageFactory();
-    const r = await factory.configureSyncRules({
-      content: `
-bucket_definitions:
-  mybucket:
-    data: []
-`,
-      validate: false
-    });
-    const bucketStorage = factory.getInstance(r.persisted_sync_rules!);
-
-    const abortController = new AbortController();
-    context.onTestFinished(() => abortController.abort());
-    const iter = bucketStorage
-      .watchCheckpointChanges({ user_id: 'user1', signal: abortController.signal })
-      [Symbol.asyncIterator]();
-
-    const writeCheckpoint = await bucketStorage.createManagedWriteCheckpoint({
-      heads: { '1': '5/0' },
-      user_id: 'user1'
-    });
-
-    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-      await batch.keepalive('5/0');
-    });
-
-    const result = await iter.next();
-    expect(result).toMatchObject({
-      done: false,
-      value: {
-        base: {
-          checkpoint: 0n,
-          lsn: '5/0'
-        },
-        writeCheckpoint: writeCheckpoint
-      }
-    });
-  });
-
-  test('managed write checkpoints - write after checkpoint', async (context) => {
-    await using factory = await generateStorageFactory();
-    const r = await factory.configureSyncRules({
-      content: `
-bucket_definitions:
-  mybucket:
-    data: []
-`,
-      validate: false
-    });
-    const bucketStorage = factory.getInstance(r.persisted_sync_rules!);
-
-    const abortController = new AbortController();
-    context.onTestFinished(() => abortController.abort());
-    const iter = bucketStorage
-      .watchCheckpointChanges({ user_id: 'user1', signal: abortController.signal })
-      [Symbol.asyncIterator]();
-
-    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-      await batch.keepalive('5/0');
-    });
-
-    const result = await iter.next();
-    expect(result).toMatchObject({
-      done: false,
-      value: {
-        base: {
-          checkpoint: 0n,
-          lsn: '5/0'
-        },
-        writeCheckpoint: null
-      }
-    });
-
-    const writeCheckpoint = await bucketStorage.createManagedWriteCheckpoint({
-      heads: { '1': '6/0' },
-      user_id: 'user1'
-    });
-    // We have to trigger a new keepalive after the checkpoint, at least to cover postgres storage.
-    // This is what is effectively triggered with RouteAPI.createReplicationHead().
-    // MongoDB storage doesn't explicitly need this anymore.
-    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-      await batch.keepalive('6/0');
-    });
-
-    let result2 = await iter.next();
-    if (result2.value?.base?.lsn == '5/0') {
-      // Events could arrive in a different order in some cases - this caters for it
-      result2 = await iter.next();
-    }
-    expect(result2).toMatchObject({
-      done: false,
-      value: {
-        base: {
-          checkpoint: 0n,
-          lsn: '6/0'
-        },
-        writeCheckpoint: writeCheckpoint
-      }
-    });
-  });
-
-  test('custom write checkpoints - checkpoint after write', async (context) => {
-    await using factory = await generateStorageFactory();
-    const r = await factory.configureSyncRules({
-      content: `
-bucket_definitions:
-  mybucket:
-    data: []
-`,
-      validate: false
-    });
-    const bucketStorage = factory.getInstance(r.persisted_sync_rules!);
-    bucketStorage.setWriteCheckpointMode(storage.WriteCheckpointMode.CUSTOM);
-
-    const abortController = new AbortController();
-    context.onTestFinished(() => abortController.abort());
-    const iter = bucketStorage
-      .watchCheckpointChanges({ user_id: 'user1', signal: abortController.signal })
-      [Symbol.asyncIterator]();
-
-    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-      await batch.addCustomWriteCheckpoint({
-        checkpoint: 5n,
-        user_id: 'user1'
-      });
-      await batch.flush();
-      await batch.keepalive('5/0');
-    });
-
-    const result = await iter.next();
-    expect(result).toMatchObject({
-      done: false,
-      value: {
-        base: {
-          lsn: '5/0'
-        },
-        writeCheckpoint: 5n
-      }
-    });
-  });
-
-  test('custom write checkpoints - standalone checkpoint', async (context) => {
-    await using factory = await generateStorageFactory();
-    const r = await factory.configureSyncRules({
-      content: `
-bucket_definitions:
-  mybucket:
-    data: []
-`,
-      validate: false
-    });
-    const bucketStorage = factory.getInstance(r.persisted_sync_rules!);
-    bucketStorage.setWriteCheckpointMode(storage.WriteCheckpointMode.CUSTOM);
-
-    const abortController = new AbortController();
-    context.onTestFinished(() => abortController.abort());
-    const iter = bucketStorage
-      .watchCheckpointChanges({ user_id: 'user1', signal: abortController.signal })
-      [Symbol.asyncIterator]();
-
-    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-      // Flush to clear state
-      await batch.flush();
-
-      await batch.addCustomWriteCheckpoint({
-        checkpoint: 5n,
-        user_id: 'user1'
-      });
-      await batch.flush();
-      await batch.keepalive('5/0');
-    });
-
-    const result = await iter.next();
-    expect(result).toMatchObject({
-      done: false,
-      value: {
-        base: {
-          lsn: '5/0'
-        },
-        writeCheckpoint: 5n
-      }
-    });
-  });
-
-  test('custom write checkpoints - write after checkpoint', async (context) => {
-    await using factory = await generateStorageFactory();
-    const r = await factory.configureSyncRules({
-      content: `
-bucket_definitions:
-  mybucket:
-    data: []
-`,
-      validate: false
-    });
-    const bucketStorage = factory.getInstance(r.persisted_sync_rules!);
-    bucketStorage.setWriteCheckpointMode(storage.WriteCheckpointMode.CUSTOM);
-
-    const abortController = new AbortController();
-    context.onTestFinished(() => abortController.abort());
-    const iter = bucketStorage
-      .watchCheckpointChanges({ user_id: 'user1', signal: abortController.signal })
-      [Symbol.asyncIterator]();
-
-    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-      await batch.keepalive('5/0');
-    });
-
-    const result = await iter.next();
-    expect(result).toMatchObject({
-      done: false,
-      value: {
-        base: {
-          lsn: '5/0'
-        },
-        writeCheckpoint: null
-      }
-    });
-
-    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-      batch.addCustomWriteCheckpoint({
-        checkpoint: 6n,
-        user_id: 'user1'
-      });
-      await batch.flush();
-      await batch.keepalive('6/0');
-    });
-
-    let result2 = await iter.next();
-    expect(result2).toMatchObject({
-      done: false,
-      value: {
-        base: {
-          // can be 5/0 or 6/0 - actual value not relevant for custom write checkpoints
-          // lsn: '6/0'
-        },
-        writeCheckpoint: 6n
-      }
-    });
-
-    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-      batch.addCustomWriteCheckpoint({
-        checkpoint: 7n,
-        user_id: 'user1'
-      });
-      await batch.flush();
-      await batch.keepalive('7/0');
-    });
-
-    let result3 = await iter.next();
-    expect(result3).toMatchObject({
-      done: false,
-      value: {
-        base: {
-          // can be 5/0, 6/0 or 7/0 - actual value not relevant for custom write checkpoints
-          // lsn: '7/0'
-        },
-        writeCheckpoint: 7n
-      }
-    });
-  });
-
   test('op_id initialization edge case', async () => {
     // Test syncing a batch of data that is small in count,
     // but large enough in size to be split over multiple returned chunks.
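
The six tests removed above (parsed-sync-rules caching plus managed and custom write checkpoints) correspond to the new register-data-storage-checkpoint-tests.ts. When porting custom suites built on the same pattern, note the setup order these tests rely on: the watchCheckpointChanges iterator is created before any batch runs, and aborted on teardown. A minimal sketch of that pattern, assuming the moved tests keep the same shape as the removed ones; `bucketStorage` is an instance obtained from the test storage factory as above:

```ts
// Sketch only: observe checkpoint changes for a user while a batch commits.
const abortController = new AbortController();
const iter = bucketStorage
  .watchCheckpointChanges({ user_id: 'user1', signal: abortController.signal })
  [Symbol.asyncIterator]();

// Each keepalive surfaces as { base: { checkpoint, lsn }, writeCheckpoint }:
await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
  await batch.keepalive('5/0');
});
const result = await iter.next();

abortController.abort(); // stop watching on test teardown
```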
@@ -2062,4 +1202,37 @@ bucket_definitions:
       }
     ]);
   });
+
+  test('unchanged checksums', async () => {
+    await using factory = await generateStorageFactory();
+    const syncRules = await factory.updateSyncRules({
+      content: `
+bucket_definitions:
+  global:
+    data:
+      - SELECT client_id as id, description FROM "%"
+`
+    });
+    const bucketStorage = factory.getInstance(syncRules);
+
+    const sourceTable = TEST_TABLE;
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.save({
+        sourceTable,
+        tag: storage.SaveOperationTag.INSERT,
+        after: {
+          id: 'test1',
+          description: 'test1a'
+        },
+        afterReplicaId: test_utils.rid('test1')
+      });
+      await batch.commit('1/1');
+    });
+    const { checkpoint } = await bucketStorage.getCheckpoint();
+
+    const checksums = [...(await bucketStorage.getChecksums(checkpoint, ['global[]'])).values()];
+    expect(checksums).toEqual([{ bucket: 'global[]', checksum: 1917136889, count: 1 }]);
+    const checksums2 = [...(await bucketStorage.getChecksums(checkpoint + 1n, ['global[]'])).values()];
+    expect(checksums2).toEqual([{ bucket: 'global[]', checksum: 1917136889, count: 1 }]);
+  });
 }