@powersync/service-core-tests 0.0.0-dev-20241219091224

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/CHANGELOG.md +15 -0
  2. package/LICENSE +67 -0
  3. package/README.md +5 -0
  4. package/dist/index.d.ts +4 -0
  5. package/dist/index.js +5 -0
  6. package/dist/index.js.map +1 -0
  7. package/dist/test-utils/bucket-validation.d.ts +42 -0
  8. package/dist/test-utils/bucket-validation.js +115 -0
  9. package/dist/test-utils/bucket-validation.js.map +1 -0
  10. package/dist/test-utils/general-utils.d.ts +31 -0
  11. package/dist/test-utils/general-utils.js +81 -0
  12. package/dist/test-utils/general-utils.js.map +1 -0
  13. package/dist/test-utils/stream_utils.d.ts +6 -0
  14. package/dist/test-utils/stream_utils.js +37 -0
  15. package/dist/test-utils/stream_utils.js.map +1 -0
  16. package/dist/test-utils/test-utils-index.d.ts +3 -0
  17. package/dist/test-utils/test-utils-index.js +4 -0
  18. package/dist/test-utils/test-utils-index.js.map +1 -0
  19. package/dist/tests/register-bucket-validation-tests.d.ts +10 -0
  20. package/dist/tests/register-bucket-validation-tests.js +139 -0
  21. package/dist/tests/register-bucket-validation-tests.js.map +1 -0
  22. package/dist/tests/register-compacting-tests.d.ts +14 -0
  23. package/dist/tests/register-compacting-tests.js +343 -0
  24. package/dist/tests/register-compacting-tests.js.map +1 -0
  25. package/dist/tests/register-data-storage-tests.d.ts +14 -0
  26. package/dist/tests/register-data-storage-tests.js +1571 -0
  27. package/dist/tests/register-data-storage-tests.js.map +1 -0
  28. package/dist/tests/register-sync-tests.d.ts +11 -0
  29. package/dist/tests/register-sync-tests.js +538 -0
  30. package/dist/tests/register-sync-tests.js.map +1 -0
  31. package/dist/tests/tests-index.d.ts +4 -0
  32. package/dist/tests/tests-index.js +5 -0
  33. package/dist/tests/tests-index.js.map +1 -0
  34. package/package.json +28 -0
  35. package/src/index.ts +5 -0
  36. package/src/test-utils/bucket-validation.ts +120 -0
  37. package/src/test-utils/general-utils.ts +113 -0
  38. package/src/test-utils/stream_utils.ts +42 -0
  39. package/src/test-utils/test-utils-index.ts +4 -0
  40. package/src/tests/register-bucket-validation-tests.ts +148 -0
  41. package/src/tests/register-compacting-tests.ts +297 -0
  42. package/src/tests/register-data-storage-tests.ts +1552 -0
  43. package/src/tests/register-sync-tests.ts +521 -0
  44. package/src/tests/tests-index.ts +4 -0
  45. package/tsconfig.json +34 -0
  46. package/tsconfig.tsbuildinfo +1 -0
@@ -0,0 +1,1552 @@
1
+ import { getUuidReplicaIdentityBson, OplogEntry, storage } from '@powersync/service-core';
2
+ import { RequestParameters } from '@powersync/service-sync-rules';
3
+ import { expect, test } from 'vitest';
4
+ import * as test_utils from '../test-utils/test-utils-index.js';
5
+
6
+ export const TEST_TABLE = test_utils.makeTestTable('test', ['id']);
7
+
8
+ /**
9
+ * Normalize data from OplogEntries for comparison in tests.
10
+ * Tests typically expect the stringified result.
11
+ */
12
+ const normalizeOplogData = (data: OplogEntry['data']) => {
13
+ if (data != null && typeof data == 'object') {
14
+ return JSON.stringify(data);
15
+ }
16
+ return data;
17
+ };
18
+
19
+ /**
20
+ * @example
21
+ * ```TypeScript
22
+ *
23
+ * describe('store - mongodb', function () {
24
+ * registerDataStorageTests(MONGO_STORAGE_FACTORY);
25
+ * });
26
+ *
27
+ * ```
28
+ */
29
+ export function registerDataStorageTests(generateStorageFactory: test_utils.StorageFactory) {
30
+ test('save and load parameters', async () => {
31
+ const sync_rules = test_utils.testRules(`
32
+ bucket_definitions:
33
+ mybucket:
34
+ parameters:
35
+ - SELECT group_id FROM test WHERE id1 = token_parameters.user_id OR id2 = token_parameters.user_id
36
+ data: []
37
+ `);
38
+
39
+ using factory = await generateStorageFactory();
40
+ const bucketStorage = factory.getInstance(sync_rules);
41
+
42
+ const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
43
+ await batch.save({
44
+ sourceTable: TEST_TABLE,
45
+ tag: storage.SaveOperationTag.INSERT,
46
+ after: {
47
+ id: 't2',
48
+ id1: 'user3',
49
+ id2: 'user4',
50
+ group_id: 'group2a'
51
+ },
52
+ afterReplicaId: test_utils.rid('t2')
53
+ });
54
+
55
+ await batch.save({
56
+ sourceTable: TEST_TABLE,
57
+ tag: storage.SaveOperationTag.INSERT,
58
+ after: {
59
+ id: 't1',
60
+ id1: 'user1',
61
+ id2: 'user2',
62
+ group_id: 'group1a'
63
+ },
64
+ afterReplicaId: test_utils.rid('t1')
65
+ });
66
+ });
67
+
68
+ const parameters = await bucketStorage.getParameterSets(result!.flushed_op, [['mybucket', '1', 'user1']]);
69
+ expect(parameters).toEqual([
70
+ {
71
+ group_id: 'group1a'
72
+ }
73
+ ]);
74
+ });
75
+
76
+ test('it should use the latest version', async () => {
77
+ const sync_rules = test_utils.testRules(
78
+ `
79
+ bucket_definitions:
80
+ mybucket:
81
+ parameters:
82
+ - SELECT group_id FROM test WHERE id = token_parameters.user_id
83
+ data: []
84
+ `
85
+ );
86
+
87
+ using factory = await generateStorageFactory();
88
+ const bucketStorage = factory.getInstance(sync_rules);
89
+
90
+ const result1 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
91
+ await batch.save({
92
+ sourceTable: TEST_TABLE,
93
+ tag: storage.SaveOperationTag.INSERT,
94
+ after: {
95
+ id: 'user1',
96
+ group_id: 'group1'
97
+ },
98
+ afterReplicaId: test_utils.rid('user1')
99
+ });
100
+ });
101
+ const result2 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
102
+ await batch.save({
103
+ sourceTable: TEST_TABLE,
104
+ tag: storage.SaveOperationTag.INSERT,
105
+ after: {
106
+ id: 'user1',
107
+ group_id: 'group2'
108
+ },
109
+ afterReplicaId: test_utils.rid('user1')
110
+ });
111
+ });
112
+
113
+ const parameters = await bucketStorage.getParameterSets(result2!.flushed_op, [['mybucket', '1', 'user1']]);
114
+ expect(parameters).toEqual([
115
+ {
116
+ group_id: 'group2'
117
+ }
118
+ ]);
119
+
120
+ // Use the checkpoint to get older data if relevant
121
+ const parameters2 = await bucketStorage.getParameterSets(result1!.flushed_op, [['mybucket', '1', 'user1']]);
122
+ expect(parameters2).toEqual([
123
+ {
124
+ group_id: 'group1'
125
+ }
126
+ ]);
127
+ });
128
+
129
+ test('it should use the latest version after updates', async () => {
130
+ const sync_rules = test_utils.testRules(
131
+ `
132
+ bucket_definitions:
133
+ mybucket:
134
+ parameters:
135
+ - SELECT id AS todo_id
136
+ FROM todos
137
+ WHERE list_id IN token_parameters.list_id
138
+ data: []
139
+ `
140
+ );
141
+
142
+ await using factory = await generateStorageFactory();
143
+ await using bucketStorage = factory.getInstance(sync_rules);
144
+
145
+ const table = test_utils.makeTestTable('todos', ['id', 'list_id']);
146
+
147
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
148
+ // Create two todos which initially belong to different lists
149
+ await batch.save({
150
+ sourceTable: table,
151
+ tag: storage.SaveOperationTag.INSERT,
152
+ after: {
153
+ id: 'todo1',
154
+ list_id: 'list1'
155
+ },
156
+ afterReplicaId: test_utils.rid('todo1')
157
+ });
158
+ await batch.save({
159
+ sourceTable: table,
160
+ tag: storage.SaveOperationTag.INSERT,
161
+ after: {
162
+ id: 'todo2',
163
+ list_id: 'list2'
164
+ },
165
+ afterReplicaId: test_utils.rid('todo2')
166
+ });
167
+ });
168
+
169
+ const result2 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
170
+ // Update the second todo item to now belong to list 1
171
+ await batch.save({
172
+ sourceTable: table,
173
+ tag: storage.SaveOperationTag.UPDATE,
174
+ after: {
175
+ id: 'todo2',
176
+ list_id: 'list1'
177
+ },
178
+ afterReplicaId: test_utils.rid('todo2')
179
+ });
180
+ });
181
+
182
+ // We specifically request the todo_ids for both lists.
183
+ // The removal operation for the association of `list2`::`todo2` should not interfere with the new
184
+ // association of `list1`::`todo2`
185
+ const parameters = await bucketStorage.getParameterSets(BigInt(result2!.flushed_op).toString(), [
186
+ ['mybucket', '1', 'list1'],
187
+ ['mybucket', '1', 'list2']
188
+ ]);
189
+
190
+ expect(parameters.sort((a, b) => (a.todo_id as string).localeCompare(b.todo_id as string))).toEqual([
191
+ {
192
+ todo_id: 'todo1'
193
+ },
194
+ {
195
+ todo_id: 'todo2'
196
+ }
197
+ ]);
198
+ });
199
+
200
+ test('save and load parameters with different number types', async () => {
201
+ const sync_rules = test_utils.testRules(
202
+ `
203
+ bucket_definitions:
204
+ mybucket:
205
+ parameters:
206
+ - SELECT group_id FROM test WHERE n1 = token_parameters.n1 and f2 = token_parameters.f2 and f3 = token_parameters.f3
207
+ data: []
208
+ `
209
+ );
210
+
211
+ using factory = await generateStorageFactory();
212
+ const bucketStorage = factory.getInstance(sync_rules);
213
+
214
+ const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
215
+ await batch.save({
216
+ sourceTable: TEST_TABLE,
217
+ tag: storage.SaveOperationTag.INSERT,
218
+ after: {
219
+ id: 't1',
220
+ group_id: 'group1',
221
+ n1: 314n,
222
+ f2: 314,
223
+ f3: 3.14
224
+ },
225
+ afterReplicaId: test_utils.rid('t1')
226
+ });
227
+ });
228
+
229
+ const TEST_PARAMS = { group_id: 'group1' };
230
+
231
+ const checkpoint = result!.flushed_op;
232
+
233
+ const parameters1 = await bucketStorage.getParameterSets(checkpoint, [['mybucket', '1', 314n, 314, 3.14]]);
234
+ expect(parameters1).toEqual([TEST_PARAMS]);
235
+ const parameters2 = await bucketStorage.getParameterSets(checkpoint, [['mybucket', '1', 314, 314n, 3.14]]);
236
+ expect(parameters2).toEqual([TEST_PARAMS]);
237
+ const parameters3 = await bucketStorage.getParameterSets(checkpoint, [['mybucket', '1', 314n, 314, 3]]);
238
+ expect(parameters3).toEqual([]);
239
+ });
240
+
241
+ test('save and load parameters with large numbers', async () => {
242
+ // This ensures serialization / deserialization of "current_data" is done correctly.
243
+ // The specific case tested here cannot happen with postgres in practice, but we still
244
+ // test this to ensure correct deserialization.
245
+
246
+ const sync_rules = test_utils.testRules(
247
+ `
248
+ bucket_definitions:
249
+ mybucket:
250
+ parameters:
251
+ - SELECT group_id FROM test WHERE n1 = token_parameters.n1
252
+ data: []
253
+ `
254
+ );
255
+
256
+ using factory = await generateStorageFactory();
257
+ const bucketStorage = factory.getInstance(sync_rules);
258
+
259
+ const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
260
+ await batch.save({
261
+ sourceTable: TEST_TABLE,
262
+ tag: storage.SaveOperationTag.INSERT,
263
+ after: {
264
+ id: 't1',
265
+ group_id: 'group1',
266
+ n1: 1152921504606846976n // 2^60
267
+ },
268
+ afterReplicaId: test_utils.rid('t1')
269
+ });
270
+
271
+ await batch.save({
272
+ sourceTable: TEST_TABLE,
273
+ tag: storage.SaveOperationTag.UPDATE,
274
+ after: {
275
+ id: 't1',
276
+ group_id: 'group1',
277
+ // Simulate a TOAST value, even though it can't happen for values like this
278
+ // in practice.
279
+ n1: undefined
280
+ },
281
+ afterReplicaId: test_utils.rid('t1')
282
+ });
283
+ });
284
+
285
+ const TEST_PARAMS = { group_id: 'group1' };
286
+
287
+ const checkpoint = result!.flushed_op;
288
+
289
+ const parameters1 = await bucketStorage.getParameterSets(checkpoint, [['mybucket', '1', 1152921504606846976n]]);
290
+ expect(parameters1).toEqual([TEST_PARAMS]);
291
+ });
292
+
293
+ test('removing row', async () => {
294
+ const sync_rules = test_utils.testRules(
295
+ `
296
+ bucket_definitions:
297
+ global:
298
+ data:
299
+ - SELECT id, description FROM "%"
300
+ `
301
+ );
302
+ using factory = await generateStorageFactory();
303
+ const bucketStorage = factory.getInstance(sync_rules);
304
+
305
+ const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
306
+ const sourceTable = TEST_TABLE;
307
+
308
+ await batch.save({
309
+ sourceTable,
310
+ tag: storage.SaveOperationTag.INSERT,
311
+ after: {
312
+ id: 'test1',
313
+ description: 'test1'
314
+ },
315
+ afterReplicaId: test_utils.rid('test1')
316
+ });
317
+ await batch.save({
318
+ sourceTable,
319
+ tag: storage.SaveOperationTag.DELETE,
320
+ beforeReplicaId: test_utils.rid('test1')
321
+ });
322
+ });
323
+
324
+ const checkpoint = result!.flushed_op;
325
+
326
+ const batch = await test_utils.fromAsync(
327
+ bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]))
328
+ );
329
+ const data = batch[0].batch.data.map((d) => {
330
+ return {
331
+ op: d.op,
332
+ object_id: d.object_id,
333
+ checksum: d.checksum
334
+ };
335
+ });
336
+
337
+ const c1 = 2871785649;
338
+ const c2 = 2872534815;
339
+
340
+ expect(data).toEqual([
341
+ { op: 'PUT', object_id: 'test1', checksum: c1 },
342
+ { op: 'REMOVE', object_id: 'test1', checksum: c2 }
343
+ ]);
344
+
345
+ const checksums = [...(await bucketStorage.getChecksums(checkpoint, ['global[]'])).values()];
346
+ expect(checksums).toEqual([
347
+ {
348
+ bucket: 'global[]',
349
+ checksum: (c1 + c2) & 0xffffffff,
350
+ count: 2
351
+ }
352
+ ]);
353
+ });
354
+
355
+ test('save and load parameters with workspaceId', async () => {
356
+ const WORKSPACE_TABLE = test_utils.makeTestTable('workspace', ['id']);
357
+
358
+ const sync_rules_content = test_utils.testRules(
359
+ `
360
+ bucket_definitions:
361
+ by_workspace:
362
+ parameters:
363
+ - SELECT id as workspace_id FROM workspace WHERE
364
+ workspace."userId" = token_parameters.user_id
365
+ data: []
366
+ `
367
+ );
368
+ const sync_rules = sync_rules_content.parsed(test_utils.PARSE_OPTIONS).sync_rules;
369
+
370
+ using factory = await generateStorageFactory();
371
+ const bucketStorage = factory.getInstance(sync_rules_content);
372
+
373
+ const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
374
+ await batch.save({
375
+ sourceTable: WORKSPACE_TABLE,
376
+ tag: storage.SaveOperationTag.INSERT,
377
+ after: {
378
+ id: 'workspace1',
379
+ userId: 'u1'
380
+ },
381
+ afterReplicaId: test_utils.rid('workspace1')
382
+ });
383
+ });
384
+
385
+ const checkpoint = result!.flushed_op;
386
+
387
+ const parameters = new RequestParameters({ sub: 'u1' }, {});
388
+
389
+ const q1 = sync_rules.bucket_descriptors[0].parameter_queries[0];
390
+
391
+ const lookups = q1.getLookups(parameters);
392
+ expect(lookups).toEqual([['by_workspace', '1', 'u1']]);
393
+
394
+ const parameter_sets = await bucketStorage.getParameterSets(checkpoint, lookups);
395
+ expect(parameter_sets).toEqual([{ workspace_id: 'workspace1' }]);
396
+
397
+ const buckets = await sync_rules.queryBucketIds({
398
+ getParameterSets(lookups) {
399
+ return bucketStorage.getParameterSets(checkpoint, lookups);
400
+ },
401
+ parameters
402
+ });
403
+ expect(buckets).toEqual(['by_workspace["workspace1"]']);
404
+ });
405
+
406
+ test('save and load parameters with dynamic global buckets', async () => {
407
+ const WORKSPACE_TABLE = test_utils.makeTestTable('workspace');
408
+
409
+ const sync_rules_content = test_utils.testRules(
410
+ `
411
+ bucket_definitions:
412
+ by_public_workspace:
413
+ parameters:
414
+ - SELECT id as workspace_id FROM workspace WHERE
415
+ workspace.visibility = 'public'
416
+ data: []
417
+ `
418
+ );
419
+ const sync_rules = sync_rules_content.parsed(test_utils.PARSE_OPTIONS).sync_rules;
420
+
421
+ using factory = await generateStorageFactory();
422
+ const bucketStorage = factory.getInstance(sync_rules_content);
423
+
424
+ const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
425
+ await batch.save({
426
+ sourceTable: WORKSPACE_TABLE,
427
+ tag: storage.SaveOperationTag.INSERT,
428
+ after: {
429
+ id: 'workspace1',
430
+ visibility: 'public'
431
+ },
432
+ afterReplicaId: test_utils.rid('workspace1')
433
+ });
434
+
435
+ await batch.save({
436
+ sourceTable: WORKSPACE_TABLE,
437
+ tag: storage.SaveOperationTag.INSERT,
438
+ after: {
439
+ id: 'workspace2',
440
+ visibility: 'private'
441
+ },
442
+ afterReplicaId: test_utils.rid('workspace2')
443
+ });
444
+
445
+ await batch.save({
446
+ sourceTable: WORKSPACE_TABLE,
447
+ tag: storage.SaveOperationTag.INSERT,
448
+ after: {
449
+ id: 'workspace3',
450
+ visibility: 'public'
451
+ },
452
+ afterReplicaId: test_utils.rid('workspace3')
453
+ });
454
+ });
455
+
456
+ const checkpoint = result!.flushed_op;
457
+
458
+ const parameters = new RequestParameters({ sub: 'unknown' }, {});
459
+
460
+ const q1 = sync_rules.bucket_descriptors[0].parameter_queries[0];
461
+
462
+ const lookups = q1.getLookups(parameters);
463
+ expect(lookups).toEqual([['by_public_workspace', '1']]);
464
+
465
+ const parameter_sets = await bucketStorage.getParameterSets(checkpoint, lookups);
466
+ parameter_sets.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
467
+ expect(parameter_sets).toEqual([{ workspace_id: 'workspace1' }, { workspace_id: 'workspace3' }]);
468
+
469
+ const buckets = await sync_rules.queryBucketIds({
470
+ getParameterSets(lookups) {
471
+ return bucketStorage.getParameterSets(checkpoint, lookups);
472
+ },
473
+ parameters
474
+ });
475
+ buckets.sort();
476
+ expect(buckets).toEqual(['by_public_workspace["workspace1"]', 'by_public_workspace["workspace3"]']);
477
+ });
478
+
479
+ test('multiple parameter queries', async () => {
480
+ const WORKSPACE_TABLE = test_utils.makeTestTable('workspace');
481
+
482
+ const sync_rules_content = test_utils.testRules(
483
+ `
484
+ bucket_definitions:
485
+ by_workspace:
486
+ parameters:
487
+ - SELECT id as workspace_id FROM workspace WHERE
488
+ workspace.visibility = 'public'
489
+ - SELECT id as workspace_id FROM workspace WHERE
490
+ workspace.user_id = token_parameters.user_id
491
+ data: []
492
+ `
493
+ );
494
+ const sync_rules = sync_rules_content.parsed(test_utils.PARSE_OPTIONS).sync_rules;
495
+
496
+ using factory = await generateStorageFactory();
497
+ const bucketStorage = factory.getInstance(sync_rules_content);
498
+
499
+ const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
500
+ await batch.save({
501
+ sourceTable: WORKSPACE_TABLE,
502
+ tag: storage.SaveOperationTag.INSERT,
503
+ after: {
504
+ id: 'workspace1',
505
+ visibility: 'public'
506
+ },
507
+ afterReplicaId: test_utils.rid('workspace1')
508
+ });
509
+
510
+ await batch.save({
511
+ sourceTable: WORKSPACE_TABLE,
512
+ tag: storage.SaveOperationTag.INSERT,
513
+ after: {
514
+ id: 'workspace2',
515
+ visibility: 'private'
516
+ },
517
+ afterReplicaId: test_utils.rid('workspace2')
518
+ });
519
+
520
+ await batch.save({
521
+ sourceTable: WORKSPACE_TABLE,
522
+ tag: storage.SaveOperationTag.INSERT,
523
+ after: {
524
+ id: 'workspace3',
525
+ user_id: 'u1',
526
+ visibility: 'private'
527
+ },
528
+ afterReplicaId: test_utils.rid('workspace3')
529
+ });
530
+
531
+ await batch.save({
532
+ sourceTable: WORKSPACE_TABLE,
533
+ tag: storage.SaveOperationTag.INSERT,
534
+ after: {
535
+ id: 'workspace4',
536
+ user_id: 'u2',
537
+ visibility: 'private'
538
+ },
539
+ afterReplicaId: test_utils.rid('workspace4')
540
+ });
541
+ });
542
+
543
+ const checkpoint = result!.flushed_op;
544
+
545
+ const parameters = new RequestParameters({ sub: 'u1' }, {});
546
+
547
+ // Test intermediate values - could be moved to sync_rules.test.ts
548
+ const q1 = sync_rules.bucket_descriptors[0].parameter_queries[0];
549
+ const lookups1 = q1.getLookups(parameters);
550
+ expect(lookups1).toEqual([['by_workspace', '1']]);
551
+
552
+ const parameter_sets1 = await bucketStorage.getParameterSets(checkpoint, lookups1);
553
+ parameter_sets1.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
554
+ expect(parameter_sets1).toEqual([{ workspace_id: 'workspace1' }]);
555
+
556
+ const q2 = sync_rules.bucket_descriptors[0].parameter_queries[1];
557
+ const lookups2 = q2.getLookups(parameters);
558
+ expect(lookups2).toEqual([['by_workspace', '2', 'u1']]);
559
+
560
+ const parameter_sets2 = await bucketStorage.getParameterSets(checkpoint, lookups2);
561
+ parameter_sets2.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
562
+ expect(parameter_sets2).toEqual([{ workspace_id: 'workspace3' }]);
563
+
564
+ // Test final values - the important part
565
+ const buckets = await sync_rules.queryBucketIds({
566
+ getParameterSets(lookups) {
567
+ return bucketStorage.getParameterSets(checkpoint, lookups);
568
+ },
569
+ parameters
570
+ });
571
+ buckets.sort();
572
+ expect(buckets).toEqual(['by_workspace["workspace1"]', 'by_workspace["workspace3"]']);
573
+ });
574
+
575
+ test('changing client ids', async () => {
576
+ const sync_rules = test_utils.testRules(
577
+ `
578
+ bucket_definitions:
579
+ global:
580
+ data:
581
+ - SELECT client_id as id, description FROM "%"
582
+ `
583
+ );
584
+ using factory = await generateStorageFactory();
585
+
586
+ const bucketStorage = factory.getInstance(sync_rules);
587
+
588
+ const sourceTable = TEST_TABLE;
589
+ const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
590
+ await batch.save({
591
+ sourceTable,
592
+ tag: storage.SaveOperationTag.INSERT,
593
+ after: {
594
+ id: 'test1',
595
+ client_id: 'client1a',
596
+ description: 'test1a'
597
+ },
598
+ afterReplicaId: test_utils.rid('test1')
599
+ });
600
+ await batch.save({
601
+ sourceTable,
602
+ tag: storage.SaveOperationTag.UPDATE,
603
+ after: {
604
+ id: 'test1',
605
+ client_id: 'client1b',
606
+ description: 'test1b'
607
+ },
608
+ afterReplicaId: test_utils.rid('test1')
609
+ });
610
+
611
+ await batch.save({
612
+ sourceTable,
613
+ tag: storage.SaveOperationTag.INSERT,
614
+ after: {
615
+ id: 'test2',
616
+ client_id: 'client2',
617
+ description: 'test2'
618
+ },
619
+ afterReplicaId: test_utils.rid('test2')
620
+ });
621
+ });
622
+ const checkpoint = result!.flushed_op;
623
+ const batch = await test_utils.fromAsync(
624
+ bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]))
625
+ );
626
+ const data = batch[0].batch.data.map((d) => {
627
+ return {
628
+ op: d.op,
629
+ object_id: d.object_id
630
+ };
631
+ });
632
+
633
+ expect(data).toEqual([
634
+ { op: 'PUT', object_id: 'client1a' },
635
+ { op: 'PUT', object_id: 'client1b' },
636
+ { op: 'REMOVE', object_id: 'client1a' },
637
+ { op: 'PUT', object_id: 'client2' }
638
+ ]);
639
+ });
640
+
641
+ test('re-apply delete', async () => {
642
+ const sync_rules = test_utils.testRules(
643
+ `
644
+ bucket_definitions:
645
+ global:
646
+ data:
647
+ - SELECT id, description FROM "%"
648
+ `
649
+ );
650
+ using factory = await generateStorageFactory();
651
+ const bucketStorage = factory.getInstance(sync_rules);
652
+
653
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
654
+ const sourceTable = TEST_TABLE;
655
+
656
+ await batch.save({
657
+ sourceTable,
658
+ tag: storage.SaveOperationTag.INSERT,
659
+ after: {
660
+ id: 'test1',
661
+ description: 'test1'
662
+ },
663
+ afterReplicaId: test_utils.rid('test1')
664
+ });
665
+ });
666
+
667
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
668
+ const sourceTable = TEST_TABLE;
669
+
670
+ await batch.save({
671
+ sourceTable,
672
+ tag: storage.SaveOperationTag.DELETE,
673
+ beforeReplicaId: test_utils.rid('test1')
674
+ });
675
+ });
676
+
677
+ const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
678
+ const sourceTable = TEST_TABLE;
679
+
680
+ await batch.save({
681
+ sourceTable,
682
+ tag: storage.SaveOperationTag.DELETE,
683
+ beforeReplicaId: test_utils.rid('test1')
684
+ });
685
+ });
686
+
687
+ const checkpoint = result!.flushed_op;
688
+
689
+ const batch = await test_utils.fromAsync(
690
+ bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]))
691
+ );
692
+ const data = batch[0].batch.data.map((d) => {
693
+ return {
694
+ op: d.op,
695
+ object_id: d.object_id,
696
+ checksum: d.checksum
697
+ };
698
+ });
699
+
700
+ const c1 = 2871785649;
701
+ const c2 = 2872534815;
702
+
703
+ expect(data).toEqual([
704
+ { op: 'PUT', object_id: 'test1', checksum: c1 },
705
+ { op: 'REMOVE', object_id: 'test1', checksum: c2 }
706
+ ]);
707
+
708
+ const checksums = [...(await bucketStorage.getChecksums(checkpoint, ['global[]'])).values()];
709
+ expect(checksums).toEqual([
710
+ {
711
+ bucket: 'global[]',
712
+ checksum: (c1 + c2) & 0xffffffff,
713
+ count: 2
714
+ }
715
+ ]);
716
+ });
717
+
718
+ test('re-apply update + delete', async () => {
719
+ const sync_rules = test_utils.testRules(
720
+ `
721
+ bucket_definitions:
722
+ global:
723
+ data:
724
+ - SELECT id, description FROM "%"
725
+ `
726
+ );
727
+ using factory = await generateStorageFactory();
728
+ const bucketStorage = factory.getInstance(sync_rules);
729
+
730
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
731
+ const sourceTable = TEST_TABLE;
732
+
733
+ await batch.save({
734
+ sourceTable,
735
+ tag: storage.SaveOperationTag.INSERT,
736
+ after: {
737
+ id: 'test1',
738
+ description: 'test1'
739
+ },
740
+ afterReplicaId: test_utils.rid('test1')
741
+ });
742
+ });
743
+
744
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
745
+ const sourceTable = TEST_TABLE;
746
+
747
+ await batch.save({
748
+ sourceTable,
749
+ tag: storage.SaveOperationTag.UPDATE,
750
+ after: {
751
+ id: 'test1',
752
+ description: undefined
753
+ },
754
+ afterReplicaId: test_utils.rid('test1')
755
+ });
756
+
757
+ await batch.save({
758
+ sourceTable,
759
+ tag: storage.SaveOperationTag.UPDATE,
760
+ after: {
761
+ id: 'test1',
762
+ description: undefined
763
+ },
764
+ afterReplicaId: test_utils.rid('test1')
765
+ });
766
+
767
+ await batch.save({
768
+ sourceTable,
769
+ tag: storage.SaveOperationTag.DELETE,
770
+ beforeReplicaId: test_utils.rid('test1')
771
+ });
772
+ });
773
+
774
+ const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
775
+ const sourceTable = TEST_TABLE;
776
+
777
+ await batch.save({
778
+ sourceTable,
779
+ tag: storage.SaveOperationTag.UPDATE,
780
+ after: {
781
+ id: 'test1',
782
+ description: undefined
783
+ },
784
+ afterReplicaId: test_utils.rid('test1')
785
+ });
786
+
787
+ await batch.save({
788
+ sourceTable,
789
+ tag: storage.SaveOperationTag.UPDATE,
790
+ after: {
791
+ id: 'test1',
792
+ description: undefined
793
+ },
794
+ afterReplicaId: test_utils.rid('test1')
795
+ });
796
+
797
+ await batch.save({
798
+ sourceTable,
799
+ tag: storage.SaveOperationTag.DELETE,
800
+ beforeReplicaId: test_utils.rid('test1')
801
+ });
802
+ });
803
+
804
+ const checkpoint = result!.flushed_op;
805
+
806
+ const batch = await test_utils.fromAsync(
807
+ bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]))
808
+ );
809
+
810
+ const data = batch[0].batch.data.map((d) => {
811
+ return {
812
+ op: d.op,
813
+ object_id: d.object_id,
814
+ checksum: d.checksum
815
+ };
816
+ });
817
+
818
+ const c1 = 2871785649;
819
+ const c2 = 2872534815;
820
+
821
+ expect(data).toEqual([
822
+ { op: 'PUT', object_id: 'test1', checksum: c1 },
823
+ { op: 'PUT', object_id: 'test1', checksum: c1 },
824
+ { op: 'PUT', object_id: 'test1', checksum: c1 },
825
+ { op: 'REMOVE', object_id: 'test1', checksum: c2 }
826
+ ]);
827
+
828
+ const checksums = [...(await bucketStorage.getChecksums(checkpoint, ['global[]'])).values()];
829
+ expect(checksums).toEqual([
830
+ {
831
+ bucket: 'global[]',
832
+ checksum: (c1 + c1 + c1 + c2) & 0xffffffff,
833
+ count: 4
834
+ }
835
+ ]);
836
+ });
837
+
838
+ test('truncate parameters', async () => {
839
+ const sync_rules = test_utils.testRules(
840
+ `
841
+ bucket_definitions:
842
+ mybucket:
843
+ parameters:
844
+ - SELECT group_id FROM test WHERE id1 = token_parameters.user_id OR id2 = token_parameters.user_id
845
+ data: []
846
+ `
847
+ );
848
+
849
+ using factory = await generateStorageFactory();
850
+ const bucketStorage = factory.getInstance(sync_rules);
851
+
852
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
853
+ await batch.save({
854
+ sourceTable: TEST_TABLE,
855
+ tag: storage.SaveOperationTag.INSERT,
856
+ after: {
857
+ id: 't2',
858
+ id1: 'user3',
859
+ id2: 'user4',
860
+ group_id: 'group2a'
861
+ },
862
+ afterReplicaId: test_utils.rid('t2')
863
+ });
864
+
865
+ await batch.truncate([TEST_TABLE]);
866
+ });
867
+
868
+ const { checkpoint } = await bucketStorage.getCheckpoint();
869
+
870
+ const parameters = await bucketStorage.getParameterSets(checkpoint, [['mybucket', '1', 'user1']]);
871
+ expect(parameters).toEqual([]);
872
+ });
873
+
874
+ test('batch with overlapping replica ids', async () => {
875
+ // This test checks that we get the correct output when processing rows with:
876
+ // 1. changing replica ids
877
+ // 2. overlapping with replica ids of other rows in the same transaction (at different times)
878
+ // If operations are not processed in input order, this breaks easily.
879
+ // It can break in two places:
880
+ // 1. Not getting the correct "current_data" state for each operation.
881
+ // 2. Output order not being correct.
882
+
883
+ const sync_rules = test_utils.testRules(
884
+ `
885
+ bucket_definitions:
886
+ global:
887
+ data:
888
+ - SELECT id, description FROM "test"
889
+ `
890
+ );
891
+ using factory = await generateStorageFactory();
892
+ const bucketStorage = factory.getInstance(sync_rules);
893
+
894
+ // Pre-setup
895
+ const result1 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
896
+ const sourceTable = TEST_TABLE;
897
+
898
+ await batch.save({
899
+ sourceTable,
900
+ tag: storage.SaveOperationTag.INSERT,
901
+ after: {
902
+ id: 'test1',
903
+ description: 'test1a'
904
+ },
905
+ afterReplicaId: test_utils.rid('test1')
906
+ });
907
+
908
+ await batch.save({
909
+ sourceTable,
910
+ tag: storage.SaveOperationTag.INSERT,
911
+ after: {
912
+ id: 'test2',
913
+ description: 'test2a'
914
+ },
915
+ afterReplicaId: test_utils.rid('test2')
916
+ });
917
+ });
918
+
919
+ const checkpoint1 = result1?.flushed_op ?? '0';
920
+
921
+ // Test batch
922
+ const result2 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
923
+ const sourceTable = TEST_TABLE;
924
+ // b
925
+ await batch.save({
926
+ sourceTable,
927
+ tag: storage.SaveOperationTag.INSERT,
928
+ after: {
929
+ id: 'test1',
930
+ description: 'test1b'
931
+ },
932
+ afterReplicaId: test_utils.rid('test1')
933
+ });
934
+
935
+ await batch.save({
936
+ sourceTable,
937
+ tag: storage.SaveOperationTag.UPDATE,
938
+ before: {
939
+ id: 'test1'
940
+ },
941
+ beforeReplicaId: test_utils.rid('test1'),
942
+ after: {
943
+ id: 'test2',
944
+ description: 'test2b'
945
+ },
946
+ afterReplicaId: test_utils.rid('test2')
947
+ });
948
+
949
+ await batch.save({
950
+ sourceTable,
951
+ tag: storage.SaveOperationTag.UPDATE,
952
+ before: {
953
+ id: 'test2'
954
+ },
955
+ beforeReplicaId: test_utils.rid('test2'),
956
+ after: {
957
+ id: 'test3',
958
+ description: 'test3b'
959
+ },
960
+
961
+ afterReplicaId: test_utils.rid('test3')
962
+ });
963
+
964
+ // c
965
+ await batch.save({
966
+ sourceTable,
967
+ tag: storage.SaveOperationTag.UPDATE,
968
+ after: {
969
+ id: 'test2',
970
+ description: 'test2c'
971
+ },
972
+ afterReplicaId: test_utils.rid('test2')
973
+ });
974
+
975
+ // d
976
+ await batch.save({
977
+ sourceTable,
978
+ tag: storage.SaveOperationTag.INSERT,
979
+ after: {
980
+ id: 'test4',
981
+ description: 'test4d'
982
+ },
983
+ afterReplicaId: test_utils.rid('test4')
984
+ });
985
+
986
+ await batch.save({
987
+ sourceTable,
988
+ tag: storage.SaveOperationTag.UPDATE,
989
+ before: {
990
+ id: 'test4'
991
+ },
992
+ beforeReplicaId: test_utils.rid('test4'),
993
+ after: {
994
+ id: 'test5',
995
+ description: 'test5d'
996
+ },
997
+ afterReplicaId: test_utils.rid('test5')
998
+ });
999
+ });
1000
+
1001
+ const checkpoint2 = result2!.flushed_op;
1002
+
1003
+ const batch = await test_utils.fromAsync(
1004
+ bucketStorage.getBucketDataBatch(checkpoint2, new Map([['global[]', checkpoint1]]))
1005
+ );
1006
+
1007
+ const data = batch[0].batch.data.map((d) => {
1008
+ return {
1009
+ op: d.op,
1010
+ object_id: d.object_id,
1011
+ data: normalizeOplogData(d.data)
1012
+ };
1013
+ });
1014
+
1015
+ // Operations must be in this order
1016
+ expect(data).toEqual([
1017
+ // b
1018
+ { op: 'PUT', object_id: 'test1', data: JSON.stringify({ id: 'test1', description: 'test1b' }) },
1019
+ { op: 'REMOVE', object_id: 'test1', data: null },
1020
+ { op: 'PUT', object_id: 'test2', data: JSON.stringify({ id: 'test2', description: 'test2b' }) },
1021
+ { op: 'REMOVE', object_id: 'test2', data: null },
1022
+ { op: 'PUT', object_id: 'test3', data: JSON.stringify({ id: 'test3', description: 'test3b' }) },
1023
+
1024
+ // c
1025
+ { op: 'PUT', object_id: 'test2', data: JSON.stringify({ id: 'test2', description: 'test2c' }) },
1026
+
1027
+ // d
1028
+ { op: 'PUT', object_id: 'test4', data: JSON.stringify({ id: 'test4', description: 'test4d' }) },
1029
+ { op: 'REMOVE', object_id: 'test4', data: null },
1030
+ { op: 'PUT', object_id: 'test5', data: JSON.stringify({ id: 'test5', description: 'test5d' }) }
1031
+ ]);
1032
+ });
1033
+
1034
+ test('changed data with replica identity full', async () => {
1035
+ const sync_rules = test_utils.testRules(
1036
+ `
1037
+ bucket_definitions:
1038
+ global:
1039
+ data:
1040
+ - SELECT id, description FROM "test"
1041
+ `
1042
+ );
1043
+ function rid2(id: string, description: string) {
1044
+ return getUuidReplicaIdentityBson({ id, description }, [
1045
+ { name: 'id', type: 'VARCHAR', typeId: 25 },
1046
+ { name: 'description', type: 'VARCHAR', typeId: 25 }
1047
+ ]);
1048
+ }
1049
+ using factory = await generateStorageFactory();
1050
+ const bucketStorage = factory.getInstance(sync_rules);
1051
+
1052
+ const sourceTable = test_utils.makeTestTable('test', ['id', 'description']);
1053
+
1054
+ // Pre-setup
1055
+ const result1 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
1056
+ await batch.save({
1057
+ sourceTable,
1058
+ tag: storage.SaveOperationTag.INSERT,
1059
+ after: {
1060
+ id: 'test1',
1061
+ description: 'test1a'
1062
+ },
1063
+ afterReplicaId: rid2('test1', 'test1a')
1064
+ });
1065
+ });
1066
+
1067
+ const checkpoint1 = result1?.flushed_op ?? '0';
1068
+
1069
+ const result2 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
1070
+ // Changed, with a before id
1071
+ await batch.save({
1072
+ sourceTable,
1073
+ tag: storage.SaveOperationTag.UPDATE,
1074
+ before: {
1075
+ id: 'test1',
1076
+ description: 'test1a'
1077
+ },
1078
+ beforeReplicaId: rid2('test1', 'test1a'),
1079
+ after: {
1080
+ id: 'test1',
1081
+ description: 'test1b'
1082
+ },
1083
+ afterReplicaId: rid2('test1', 'test1b')
1084
+ });
1085
+ });
1086
+
1087
+ const result3 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
1088
+ // Delete
1089
+ await batch.save({
1090
+ sourceTable,
1091
+ tag: storage.SaveOperationTag.DELETE,
1092
+ before: {
1093
+ id: 'test1',
1094
+ description: 'test1b'
1095
+ },
1096
+ beforeReplicaId: rid2('test1', 'test1b'),
1097
+ after: undefined
1098
+ });
1099
+ });
1100
+
1101
+ const checkpoint3 = result3!.flushed_op;
1102
+
1103
+ const batch = await test_utils.fromAsync(
1104
+ bucketStorage.getBucketDataBatch(checkpoint3, new Map([['global[]', checkpoint1]]))
1105
+ );
1106
+ const data = batch[0].batch.data.map((d) => {
1107
+ return {
1108
+ op: d.op,
1109
+ object_id: d.object_id,
1110
+ data: normalizeOplogData(d.data),
1111
+ subkey: d.subkey
1112
+ };
1113
+ });
1114
+
1115
+ // Operations must be in this order
1116
+ expect(data).toEqual([
1117
+ // 2
1118
+ // The REMOVE is expected because the subkey changes
1119
+ {
1120
+ op: 'REMOVE',
1121
+ object_id: 'test1',
1122
+ data: null,
1123
+ subkey: '6544e3899293153fa7b38331/740ba9f2-8b0f-53e3-bb17-5f38a9616f0e'
1124
+ },
1125
+ {
1126
+ op: 'PUT',
1127
+ object_id: 'test1',
1128
+ data: JSON.stringify({ id: 'test1', description: 'test1b' }),
1129
+ subkey: '6544e3899293153fa7b38331/500e9b68-a2fd-51ff-9c00-313e2fb9f562'
1130
+ },
1131
+ // 3
1132
+ {
1133
+ op: 'REMOVE',
1134
+ object_id: 'test1',
1135
+ data: null,
1136
+ subkey: '6544e3899293153fa7b38331/500e9b68-a2fd-51ff-9c00-313e2fb9f562'
1137
+ }
1138
+ ]);
1139
+ });
1140
+
1141
+ test('unchanged data with replica identity full', async () => {
1142
+ const sync_rules = test_utils.testRules(
1143
+ `
1144
+ bucket_definitions:
1145
+ global:
1146
+ data:
1147
+ - SELECT id, description FROM "test"
1148
+ `
1149
+ );
1150
+ function rid2(id: string, description: string) {
1151
+ return getUuidReplicaIdentityBson({ id, description }, [
1152
+ { name: 'id', type: 'VARCHAR', typeId: 25 },
1153
+ { name: 'description', type: 'VARCHAR', typeId: 25 }
1154
+ ]);
1155
+ }
1156
+
1157
+ using factory = await generateStorageFactory();
1158
+ const bucketStorage = factory.getInstance(sync_rules);
1159
+
1160
+ const sourceTable = test_utils.makeTestTable('test', ['id', 'description']);
1161
+
1162
+ // Pre-setup
1163
+ const result1 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
1164
+ await batch.save({
1165
+ sourceTable,
1166
+ tag: storage.SaveOperationTag.INSERT,
1167
+ after: {
1168
+ id: 'test1',
1169
+ description: 'test1a'
1170
+ },
1171
+ afterReplicaId: rid2('test1', 'test1a')
1172
+ });
1173
+ });
1174
+
1175
+ const checkpoint1 = result1?.flushed_op ?? '0';
1176
+
1177
+ const result2 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
1178
+ // Unchanged, but has a before id
1179
+ await batch.save({
1180
+ sourceTable,
1181
+ tag: storage.SaveOperationTag.UPDATE,
1182
+ before: {
1183
+ id: 'test1',
1184
+ description: 'test1a'
1185
+ },
1186
+ beforeReplicaId: rid2('test1', 'test1a'),
1187
+ after: {
1188
+ id: 'test1',
1189
+ description: 'test1a'
1190
+ },
1191
+ afterReplicaId: rid2('test1', 'test1a')
1192
+ });
1193
+ });
1194
+
1195
+ const result3 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
1196
+ // Delete
1197
+ await batch.save({
1198
+ sourceTable,
1199
+ tag: storage.SaveOperationTag.DELETE,
1200
+ before: {
1201
+ id: 'test1',
1202
+ description: 'test1a'
1203
+ },
1204
+ beforeReplicaId: rid2('test1', 'test1a'),
1205
+ after: undefined
1206
+ });
1207
+ });
1208
+
1209
+ const checkpoint3 = result3!.flushed_op;
1210
+
1211
+ const batch = await test_utils.fromAsync(
1212
+ bucketStorage.getBucketDataBatch(checkpoint3, new Map([['global[]', checkpoint1]]))
1213
+ );
1214
+ const data = batch[0].batch.data.map((d) => {
1215
+ return {
1216
+ op: d.op,
1217
+ object_id: d.object_id,
1218
+ data: normalizeOplogData(d.data),
1219
+ subkey: d.subkey
1220
+ };
1221
+ });
1222
+
1223
+ // Operations must be in this order
1224
+ expect(data).toEqual([
1225
+ // 2
1226
+ {
1227
+ op: 'PUT',
1228
+ object_id: 'test1',
1229
+ data: JSON.stringify({ id: 'test1', description: 'test1a' }),
1230
+ subkey: '6544e3899293153fa7b38331/740ba9f2-8b0f-53e3-bb17-5f38a9616f0e'
1231
+ },
1232
+ // 3
1233
+ {
1234
+ op: 'REMOVE',
1235
+ object_id: 'test1',
1236
+ data: null,
1237
+ subkey: '6544e3899293153fa7b38331/740ba9f2-8b0f-53e3-bb17-5f38a9616f0e'
1238
+ }
1239
+ ]);
1240
+ });
1241
+
1242
+ test('large batch', async () => {
1243
+ // Test syncing a batch of data that is small in count,
1244
+ // but large enough in size to be split over multiple returned batches.
1245
+ // The specific batch splits are an implementation detail of the storage driver,
1246
+ // and the test will have to be updated when other implementations are added.
1247
+ const sync_rules = test_utils.testRules(
1248
+ `
1249
+ bucket_definitions:
1250
+ global:
1251
+ data:
1252
+ - SELECT id, description FROM "%"
1253
+ `
1254
+ );
1255
+ using factory = await generateStorageFactory();
1256
+ const bucketStorage = factory.getInstance(sync_rules);
1257
+
1258
+ const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
1259
+ const sourceTable = TEST_TABLE;
1260
+
1261
+ const largeDescription = '0123456789'.repeat(12_000_00);
1262
+
1263
+ await batch.save({
1264
+ sourceTable,
1265
+ tag: storage.SaveOperationTag.INSERT,
1266
+ after: {
1267
+ id: 'test1',
1268
+ description: 'test1'
1269
+ },
1270
+ afterReplicaId: test_utils.rid('test1')
1271
+ });
1272
+
1273
+ await batch.save({
1274
+ sourceTable,
1275
+ tag: storage.SaveOperationTag.INSERT,
1276
+ after: {
1277
+ id: 'large1',
1278
+ description: largeDescription
1279
+ },
1280
+ afterReplicaId: test_utils.rid('large1')
1281
+ });
1282
+
1283
+ // Large enough to split the returned batch
1284
+ await batch.save({
1285
+ sourceTable,
1286
+ tag: storage.SaveOperationTag.INSERT,
1287
+ after: {
1288
+ id: 'large2',
1289
+ description: largeDescription
1290
+ },
1291
+ afterReplicaId: test_utils.rid('large2')
1292
+ });
1293
+
1294
+ await batch.save({
1295
+ sourceTable,
1296
+ tag: storage.SaveOperationTag.INSERT,
1297
+ after: {
1298
+ id: 'test3',
1299
+ description: 'test3'
1300
+ },
1301
+ afterReplicaId: test_utils.rid('test3')
1302
+ });
1303
+ });
1304
+
1305
+ const checkpoint = result!.flushed_op;
1306
+
1307
+ const options: storage.BucketDataBatchOptions = {
1308
+ chunkLimitBytes: 16 * 1024 * 1024
1309
+ };
1310
+
1311
+ const batch1 = await test_utils.fromAsync(
1312
+ bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]), options)
1313
+ );
1314
+ expect(test_utils.getBatchData(batch1)).toEqual([
1315
+ { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 },
1316
+ { op_id: '2', op: 'PUT', object_id: 'large1', checksum: 454746904 }
1317
+ ]);
1318
+ expect(test_utils.getBatchMeta(batch1)).toEqual({
1319
+ after: '0',
1320
+ has_more: true,
1321
+ next_after: '2'
1322
+ });
1323
+
1324
+ const batch2 = await test_utils.fromAsync(
1325
+ bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', batch1[0].batch.next_after]]), options)
1326
+ );
1327
+ expect(test_utils.getBatchData(batch2)).toEqual([
1328
+ { op_id: '3', op: 'PUT', object_id: 'large2', checksum: 1795508474 },
1329
+ { op_id: '4', op: 'PUT', object_id: 'test3', checksum: 1359888332 }
1330
+ ]);
1331
+ expect(test_utils.getBatchMeta(batch2)).toEqual({
1332
+ after: '2',
1333
+ has_more: false,
1334
+ next_after: '4'
1335
+ });
1336
+
1337
+ const batch3 = await test_utils.fromAsync(
1338
+ bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', batch2[0].batch.next_after]]), options)
1339
+ );
1340
+ expect(test_utils.getBatchData(batch3)).toEqual([]);
1341
+ expect(test_utils.getBatchMeta(batch3)).toEqual(null);
1342
+ });
1343
+
1344
+ test('long batch', async () => {
1345
+ // Test syncing a batch of data that is limited by count.
1346
+ const sync_rules = test_utils.testRules(
1347
+ `
1348
+ bucket_definitions:
1349
+ global:
1350
+ data:
1351
+ - SELECT id, description FROM "%"
1352
+ `
1353
+ );
1354
+ using factory = await generateStorageFactory();
1355
+ const bucketStorage = factory.getInstance(sync_rules);
1356
+
1357
+ const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
1358
+ const sourceTable = TEST_TABLE;
1359
+
1360
+ for (let i = 1; i <= 6; i++) {
1361
+ await batch.save({
1362
+ sourceTable,
1363
+ tag: storage.SaveOperationTag.INSERT,
1364
+ after: {
1365
+ id: `test${i}`,
1366
+ description: `test${i}`
1367
+ },
1368
+ afterReplicaId: `test${i}`
1369
+ });
1370
+ }
1371
+ });
1372
+
1373
+ const checkpoint = result!.flushed_op;
1374
+
1375
+ const batch1 = await test_utils.oneFromAsync(
1376
+ bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]), { limit: 4 })
1377
+ );
1378
+
1379
+ expect(test_utils.getBatchData(batch1)).toEqual([
1380
+ { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 },
1381
+ { op_id: '2', op: 'PUT', object_id: 'test2', checksum: 730027011 },
1382
+ { op_id: '3', op: 'PUT', object_id: 'test3', checksum: 1359888332 },
1383
+ { op_id: '4', op: 'PUT', object_id: 'test4', checksum: 2049153252 }
1384
+ ]);
1385
+
1386
+ expect(test_utils.getBatchMeta(batch1)).toEqual({
1387
+ after: '0',
1388
+ has_more: true,
1389
+ next_after: '4'
1390
+ });
1391
+
1392
+ const batch2 = await test_utils.oneFromAsync(
1393
+ bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', batch1.batch.next_after]]), {
1394
+ limit: 4
1395
+ })
1396
+ );
1397
+ expect(test_utils.getBatchData(batch2)).toEqual([
1398
+ { op_id: '5', op: 'PUT', object_id: 'test5', checksum: 3686902721 },
1399
+ { op_id: '6', op: 'PUT', object_id: 'test6', checksum: 1974820016 }
1400
+ ]);
1401
+
1402
+ expect(test_utils.getBatchMeta(batch2)).toEqual({
1403
+ after: '4',
1404
+ has_more: false,
1405
+ next_after: '6'
1406
+ });
1407
+
1408
+ const batch3 = await test_utils.fromAsync(
1409
+ bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', batch2.batch.next_after]]), {
1410
+ limit: 4
1411
+ })
1412
+ );
1413
+ expect(test_utils.getBatchData(batch3)).toEqual([]);
1414
+
1415
+ expect(test_utils.getBatchMeta(batch3)).toEqual(null);
1416
+ });
1417
+
1418
+ test('batch should be disposed automatically', async () => {
1419
+ const sync_rules = test_utils.testRules(`
1420
+ bucket_definitions:
1421
+ global:
1422
+ data: []
1423
+ `);
1424
+
1425
+ using factory = await generateStorageFactory();
1426
+ const bucketStorage = factory.getInstance(sync_rules);
1427
+
1428
+ let isDisposed = false;
1429
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
1430
+ batch.registerListener({
1431
+ disposed: () => {
1432
+ isDisposed = true;
1433
+ }
1434
+ });
1435
+ });
1436
+ expect(isDisposed).true;
1437
+
1438
+ isDisposed = false;
1439
+ let errorCaught = false;
1440
+ try {
1441
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
1442
+ batch.registerListener({
1443
+ disposed: () => {
1444
+ isDisposed = true;
1445
+ }
1446
+ });
1447
+ throw new Error(`Testing exceptions`);
1448
+ });
1449
+ } catch (ex) {
1450
+ errorCaught = true;
1451
+ expect(ex.message.includes('Testing')).true;
1452
+ }
1453
+ expect(errorCaught).true;
1454
+ expect(isDisposed).true;
1455
+ });
1456
+
1457
+ test('batch should be disposed automatically', async () => {
1458
+ const sync_rules = test_utils.testRules(`
1459
+ bucket_definitions:
1460
+ global:
1461
+ data: []
1462
+ `);
1463
+
1464
+ using factory = await generateStorageFactory();
1465
+ const bucketStorage = factory.getInstance(sync_rules);
1466
+
1467
+ let isDisposed = false;
1468
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
1469
+ batch.registerListener({
1470
+ disposed: () => {
1471
+ isDisposed = true;
1472
+ }
1473
+ });
1474
+ });
1475
+ expect(isDisposed).true;
1476
+
1477
+ isDisposed = false;
1478
+ let errorCaught = false;
1479
+ try {
1480
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
1481
+ batch.registerListener({
1482
+ disposed: () => {
1483
+ isDisposed = true;
1484
+ }
1485
+ });
1486
+ throw new Error(`Testing exceptions`);
1487
+ });
1488
+ } catch (ex) {
1489
+ errorCaught = true;
1490
+ expect(ex.message.includes('Testing')).true;
1491
+ }
1492
+ expect(errorCaught).true;
1493
+ expect(isDisposed).true;
1494
+ });
1495
+
1496
+ test('empty storage metrics', async () => {
1497
+ using f = await generateStorageFactory({ dropAll: true });
1498
+ const metrics = await f.getStorageMetrics();
1499
+ expect(metrics).toEqual({
1500
+ operations_size_bytes: 0,
1501
+ parameters_size_bytes: 0,
1502
+ replication_size_bytes: 0
1503
+ });
1504
+
1505
+ const r = await f.configureSyncRules('bucket_definitions: {}');
1506
+ const storage = f.getInstance(r.persisted_sync_rules!);
1507
+ await storage.autoActivate();
1508
+
1509
+ const metrics2 = await f.getStorageMetrics();
1510
+ expect(metrics2).toEqual({
1511
+ operations_size_bytes: 0,
1512
+ parameters_size_bytes: 0,
1513
+ replication_size_bytes: 0
1514
+ });
1515
+ });
1516
+
1517
+ test('invalidate cached parsed sync rules', async () => {
1518
+ const sync_rules_content = test_utils.testRules(
1519
+ `
1520
+ bucket_definitions:
1521
+ by_workspace:
1522
+ parameters:
1523
+ - SELECT id as workspace_id FROM workspace WHERE
1524
+ workspace."userId" = token_parameters.user_id
1525
+ data: []
1526
+ `
1527
+ );
1528
+
1529
+ using bucketStorageFactory = await generateStorageFactory();
1530
+ const syncBucketStorage = bucketStorageFactory.getInstance(sync_rules_content);
1531
+
1532
+ const parsedSchema1 = syncBucketStorage.getParsedSyncRules({
1533
+ defaultSchema: 'public'
1534
+ });
1535
+
1536
+ const parsedSchema2 = syncBucketStorage.getParsedSyncRules({
1537
+ defaultSchema: 'public'
1538
+ });
1539
+
1540
+ // These should be cached; this will be the same instance
1541
+ expect(parsedSchema2).equals(parsedSchema1);
1542
+ expect(parsedSchema1.getSourceTables()[0].schema).equals('public');
1543
+
1544
+ const parsedSchema3 = syncBucketStorage.getParsedSyncRules({
1545
+ defaultSchema: 'databasename'
1546
+ });
1547
+
1548
+ // The cache should not be used
1549
+ expect(parsedSchema3).not.equals(parsedSchema2);
1550
+ expect(parsedSchema3.getSourceTables()[0].schema).equals('databasename');
1551
+ });
1552
+ }
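
As the doc comment on `registerDataStorageTests` illustrates, a storage implementation package mounts this suite inside its own Vitest `describe` block and supplies its storage factory. A minimal sketch of that wiring, assuming the register functions are re-exported from the package entry point, with a hypothetical `MY_STORAGE_FACTORY` and import path used purely for illustration:

```TypeScript
import { describe } from 'vitest';
// Assumption: registerDataStorageTests is re-exported from the package root (src/index.ts).
import { registerDataStorageTests } from '@powersync/service-core-tests';
// Hypothetical factory implementing test_utils.StorageFactory for the driver under test.
import { MY_STORAGE_FACTORY } from './my-storage-factory.js';

describe('data storage - my driver', () => {
  // Registers every test defined in register-data-storage-tests.ts against the supplied factory.
  registerDataStorageTests(MY_STORAGE_FACTORY);
});
```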