@powersync/service-core-tests 0.0.0-dev-20241219091224

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/CHANGELOG.md +15 -0
  2. package/LICENSE +67 -0
  3. package/README.md +5 -0
  4. package/dist/index.d.ts +4 -0
  5. package/dist/index.js +5 -0
  6. package/dist/index.js.map +1 -0
  7. package/dist/test-utils/bucket-validation.d.ts +42 -0
  8. package/dist/test-utils/bucket-validation.js +115 -0
  9. package/dist/test-utils/bucket-validation.js.map +1 -0
  10. package/dist/test-utils/general-utils.d.ts +31 -0
  11. package/dist/test-utils/general-utils.js +81 -0
  12. package/dist/test-utils/general-utils.js.map +1 -0
  13. package/dist/test-utils/stream_utils.d.ts +6 -0
  14. package/dist/test-utils/stream_utils.js +37 -0
  15. package/dist/test-utils/stream_utils.js.map +1 -0
  16. package/dist/test-utils/test-utils-index.d.ts +3 -0
  17. package/dist/test-utils/test-utils-index.js +4 -0
  18. package/dist/test-utils/test-utils-index.js.map +1 -0
  19. package/dist/tests/register-bucket-validation-tests.d.ts +10 -0
  20. package/dist/tests/register-bucket-validation-tests.js +139 -0
  21. package/dist/tests/register-bucket-validation-tests.js.map +1 -0
  22. package/dist/tests/register-compacting-tests.d.ts +14 -0
  23. package/dist/tests/register-compacting-tests.js +343 -0
  24. package/dist/tests/register-compacting-tests.js.map +1 -0
  25. package/dist/tests/register-data-storage-tests.d.ts +14 -0
  26. package/dist/tests/register-data-storage-tests.js +1571 -0
  27. package/dist/tests/register-data-storage-tests.js.map +1 -0
  28. package/dist/tests/register-sync-tests.d.ts +11 -0
  29. package/dist/tests/register-sync-tests.js +538 -0
  30. package/dist/tests/register-sync-tests.js.map +1 -0
  31. package/dist/tests/tests-index.d.ts +4 -0
  32. package/dist/tests/tests-index.js +5 -0
  33. package/dist/tests/tests-index.js.map +1 -0
  34. package/package.json +28 -0
  35. package/src/index.ts +5 -0
  36. package/src/test-utils/bucket-validation.ts +120 -0
  37. package/src/test-utils/general-utils.ts +113 -0
  38. package/src/test-utils/stream_utils.ts +42 -0
  39. package/src/test-utils/test-utils-index.ts +4 -0
  40. package/src/tests/register-bucket-validation-tests.ts +148 -0
  41. package/src/tests/register-compacting-tests.ts +297 -0
  42. package/src/tests/register-data-storage-tests.ts +1552 -0
  43. package/src/tests/register-sync-tests.ts +521 -0
  44. package/src/tests/tests-index.ts +4 -0
  45. package/tsconfig.json +34 -0
  46. package/tsconfig.tsbuildinfo +1 -0
package/dist/tests/register-data-storage-tests.js
@@ -0,0 +1,1571 @@
+ var __addDisposableResource = (this && this.__addDisposableResource) || function (env, value, async) {
+ if (value !== null && value !== void 0) {
+ if (typeof value !== "object" && typeof value !== "function") throw new TypeError("Object expected.");
+ var dispose, inner;
+ if (async) {
+ if (!Symbol.asyncDispose) throw new TypeError("Symbol.asyncDispose is not defined.");
+ dispose = value[Symbol.asyncDispose];
+ }
+ if (dispose === void 0) {
+ if (!Symbol.dispose) throw new TypeError("Symbol.dispose is not defined.");
+ dispose = value[Symbol.dispose];
+ if (async) inner = dispose;
+ }
+ if (typeof dispose !== "function") throw new TypeError("Object not disposable.");
+ if (inner) dispose = function() { try { inner.call(this); } catch (e) { return Promise.reject(e); } };
+ env.stack.push({ value: value, dispose: dispose, async: async });
+ }
+ else if (async) {
+ env.stack.push({ async: true });
+ }
+ return value;
+ };
+ var __disposeResources = (this && this.__disposeResources) || (function (SuppressedError) {
+ return function (env) {
+ function fail(e) {
+ env.error = env.hasError ? new SuppressedError(e, env.error, "An error was suppressed during disposal.") : e;
+ env.hasError = true;
+ }
+ var r, s = 0;
+ function next() {
+ while (r = env.stack.pop()) {
+ try {
+ if (!r.async && s === 1) return s = 0, env.stack.push(r), Promise.resolve().then(next);
+ if (r.dispose) {
+ var result = r.dispose.call(r.value);
+ if (r.async) return s |= 2, Promise.resolve(result).then(next, function(e) { fail(e); return next(); });
+ }
+ else s |= 1;
+ }
+ catch (e) {
+ fail(e);
+ }
+ }
+ if (s === 1) return env.hasError ? Promise.reject(env.error) : Promise.resolve();
+ if (env.hasError) throw env.error;
+ }
+ return next();
+ };
+ })(typeof SuppressedError === "function" ? SuppressedError : function (error, suppressed, message) {
+ var e = new Error(message);
+ return e.name = "SuppressedError", e.error = error, e.suppressed = suppressed, e;
+ });
+ import { getUuidReplicaIdentityBson, storage } from '@powersync/service-core';
+ import { RequestParameters } from '@powersync/service-sync-rules';
+ import { expect, test } from 'vitest';
+ import * as test_utils from '../test-utils/test-utils-index.js';
+ export const TEST_TABLE = test_utils.makeTestTable('test', ['id']);
+ /**
+ * Normalize data from OplogEntries for comparison in tests.
+ * Tests typically expect the stringified result
+ */
+ const normalizeOplogData = (data) => {
+ if (data != null && typeof data == 'object') {
+ return JSON.stringify(data);
+ }
+ return data;
+ };
+ /**
+ * @example
+ * ```TypeScript
+ *
+ * describe('store - mongodb', function () {
+ * registerDataStorageTests(MONGO_STORAGE_FACTORY);
+ * });
+ *
+ * ```
+ */
+ export function registerDataStorageTests(generateStorageFactory) {
+ test('save and load parameters', async () => {
+ const env_1 = { stack: [], error: void 0, hasError: false };
+ try {
+ const sync_rules = test_utils.testRules(`
+ bucket_definitions:
+ mybucket:
+ parameters:
+ - SELECT group_id FROM test WHERE id1 = token_parameters.user_id OR id2 = token_parameters.user_id
+ data: []
+ `);
+ const factory = __addDisposableResource(env_1, await generateStorageFactory(), false);
+ const bucketStorage = factory.getInstance(sync_rules);
+ const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ await batch.save({
+ sourceTable: TEST_TABLE,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 't2',
+ id1: 'user3',
+ id2: 'user4',
+ group_id: 'group2a'
+ },
+ afterReplicaId: test_utils.rid('t2')
+ });
+ await batch.save({
+ sourceTable: TEST_TABLE,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 't1',
+ id1: 'user1',
+ id2: 'user2',
+ group_id: 'group1a'
+ },
+ afterReplicaId: test_utils.rid('t1')
+ });
+ });
+ const parameters = await bucketStorage.getParameterSets(result.flushed_op, [['mybucket', '1', 'user1']]);
+ expect(parameters).toEqual([
+ {
+ group_id: 'group1a'
+ }
+ ]);
+ }
+ catch (e_1) {
+ env_1.error = e_1;
+ env_1.hasError = true;
+ }
+ finally {
+ __disposeResources(env_1);
+ }
+ });
+ test('it should use the latest version', async () => {
+ const env_2 = { stack: [], error: void 0, hasError: false };
+ try {
+ const sync_rules = test_utils.testRules(`
+ bucket_definitions:
+ mybucket:
+ parameters:
+ - SELECT group_id FROM test WHERE id = token_parameters.user_id
+ data: []
+ `);
+ const factory = __addDisposableResource(env_2, await generateStorageFactory(), false);
+ const bucketStorage = factory.getInstance(sync_rules);
+ const result1 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ await batch.save({
+ sourceTable: TEST_TABLE,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'user1',
+ group_id: 'group1'
+ },
+ afterReplicaId: test_utils.rid('user1')
+ });
+ });
+ const result2 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ await batch.save({
+ sourceTable: TEST_TABLE,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'user1',
+ group_id: 'group2'
+ },
+ afterReplicaId: test_utils.rid('user1')
+ });
+ });
+ const parameters = await bucketStorage.getParameterSets(result2.flushed_op, [['mybucket', '1', 'user1']]);
+ expect(parameters).toEqual([
+ {
+ group_id: 'group2'
+ }
+ ]);
+ // Use the checkpoint to get older data if relevant
+ const parameters2 = await bucketStorage.getParameterSets(result1.flushed_op, [['mybucket', '1', 'user1']]);
+ expect(parameters2).toEqual([
+ {
+ group_id: 'group1'
+ }
+ ]);
+ }
+ catch (e_2) {
+ env_2.error = e_2;
+ env_2.hasError = true;
+ }
+ finally {
+ __disposeResources(env_2);
+ }
+ });
+ test('it should use the latest version after updates', async () => {
+ const env_3 = { stack: [], error: void 0, hasError: false };
+ try {
+ const sync_rules = test_utils.testRules(`
+ bucket_definitions:
+ mybucket:
+ parameters:
+ - SELECT id AS todo_id
+ FROM todos
+ WHERE list_id IN token_parameters.list_id
+ data: []
+ `);
+ const factory = __addDisposableResource(env_3, await generateStorageFactory(), true);
+ const bucketStorage = __addDisposableResource(env_3, factory.getInstance(sync_rules), true);
+ const table = test_utils.makeTestTable('todos', ['id', 'list_id']);
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ // Create two todos which initially belong to different lists
+ await batch.save({
+ sourceTable: table,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'todo1',
+ list_id: 'list1'
+ },
+ afterReplicaId: test_utils.rid('todo1')
+ });
+ await batch.save({
+ sourceTable: table,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'todo2',
+ list_id: 'list2'
+ },
+ afterReplicaId: test_utils.rid('todo2')
+ });
+ });
+ const result2 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ // Update the second todo item to now belong to list 1
+ await batch.save({
+ sourceTable: table,
+ tag: storage.SaveOperationTag.UPDATE,
+ after: {
+ id: 'todo2',
+ list_id: 'list1'
+ },
+ afterReplicaId: test_utils.rid('todo2')
+ });
+ });
+ // We specifically request the todo_ids for both lists.
+ // There removal operation for the association of `list2`::`todo2` should not interfere with the new
+ // association of `list1`::`todo2`
+ const parameters = await bucketStorage.getParameterSets(BigInt(result2.flushed_op).toString(), [
+ ['mybucket', '1', 'list1'],
+ ['mybucket', '1', 'list2']
+ ]);
+ expect(parameters.sort((a, b) => a.todo_id.localeCompare(b.todo_id))).toEqual([
+ {
+ todo_id: 'todo1'
+ },
+ {
+ todo_id: 'todo2'
+ }
+ ]);
+ }
+ catch (e_3) {
+ env_3.error = e_3;
+ env_3.hasError = true;
+ }
+ finally {
+ const result_1 = __disposeResources(env_3);
+ if (result_1)
+ await result_1;
+ }
+ });
+ test('save and load parameters with different number types', async () => {
+ const env_4 = { stack: [], error: void 0, hasError: false };
+ try {
+ const sync_rules = test_utils.testRules(`
+ bucket_definitions:
+ mybucket:
+ parameters:
+ - SELECT group_id FROM test WHERE n1 = token_parameters.n1 and f2 = token_parameters.f2 and f3 = token_parameters.f3
+ data: []
+ `);
+ const factory = __addDisposableResource(env_4, await generateStorageFactory(), false);
+ const bucketStorage = factory.getInstance(sync_rules);
+ const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ await batch.save({
+ sourceTable: TEST_TABLE,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 't1',
+ group_id: 'group1',
+ n1: 314n,
+ f2: 314,
+ f3: 3.14
+ },
+ afterReplicaId: test_utils.rid('t1')
+ });
+ });
+ const TEST_PARAMS = { group_id: 'group1' };
+ const checkpoint = result.flushed_op;
+ const parameters1 = await bucketStorage.getParameterSets(checkpoint, [['mybucket', '1', 314n, 314, 3.14]]);
+ expect(parameters1).toEqual([TEST_PARAMS]);
+ const parameters2 = await bucketStorage.getParameterSets(checkpoint, [['mybucket', '1', 314, 314n, 3.14]]);
+ expect(parameters2).toEqual([TEST_PARAMS]);
+ const parameters3 = await bucketStorage.getParameterSets(checkpoint, [['mybucket', '1', 314n, 314, 3]]);
+ expect(parameters3).toEqual([]);
+ }
+ catch (e_4) {
+ env_4.error = e_4;
+ env_4.hasError = true;
+ }
+ finally {
+ __disposeResources(env_4);
+ }
+ });
+ test('save and load parameters with large numbers', async () => {
+ const env_5 = { stack: [], error: void 0, hasError: false };
+ try {
+ // This ensures serialization / deserialization of "current_data" is done correctly.
+ // This specific case tested here cannot happen with postgres in practice, but we still
+ // test this to ensure correct deserialization.
+ const sync_rules = test_utils.testRules(`
+ bucket_definitions:
+ mybucket:
+ parameters:
+ - SELECT group_id FROM test WHERE n1 = token_parameters.n1
+ data: []
+ `);
+ const factory = __addDisposableResource(env_5, await generateStorageFactory(), false);
+ const bucketStorage = factory.getInstance(sync_rules);
+ const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ await batch.save({
+ sourceTable: TEST_TABLE,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 't1',
+ group_id: 'group1',
+ n1: 1152921504606846976n // 2^60
+ },
+ afterReplicaId: test_utils.rid('t1')
+ });
+ await batch.save({
+ sourceTable: TEST_TABLE,
+ tag: storage.SaveOperationTag.UPDATE,
+ after: {
+ id: 't1',
+ group_id: 'group1',
+ // Simulate a TOAST value, even though it can't happen for values like this
+ // in practice.
+ n1: undefined
+ },
+ afterReplicaId: test_utils.rid('t1')
+ });
+ });
+ const TEST_PARAMS = { group_id: 'group1' };
+ const checkpoint = result.flushed_op;
+ const parameters1 = await bucketStorage.getParameterSets(checkpoint, [['mybucket', '1', 1152921504606846976n]]);
+ expect(parameters1).toEqual([TEST_PARAMS]);
+ }
+ catch (e_5) {
+ env_5.error = e_5;
+ env_5.hasError = true;
+ }
+ finally {
+ __disposeResources(env_5);
+ }
+ });
+ test('removing row', async () => {
+ const env_6 = { stack: [], error: void 0, hasError: false };
+ try {
+ const sync_rules = test_utils.testRules(`
+ bucket_definitions:
+ global:
+ data:
+ - SELECT id, description FROM "%"
+ `);
+ const factory = __addDisposableResource(env_6, await generateStorageFactory(), false);
+ const bucketStorage = factory.getInstance(sync_rules);
+ const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ const sourceTable = TEST_TABLE;
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'test1',
+ description: 'test1'
+ },
+ afterReplicaId: test_utils.rid('test1')
+ });
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.DELETE,
+ beforeReplicaId: test_utils.rid('test1')
+ });
+ });
+ const checkpoint = result.flushed_op;
+ const batch = await test_utils.fromAsync(bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])));
+ const data = batch[0].batch.data.map((d) => {
+ return {
+ op: d.op,
+ object_id: d.object_id,
+ checksum: d.checksum
+ };
+ });
+ const c1 = 2871785649;
+ const c2 = 2872534815;
+ expect(data).toEqual([
+ { op: 'PUT', object_id: 'test1', checksum: c1 },
+ { op: 'REMOVE', object_id: 'test1', checksum: c2 }
+ ]);
+ const checksums = [...(await bucketStorage.getChecksums(checkpoint, ['global[]'])).values()];
+ expect(checksums).toEqual([
+ {
+ bucket: 'global[]',
+ checksum: (c1 + c2) & 0xffffffff,
+ count: 2
+ }
+ ]);
+ }
+ catch (e_6) {
+ env_6.error = e_6;
+ env_6.hasError = true;
+ }
+ finally {
+ __disposeResources(env_6);
+ }
+ });
+ test('save and load parameters with workspaceId', async () => {
+ const env_7 = { stack: [], error: void 0, hasError: false };
+ try {
+ const WORKSPACE_TABLE = test_utils.makeTestTable('workspace', ['id']);
+ const sync_rules_content = test_utils.testRules(`
+ bucket_definitions:
+ by_workspace:
+ parameters:
+ - SELECT id as workspace_id FROM workspace WHERE
+ workspace."userId" = token_parameters.user_id
+ data: []
+ `);
+ const sync_rules = sync_rules_content.parsed(test_utils.PARSE_OPTIONS).sync_rules;
+ const factory = __addDisposableResource(env_7, await generateStorageFactory(), false);
+ const bucketStorage = factory.getInstance(sync_rules_content);
+ const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ await batch.save({
+ sourceTable: WORKSPACE_TABLE,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'workspace1',
+ userId: 'u1'
+ },
+ afterReplicaId: test_utils.rid('workspace1')
+ });
+ });
+ const checkpoint = result.flushed_op;
+ const parameters = new RequestParameters({ sub: 'u1' }, {});
+ const q1 = sync_rules.bucket_descriptors[0].parameter_queries[0];
+ const lookups = q1.getLookups(parameters);
+ expect(lookups).toEqual([['by_workspace', '1', 'u1']]);
+ const parameter_sets = await bucketStorage.getParameterSets(checkpoint, lookups);
+ expect(parameter_sets).toEqual([{ workspace_id: 'workspace1' }]);
+ const buckets = await sync_rules.queryBucketIds({
+ getParameterSets(lookups) {
+ return bucketStorage.getParameterSets(checkpoint, lookups);
+ },
+ parameters
+ });
+ expect(buckets).toEqual(['by_workspace["workspace1"]']);
+ }
+ catch (e_7) {
+ env_7.error = e_7;
+ env_7.hasError = true;
+ }
+ finally {
+ __disposeResources(env_7);
+ }
+ });
+ test('save and load parameters with dynamic global buckets', async () => {
+ const env_8 = { stack: [], error: void 0, hasError: false };
+ try {
+ const WORKSPACE_TABLE = test_utils.makeTestTable('workspace');
+ const sync_rules_content = test_utils.testRules(`
+ bucket_definitions:
+ by_public_workspace:
+ parameters:
+ - SELECT id as workspace_id FROM workspace WHERE
+ workspace.visibility = 'public'
+ data: []
+ `);
+ const sync_rules = sync_rules_content.parsed(test_utils.PARSE_OPTIONS).sync_rules;
+ const factory = __addDisposableResource(env_8, await generateStorageFactory(), false);
+ const bucketStorage = factory.getInstance(sync_rules_content);
+ const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ await batch.save({
+ sourceTable: WORKSPACE_TABLE,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'workspace1',
+ visibility: 'public'
+ },
+ afterReplicaId: test_utils.rid('workspace1')
+ });
+ await batch.save({
+ sourceTable: WORKSPACE_TABLE,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'workspace2',
+ visibility: 'private'
+ },
+ afterReplicaId: test_utils.rid('workspace2')
+ });
+ await batch.save({
+ sourceTable: WORKSPACE_TABLE,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'workspace3',
+ visibility: 'public'
+ },
+ afterReplicaId: test_utils.rid('workspace3')
+ });
+ });
+ const checkpoint = result.flushed_op;
+ const parameters = new RequestParameters({ sub: 'unknown' }, {});
+ const q1 = sync_rules.bucket_descriptors[0].parameter_queries[0];
+ const lookups = q1.getLookups(parameters);
+ expect(lookups).toEqual([['by_public_workspace', '1']]);
+ const parameter_sets = await bucketStorage.getParameterSets(checkpoint, lookups);
+ parameter_sets.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
+ expect(parameter_sets).toEqual([{ workspace_id: 'workspace1' }, { workspace_id: 'workspace3' }]);
+ const buckets = await sync_rules.queryBucketIds({
+ getParameterSets(lookups) {
+ return bucketStorage.getParameterSets(checkpoint, lookups);
+ },
+ parameters
+ });
+ buckets.sort();
+ expect(buckets).toEqual(['by_public_workspace["workspace1"]', 'by_public_workspace["workspace3"]']);
+ }
+ catch (e_8) {
+ env_8.error = e_8;
+ env_8.hasError = true;
+ }
+ finally {
+ __disposeResources(env_8);
+ }
+ });
+ test('multiple parameter queries', async () => {
+ const env_9 = { stack: [], error: void 0, hasError: false };
+ try {
+ const WORKSPACE_TABLE = test_utils.makeTestTable('workspace');
+ const sync_rules_content = test_utils.testRules(`
+ bucket_definitions:
+ by_workspace:
+ parameters:
+ - SELECT id as workspace_id FROM workspace WHERE
+ workspace.visibility = 'public'
+ - SELECT id as workspace_id FROM workspace WHERE
+ workspace.user_id = token_parameters.user_id
+ data: []
+ `);
+ const sync_rules = sync_rules_content.parsed(test_utils.PARSE_OPTIONS).sync_rules;
+ const factory = __addDisposableResource(env_9, await generateStorageFactory(), false);
+ const bucketStorage = factory.getInstance(sync_rules_content);
+ const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ await batch.save({
+ sourceTable: WORKSPACE_TABLE,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'workspace1',
+ visibility: 'public'
+ },
+ afterReplicaId: test_utils.rid('workspace1')
+ });
+ await batch.save({
+ sourceTable: WORKSPACE_TABLE,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'workspace2',
+ visibility: 'private'
+ },
+ afterReplicaId: test_utils.rid('workspace2')
+ });
+ await batch.save({
+ sourceTable: WORKSPACE_TABLE,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'workspace3',
+ user_id: 'u1',
+ visibility: 'private'
+ },
+ afterReplicaId: test_utils.rid('workspace3')
+ });
+ await batch.save({
+ sourceTable: WORKSPACE_TABLE,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'workspace4',
+ user_id: 'u2',
+ visibility: 'private'
+ },
+ afterReplicaId: test_utils.rid('workspace4')
+ });
+ });
+ const checkpoint = result.flushed_op;
+ const parameters = new RequestParameters({ sub: 'u1' }, {});
+ // Test intermediate values - could be moved to sync_rules.test.ts
+ const q1 = sync_rules.bucket_descriptors[0].parameter_queries[0];
+ const lookups1 = q1.getLookups(parameters);
+ expect(lookups1).toEqual([['by_workspace', '1']]);
+ const parameter_sets1 = await bucketStorage.getParameterSets(checkpoint, lookups1);
+ parameter_sets1.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
+ expect(parameter_sets1).toEqual([{ workspace_id: 'workspace1' }]);
+ const q2 = sync_rules.bucket_descriptors[0].parameter_queries[1];
+ const lookups2 = q2.getLookups(parameters);
+ expect(lookups2).toEqual([['by_workspace', '2', 'u1']]);
+ const parameter_sets2 = await bucketStorage.getParameterSets(checkpoint, lookups2);
+ parameter_sets2.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
+ expect(parameter_sets2).toEqual([{ workspace_id: 'workspace3' }]);
+ // Test final values - the important part
+ const buckets = await sync_rules.queryBucketIds({
+ getParameterSets(lookups) {
+ return bucketStorage.getParameterSets(checkpoint, lookups);
+ },
+ parameters
+ });
+ buckets.sort();
+ expect(buckets).toEqual(['by_workspace["workspace1"]', 'by_workspace["workspace3"]']);
+ }
+ catch (e_9) {
+ env_9.error = e_9;
+ env_9.hasError = true;
+ }
+ finally {
+ __disposeResources(env_9);
+ }
+ });
+ test('changing client ids', async () => {
+ const env_10 = { stack: [], error: void 0, hasError: false };
+ try {
+ const sync_rules = test_utils.testRules(`
+ bucket_definitions:
+ global:
+ data:
+ - SELECT client_id as id, description FROM "%"
+ `);
+ const factory = __addDisposableResource(env_10, await generateStorageFactory(), false);
+ const bucketStorage = factory.getInstance(sync_rules);
+ const sourceTable = TEST_TABLE;
+ const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'test1',
+ client_id: 'client1a',
+ description: 'test1a'
+ },
+ afterReplicaId: test_utils.rid('test1')
+ });
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.UPDATE,
+ after: {
+ id: 'test1',
+ client_id: 'client1b',
+ description: 'test1b'
+ },
+ afterReplicaId: test_utils.rid('test1')
+ });
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'test2',
+ client_id: 'client2',
+ description: 'test2'
+ },
+ afterReplicaId: test_utils.rid('test2')
+ });
+ });
+ const checkpoint = result.flushed_op;
+ const batch = await test_utils.fromAsync(bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])));
+ const data = batch[0].batch.data.map((d) => {
+ return {
+ op: d.op,
+ object_id: d.object_id
+ };
+ });
+ expect(data).toEqual([
+ { op: 'PUT', object_id: 'client1a' },
+ { op: 'PUT', object_id: 'client1b' },
+ { op: 'REMOVE', object_id: 'client1a' },
+ { op: 'PUT', object_id: 'client2' }
+ ]);
+ }
+ catch (e_10) {
+ env_10.error = e_10;
+ env_10.hasError = true;
+ }
+ finally {
+ __disposeResources(env_10);
+ }
+ });
+ test('re-apply delete', async () => {
+ const env_11 = { stack: [], error: void 0, hasError: false };
+ try {
+ const sync_rules = test_utils.testRules(`
+ bucket_definitions:
+ global:
+ data:
+ - SELECT id, description FROM "%"
+ `);
+ const factory = __addDisposableResource(env_11, await generateStorageFactory(), false);
+ const bucketStorage = factory.getInstance(sync_rules);
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ const sourceTable = TEST_TABLE;
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'test1',
+ description: 'test1'
+ },
+ afterReplicaId: test_utils.rid('test1')
+ });
+ });
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ const sourceTable = TEST_TABLE;
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.DELETE,
+ beforeReplicaId: test_utils.rid('test1')
+ });
+ });
+ const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ const sourceTable = TEST_TABLE;
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.DELETE,
+ beforeReplicaId: test_utils.rid('test1')
+ });
+ });
+ const checkpoint = result.flushed_op;
+ const batch = await test_utils.fromAsync(bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])));
+ const data = batch[0].batch.data.map((d) => {
+ return {
+ op: d.op,
+ object_id: d.object_id,
+ checksum: d.checksum
+ };
+ });
+ const c1 = 2871785649;
+ const c2 = 2872534815;
+ expect(data).toEqual([
+ { op: 'PUT', object_id: 'test1', checksum: c1 },
+ { op: 'REMOVE', object_id: 'test1', checksum: c2 }
+ ]);
+ const checksums = [...(await bucketStorage.getChecksums(checkpoint, ['global[]'])).values()];
+ expect(checksums).toEqual([
+ {
+ bucket: 'global[]',
+ checksum: (c1 + c2) & 0xffffffff,
+ count: 2
+ }
+ ]);
+ }
+ catch (e_11) {
+ env_11.error = e_11;
+ env_11.hasError = true;
+ }
+ finally {
+ __disposeResources(env_11);
+ }
+ });
+ test('re-apply update + delete', async () => {
+ const env_12 = { stack: [], error: void 0, hasError: false };
+ try {
+ const sync_rules = test_utils.testRules(`
+ bucket_definitions:
+ global:
+ data:
+ - SELECT id, description FROM "%"
+ `);
+ const factory = __addDisposableResource(env_12, await generateStorageFactory(), false);
+ const bucketStorage = factory.getInstance(sync_rules);
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ const sourceTable = TEST_TABLE;
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'test1',
+ description: 'test1'
+ },
+ afterReplicaId: test_utils.rid('test1')
+ });
+ });
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ const sourceTable = TEST_TABLE;
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.UPDATE,
+ after: {
+ id: 'test1',
+ description: undefined
+ },
+ afterReplicaId: test_utils.rid('test1')
+ });
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.UPDATE,
+ after: {
+ id: 'test1',
+ description: undefined
+ },
+ afterReplicaId: test_utils.rid('test1')
+ });
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.DELETE,
+ beforeReplicaId: test_utils.rid('test1')
+ });
+ });
+ const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ const sourceTable = TEST_TABLE;
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.UPDATE,
+ after: {
+ id: 'test1',
+ description: undefined
+ },
+ afterReplicaId: test_utils.rid('test1')
+ });
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.UPDATE,
+ after: {
+ id: 'test1',
+ description: undefined
+ },
+ afterReplicaId: test_utils.rid('test1')
+ });
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.DELETE,
+ beforeReplicaId: test_utils.rid('test1')
+ });
+ });
+ const checkpoint = result.flushed_op;
+ const batch = await test_utils.fromAsync(bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])));
+ const data = batch[0].batch.data.map((d) => {
+ return {
+ op: d.op,
+ object_id: d.object_id,
+ checksum: d.checksum
+ };
+ });
+ const c1 = 2871785649;
+ const c2 = 2872534815;
+ expect(data).toEqual([
+ { op: 'PUT', object_id: 'test1', checksum: c1 },
+ { op: 'PUT', object_id: 'test1', checksum: c1 },
+ { op: 'PUT', object_id: 'test1', checksum: c1 },
+ { op: 'REMOVE', object_id: 'test1', checksum: c2 }
+ ]);
+ const checksums = [...(await bucketStorage.getChecksums(checkpoint, ['global[]'])).values()];
+ expect(checksums).toEqual([
+ {
+ bucket: 'global[]',
+ checksum: (c1 + c1 + c1 + c2) & 0xffffffff,
+ count: 4
+ }
+ ]);
+ }
+ catch (e_12) {
+ env_12.error = e_12;
+ env_12.hasError = true;
+ }
+ finally {
+ __disposeResources(env_12);
+ }
+ });
+ test('truncate parameters', async () => {
+ const env_13 = { stack: [], error: void 0, hasError: false };
+ try {
+ const sync_rules = test_utils.testRules(`
+ bucket_definitions:
+ mybucket:
+ parameters:
+ - SELECT group_id FROM test WHERE id1 = token_parameters.user_id OR id2 = token_parameters.user_id
+ data: []
+ `);
+ const factory = __addDisposableResource(env_13, await generateStorageFactory(), false);
+ const bucketStorage = factory.getInstance(sync_rules);
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ await batch.save({
+ sourceTable: TEST_TABLE,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 't2',
+ id1: 'user3',
+ id2: 'user4',
+ group_id: 'group2a'
+ },
+ afterReplicaId: test_utils.rid('t2')
+ });
+ await batch.truncate([TEST_TABLE]);
+ });
+ const { checkpoint } = await bucketStorage.getCheckpoint();
+ const parameters = await bucketStorage.getParameterSets(checkpoint, [['mybucket', '1', 'user1']]);
+ expect(parameters).toEqual([]);
+ }
+ catch (e_13) {
+ env_13.error = e_13;
+ env_13.hasError = true;
+ }
+ finally {
+ __disposeResources(env_13);
+ }
+ });
+ test('batch with overlapping replica ids', async () => {
+ const env_14 = { stack: [], error: void 0, hasError: false };
+ try {
+ // This test checks that we get the correct output when processing rows with:
+ // 1. changing replica ids
+ // 2. overlapping with replica ids of other rows in the same transaction (at different times)
+ // If operations are not processing in input order, this breaks easily.
+ // It can break at two places:
+ // 1. Not getting the correct "current_data" state for each operation.
+ // 2. Output order not being correct.
+ const sync_rules = test_utils.testRules(`
+ bucket_definitions:
+ global:
+ data:
+ - SELECT id, description FROM "test"
+ `);
+ const factory = __addDisposableResource(env_14, await generateStorageFactory(), false);
+ const bucketStorage = factory.getInstance(sync_rules);
+ // Pre-setup
+ const result1 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ const sourceTable = TEST_TABLE;
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'test1',
+ description: 'test1a'
+ },
+ afterReplicaId: test_utils.rid('test1')
+ });
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'test2',
+ description: 'test2a'
+ },
+ afterReplicaId: test_utils.rid('test2')
+ });
+ });
+ const checkpoint1 = result1?.flushed_op ?? '0';
+ // Test batch
+ const result2 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ const sourceTable = TEST_TABLE;
+ // b
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'test1',
+ description: 'test1b'
+ },
+ afterReplicaId: test_utils.rid('test1')
+ });
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.UPDATE,
+ before: {
+ id: 'test1'
+ },
+ beforeReplicaId: test_utils.rid('test1'),
+ after: {
+ id: 'test2',
+ description: 'test2b'
+ },
+ afterReplicaId: test_utils.rid('test2')
+ });
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.UPDATE,
+ before: {
+ id: 'test2'
+ },
+ beforeReplicaId: test_utils.rid('test2'),
+ after: {
+ id: 'test3',
+ description: 'test3b'
+ },
+ afterReplicaId: test_utils.rid('test3')
+ });
+ // c
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.UPDATE,
+ after: {
+ id: 'test2',
+ description: 'test2c'
+ },
+ afterReplicaId: test_utils.rid('test2')
+ });
+ // d
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'test4',
+ description: 'test4d'
+ },
+ afterReplicaId: test_utils.rid('test4')
+ });
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.UPDATE,
+ before: {
+ id: 'test4'
+ },
+ beforeReplicaId: test_utils.rid('test4'),
+ after: {
+ id: 'test5',
+ description: 'test5d'
+ },
+ afterReplicaId: test_utils.rid('test5')
+ });
+ });
+ const checkpoint2 = result2.flushed_op;
+ const batch = await test_utils.fromAsync(bucketStorage.getBucketDataBatch(checkpoint2, new Map([['global[]', checkpoint1]])));
+ const data = batch[0].batch.data.map((d) => {
+ return {
+ op: d.op,
+ object_id: d.object_id,
+ data: normalizeOplogData(d.data)
+ };
+ });
+ // Operations must be in this order
+ expect(data).toEqual([
+ // b
+ { op: 'PUT', object_id: 'test1', data: JSON.stringify({ id: 'test1', description: 'test1b' }) },
+ { op: 'REMOVE', object_id: 'test1', data: null },
+ { op: 'PUT', object_id: 'test2', data: JSON.stringify({ id: 'test2', description: 'test2b' }) },
+ { op: 'REMOVE', object_id: 'test2', data: null },
+ { op: 'PUT', object_id: 'test3', data: JSON.stringify({ id: 'test3', description: 'test3b' }) },
+ // c
+ { op: 'PUT', object_id: 'test2', data: JSON.stringify({ id: 'test2', description: 'test2c' }) },
+ // d
+ { op: 'PUT', object_id: 'test4', data: JSON.stringify({ id: 'test4', description: 'test4d' }) },
+ { op: 'REMOVE', object_id: 'test4', data: null },
+ { op: 'PUT', object_id: 'test5', data: JSON.stringify({ id: 'test5', description: 'test5d' }) }
+ ]);
+ }
+ catch (e_14) {
+ env_14.error = e_14;
+ env_14.hasError = true;
+ }
+ finally {
+ __disposeResources(env_14);
+ }
+ });
+ test('changed data with replica identity full', async () => {
+ const env_15 = { stack: [], error: void 0, hasError: false };
+ try {
+ const sync_rules = test_utils.testRules(`
+ bucket_definitions:
+ global:
+ data:
+ - SELECT id, description FROM "test"
+ `);
+ function rid2(id, description) {
+ return getUuidReplicaIdentityBson({ id, description }, [
+ { name: 'id', type: 'VARCHAR', typeId: 25 },
+ { name: 'description', type: 'VARCHAR', typeId: 25 }
+ ]);
+ }
+ const factory = __addDisposableResource(env_15, await generateStorageFactory(), false);
+ const bucketStorage = factory.getInstance(sync_rules);
+ const sourceTable = test_utils.makeTestTable('test', ['id', 'description']);
+ // Pre-setup
+ const result1 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'test1',
+ description: 'test1a'
+ },
+ afterReplicaId: rid2('test1', 'test1a')
+ });
+ });
+ const checkpoint1 = result1?.flushed_op ?? '0';
+ const result2 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ // Unchanged, but has a before id
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.UPDATE,
+ before: {
+ id: 'test1',
+ description: 'test1a'
+ },
+ beforeReplicaId: rid2('test1', 'test1a'),
+ after: {
+ id: 'test1',
+ description: 'test1b'
+ },
+ afterReplicaId: rid2('test1', 'test1b')
+ });
+ });
+ const result3 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ // Delete
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.DELETE,
+ before: {
+ id: 'test1',
+ description: 'test1b'
+ },
+ beforeReplicaId: rid2('test1', 'test1b'),
+ after: undefined
+ });
+ });
+ const checkpoint3 = result3.flushed_op;
+ const batch = await test_utils.fromAsync(bucketStorage.getBucketDataBatch(checkpoint3, new Map([['global[]', checkpoint1]])));
+ const data = batch[0].batch.data.map((d) => {
+ return {
+ op: d.op,
+ object_id: d.object_id,
+ data: normalizeOplogData(d.data),
+ subkey: d.subkey
+ };
+ });
+ // Operations must be in this order
+ expect(data).toEqual([
+ // 2
+ // The REMOVE is expected because the subkey changes
+ {
+ op: 'REMOVE',
+ object_id: 'test1',
+ data: null,
+ subkey: '6544e3899293153fa7b38331/740ba9f2-8b0f-53e3-bb17-5f38a9616f0e'
+ },
+ {
+ op: 'PUT',
+ object_id: 'test1',
+ data: JSON.stringify({ id: 'test1', description: 'test1b' }),
+ subkey: '6544e3899293153fa7b38331/500e9b68-a2fd-51ff-9c00-313e2fb9f562'
+ },
+ // 3
+ {
+ op: 'REMOVE',
+ object_id: 'test1',
+ data: null,
+ subkey: '6544e3899293153fa7b38331/500e9b68-a2fd-51ff-9c00-313e2fb9f562'
+ }
+ ]);
+ }
+ catch (e_15) {
+ env_15.error = e_15;
+ env_15.hasError = true;
+ }
+ finally {
+ __disposeResources(env_15);
+ }
+ });
+ test('unchanged data with replica identity full', async () => {
+ const env_16 = { stack: [], error: void 0, hasError: false };
+ try {
+ const sync_rules = test_utils.testRules(`
+ bucket_definitions:
+ global:
+ data:
+ - SELECT id, description FROM "test"
+ `);
+ function rid2(id, description) {
+ return getUuidReplicaIdentityBson({ id, description }, [
+ { name: 'id', type: 'VARCHAR', typeId: 25 },
+ { name: 'description', type: 'VARCHAR', typeId: 25 }
+ ]);
+ }
+ const factory = __addDisposableResource(env_16, await generateStorageFactory(), false);
+ const bucketStorage = factory.getInstance(sync_rules);
+ const sourceTable = test_utils.makeTestTable('test', ['id', 'description']);
+ // Pre-setup
+ const result1 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'test1',
+ description: 'test1a'
+ },
+ afterReplicaId: rid2('test1', 'test1a')
+ });
+ });
+ const checkpoint1 = result1?.flushed_op ?? '0';
+ const result2 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ // Unchanged, but has a before id
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.UPDATE,
+ before: {
+ id: 'test1',
+ description: 'test1a'
+ },
+ beforeReplicaId: rid2('test1', 'test1a'),
+ after: {
+ id: 'test1',
+ description: 'test1a'
+ },
+ afterReplicaId: rid2('test1', 'test1a')
+ });
+ });
+ const result3 = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ // Delete
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.DELETE,
+ before: {
+ id: 'test1',
+ description: 'test1a'
+ },
+ beforeReplicaId: rid2('test1', 'test1a'),
+ after: undefined
+ });
+ });
+ const checkpoint3 = result3.flushed_op;
+ const batch = await test_utils.fromAsync(bucketStorage.getBucketDataBatch(checkpoint3, new Map([['global[]', checkpoint1]])));
+ const data = batch[0].batch.data.map((d) => {
+ return {
+ op: d.op,
+ object_id: d.object_id,
+ data: normalizeOplogData(d.data),
+ subkey: d.subkey
+ };
+ });
+ // Operations must be in this order
+ expect(data).toEqual([
+ // 2
+ {
+ op: 'PUT',
+ object_id: 'test1',
+ data: JSON.stringify({ id: 'test1', description: 'test1a' }),
+ subkey: '6544e3899293153fa7b38331/740ba9f2-8b0f-53e3-bb17-5f38a9616f0e'
+ },
+ // 3
+ {
+ op: 'REMOVE',
+ object_id: 'test1',
+ data: null,
+ subkey: '6544e3899293153fa7b38331/740ba9f2-8b0f-53e3-bb17-5f38a9616f0e'
+ }
+ ]);
+ }
+ catch (e_16) {
+ env_16.error = e_16;
+ env_16.hasError = true;
+ }
+ finally {
+ __disposeResources(env_16);
+ }
+ });
+ test('large batch', async () => {
+ const env_17 = { stack: [], error: void 0, hasError: false };
+ try {
+ // Test syncing a batch of data that is small in count,
+ // but large enough in size to be split over multiple returned batches.
+ // The specific batch splits is an implementation detail of the storage driver,
+ // and the test will have to updated when other implementations are added.
+ const sync_rules = test_utils.testRules(`
+ bucket_definitions:
+ global:
+ data:
+ - SELECT id, description FROM "%"
+ `);
+ const factory = __addDisposableResource(env_17, await generateStorageFactory(), false);
+ const bucketStorage = factory.getInstance(sync_rules);
+ const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ const sourceTable = TEST_TABLE;
+ const largeDescription = '0123456789'.repeat(1200000);
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'test1',
+ description: 'test1'
+ },
+ afterReplicaId: test_utils.rid('test1')
+ });
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'large1',
+ description: largeDescription
+ },
+ afterReplicaId: test_utils.rid('large1')
+ });
+ // Large enough to split the returned batch
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'large2',
+ description: largeDescription
+ },
+ afterReplicaId: test_utils.rid('large2')
+ });
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'test3',
+ description: 'test3'
+ },
+ afterReplicaId: test_utils.rid('test3')
+ });
+ });
+ const checkpoint = result.flushed_op;
+ const options = {
+ chunkLimitBytes: 16 * 1024 * 1024
+ };
+ const batch1 = await test_utils.fromAsync(bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]), options));
+ expect(test_utils.getBatchData(batch1)).toEqual([
+ { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 },
+ { op_id: '2', op: 'PUT', object_id: 'large1', checksum: 454746904 }
+ ]);
+ expect(test_utils.getBatchMeta(batch1)).toEqual({
+ after: '0',
+ has_more: true,
+ next_after: '2'
+ });
+ const batch2 = await test_utils.fromAsync(bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', batch1[0].batch.next_after]]), options));
+ expect(test_utils.getBatchData(batch2)).toEqual([
+ { op_id: '3', op: 'PUT', object_id: 'large2', checksum: 1795508474 },
+ { op_id: '4', op: 'PUT', object_id: 'test3', checksum: 1359888332 }
+ ]);
+ expect(test_utils.getBatchMeta(batch2)).toEqual({
+ after: '2',
+ has_more: false,
+ next_after: '4'
+ });
+ const batch3 = await test_utils.fromAsync(bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', batch2[0].batch.next_after]]), options));
+ expect(test_utils.getBatchData(batch3)).toEqual([]);
+ expect(test_utils.getBatchMeta(batch3)).toEqual(null);
+ }
+ catch (e_17) {
+ env_17.error = e_17;
+ env_17.hasError = true;
+ }
+ finally {
+ __disposeResources(env_17);
+ }
+ });
+ test('long batch', async () => {
+ const env_18 = { stack: [], error: void 0, hasError: false };
+ try {
+ // Test syncing a batch of data that is limited by count.
+ const sync_rules = test_utils.testRules(`
+ bucket_definitions:
+ global:
+ data:
+ - SELECT id, description FROM "%"
+ `);
+ const factory = __addDisposableResource(env_18, await generateStorageFactory(), false);
+ const bucketStorage = factory.getInstance(sync_rules);
+ const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ const sourceTable = TEST_TABLE;
+ for (let i = 1; i <= 6; i++) {
+ await batch.save({
+ sourceTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: `test${i}`,
+ description: `test${i}`
+ },
+ afterReplicaId: `test${i}`
+ });
+ }
+ });
+ const checkpoint = result.flushed_op;
+ const batch1 = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]), { limit: 4 }));
+ expect(test_utils.getBatchData(batch1)).toEqual([
+ { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 },
+ { op_id: '2', op: 'PUT', object_id: 'test2', checksum: 730027011 },
+ { op_id: '3', op: 'PUT', object_id: 'test3', checksum: 1359888332 },
+ { op_id: '4', op: 'PUT', object_id: 'test4', checksum: 2049153252 }
+ ]);
+ expect(test_utils.getBatchMeta(batch1)).toEqual({
+ after: '0',
+ has_more: true,
+ next_after: '4'
+ });
+ const batch2 = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', batch1.batch.next_after]]), {
+ limit: 4
+ }));
+ expect(test_utils.getBatchData(batch2)).toEqual([
+ { op_id: '5', op: 'PUT', object_id: 'test5', checksum: 3686902721 },
+ { op_id: '6', op: 'PUT', object_id: 'test6', checksum: 1974820016 }
+ ]);
+ expect(test_utils.getBatchMeta(batch2)).toEqual({
+ after: '4',
+ has_more: false,
+ next_after: '6'
+ });
+ const batch3 = await test_utils.fromAsync(bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', batch2.batch.next_after]]), {
+ limit: 4
+ }));
+ expect(test_utils.getBatchData(batch3)).toEqual([]);
+ expect(test_utils.getBatchMeta(batch3)).toEqual(null);
+ }
+ catch (e_18) {
+ env_18.error = e_18;
+ env_18.hasError = true;
+ }
+ finally {
+ __disposeResources(env_18);
+ }
+ });
+ test('batch should be disposed automatically', async () => {
+ const env_19 = { stack: [], error: void 0, hasError: false };
+ try {
+ const sync_rules = test_utils.testRules(`
+ bucket_definitions:
+ global:
+ data: []
+ `);
+ const factory = __addDisposableResource(env_19, await generateStorageFactory(), false);
+ const bucketStorage = factory.getInstance(sync_rules);
+ let isDisposed = false;
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ batch.registerListener({
+ disposed: () => {
+ isDisposed = true;
+ }
+ });
+ });
+ expect(isDisposed).true;
+ isDisposed = false;
+ let errorCaught = false;
+ try {
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ batch.registerListener({
+ disposed: () => {
+ isDisposed = true;
+ }
+ });
+ throw new Error(`Testing exceptions`);
+ });
+ }
+ catch (ex) {
+ errorCaught = true;
+ expect(ex.message.includes('Testing')).true;
+ }
+ expect(errorCaught).true;
+ expect(isDisposed).true;
+ }
+ catch (e_19) {
+ env_19.error = e_19;
+ env_19.hasError = true;
+ }
+ finally {
+ __disposeResources(env_19);
+ }
+ });
+ test('batch should be disposed automatically', async () => {
+ const env_20 = { stack: [], error: void 0, hasError: false };
+ try {
+ const sync_rules = test_utils.testRules(`
+ bucket_definitions:
+ global:
+ data: []
+ `);
+ const factory = __addDisposableResource(env_20, await generateStorageFactory(), false);
+ const bucketStorage = factory.getInstance(sync_rules);
+ let isDisposed = false;
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ batch.registerListener({
+ disposed: () => {
+ isDisposed = true;
+ }
+ });
+ });
+ expect(isDisposed).true;
+ isDisposed = false;
+ let errorCaught = false;
+ try {
+ await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+ batch.registerListener({
+ disposed: () => {
+ isDisposed = true;
+ }
+ });
+ throw new Error(`Testing exceptions`);
+ });
+ }
+ catch (ex) {
+ errorCaught = true;
+ expect(ex.message.includes('Testing')).true;
+ }
+ expect(errorCaught).true;
+ expect(isDisposed).true;
+ }
+ catch (e_20) {
+ env_20.error = e_20;
+ env_20.hasError = true;
+ }
+ finally {
+ __disposeResources(env_20);
+ }
+ });
+ test('empty storage metrics', async () => {
+ const env_21 = { stack: [], error: void 0, hasError: false };
+ try {
+ const f = __addDisposableResource(env_21, await generateStorageFactory({ dropAll: true }), false);
+ const metrics = await f.getStorageMetrics();
+ expect(metrics).toEqual({
+ operations_size_bytes: 0,
+ parameters_size_bytes: 0,
+ replication_size_bytes: 0
+ });
+ const r = await f.configureSyncRules('bucket_definitions: {}');
+ const storage = f.getInstance(r.persisted_sync_rules);
+ await storage.autoActivate();
+ const metrics2 = await f.getStorageMetrics();
+ expect(metrics2).toEqual({
+ operations_size_bytes: 0,
+ parameters_size_bytes: 0,
+ replication_size_bytes: 0
+ });
+ }
+ catch (e_21) {
+ env_21.error = e_21;
+ env_21.hasError = true;
+ }
+ finally {
+ __disposeResources(env_21);
+ }
+ });
+ test('invalidate cached parsed sync rules', async () => {
+ const env_22 = { stack: [], error: void 0, hasError: false };
+ try {
+ const sync_rules_content = test_utils.testRules(`
+ bucket_definitions:
+ by_workspace:
+ parameters:
+ - SELECT id as workspace_id FROM workspace WHERE
+ workspace."userId" = token_parameters.user_id
+ data: []
+ `);
+ const bucketStorageFactory = __addDisposableResource(env_22, await generateStorageFactory(), false);
+ const syncBucketStorage = bucketStorageFactory.getInstance(sync_rules_content);
+ const parsedSchema1 = syncBucketStorage.getParsedSyncRules({
+ defaultSchema: 'public'
+ });
+ const parsedSchema2 = syncBucketStorage.getParsedSyncRules({
+ defaultSchema: 'public'
+ });
+ // These should be cached, this will be the same instance
+ expect(parsedSchema2).equals(parsedSchema1);
+ expect(parsedSchema1.getSourceTables()[0].schema).equals('public');
+ const parsedSchema3 = syncBucketStorage.getParsedSyncRules({
+ defaultSchema: 'databasename'
+ });
+ // The cache should not be used
+ expect(parsedSchema3).not.equals(parsedSchema2);
+ expect(parsedSchema3.getSourceTables()[0].schema).equals('databasename');
+ }
+ catch (e_22) {
+ env_22.error = e_22;
+ env_22.hasError = true;
+ }
+ finally {
+ __disposeResources(env_22);
+ }
+ });
+ }
+ //# sourceMappingURL=register-data-storage-tests.js.map
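
The suites added in this version are meant to be registered from a concrete storage implementation's own test run, passing in that implementation's storage factory. Below is a minimal sketch of how a consumer might wire this up with vitest, based on the JSDoc @example in the diff above; the root import path and the MONGO_STORAGE_FACTORY helper are assumptions for illustration, not part of this diff.

```TypeScript
// Hypothetical consumer test file (not part of this package diff).
// Assumes registerDataStorageTests is re-exported from the package root and
// that the storage implementation provides its own factory helper.
import { describe } from 'vitest';
import { registerDataStorageTests } from '@powersync/service-core-tests';
import { MONGO_STORAGE_FACTORY } from './util.js'; // assumed helper returning a storage factory

describe('store - mongodb', function () {
  // Registers the shared data-storage test suite against the MongoDB-backed storage.
  registerDataStorageTests(MONGO_STORAGE_FACTORY);
});
```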