@powersync/service-core-tests 0.12.0 → 0.12.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +16 -0
- package/dist/tests/register-compacting-tests.js +68 -0
- package/dist/tests/register-compacting-tests.js.map +1 -1
- package/dist/tests/register-data-storage-checkpoint-tests.d.ts +12 -0
- package/dist/tests/register-data-storage-checkpoint-tests.js +357 -0
- package/dist/tests/register-data-storage-checkpoint-tests.js.map +1 -0
- package/dist/tests/register-data-storage-data-tests.d.ts +12 -0
- package/dist/tests/{register-data-storage-tests.js → register-data-storage-data-tests.js} +151 -1035
- package/dist/tests/register-data-storage-data-tests.js.map +1 -0
- package/dist/tests/{register-data-storage-tests.d.ts → register-data-storage-parameter-tests.d.ts} +1 -2
- package/dist/tests/register-data-storage-parameter-tests.js +707 -0
- package/dist/tests/register-data-storage-parameter-tests.js.map +1 -0
- package/dist/tests/register-sync-tests.js +2 -1
- package/dist/tests/register-sync-tests.js.map +1 -1
- package/dist/tests/tests-index.d.ts +4 -1
- package/dist/tests/tests-index.js +4 -1
- package/dist/tests/tests-index.js.map +1 -1
- package/dist/tests/util.d.ts +1 -0
- package/dist/tests/util.js +3 -0
- package/dist/tests/util.js.map +1 -0
- package/package.json +2 -2
- package/src/tests/register-compacting-tests.ts +63 -0
- package/src/tests/register-data-storage-checkpoint-tests.ts +277 -0
- package/src/tests/{register-data-storage-tests.ts → register-data-storage-data-tests.ts} +38 -865
- package/src/tests/register-data-storage-parameter-tests.ts +613 -0
- package/src/tests/register-sync-tests.ts +2 -1
- package/src/tests/tests-index.ts +4 -1
- package/src/tests/util.ts +3 -0
- package/tsconfig.tsbuildinfo +1 -1
- package/dist/tests/register-data-storage-tests.js.map +0 -1
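The notable change in this release is that the former `register-data-storage-tests` module is split into three registration helpers (`register-data-storage-data-tests`, `register-data-storage-parameter-tests`, and `register-data-storage-checkpoint-tests`), re-exported via `tests-index`, with a shared `TEST_TABLE` constant moved into `util`. Below is a minimal sketch of how a storage driver's test suite might consume the split helpers, modeled on the doc comment in the diff; the `registerDataStorageParameterTests` / `registerDataStorageCheckpointTests` names are inferred from the new file names, and `MONGO_STORAGE_FACTORY` is the placeholder factory used in that doc comment:

```TypeScript
import { describe } from 'vitest';
// Names inferred from the new dist/tests files; verify against the tests-index exports.
import {
  registerDataStorageCheckpointTests,
  registerDataStorageDataTests,
  registerDataStorageParameterTests
} from '@powersync/service-core-tests';
// Hypothetical storage-specific factory, as in the doc comment's MONGO_STORAGE_FACTORY example.
import { MONGO_STORAGE_FACTORY } from './util.js';

describe('store - mongodb', () => {
  // Each helper registers its own vitest tests against the supplied storage factory.
  registerDataStorageDataTests(MONGO_STORAGE_FACTORY);
  registerDataStorageParameterTests(MONGO_STORAGE_FACTORY);
  registerDataStorageCheckpointTests(MONGO_STORAGE_FACTORY);
});
```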
@@ -51,10 +51,10 @@ var __disposeResources = (this && this.__disposeResources) || (function (Suppres
 return e.name = "SuppressedError", e.error = error, e.suppressed = suppressed, e;
 });
 import { getUuidReplicaIdentityBson, storage } from '@powersync/service-core';
-import { DateTimeValue
-import { expect, test
+import { DateTimeValue } from '@powersync/service-sync-rules';
+import { describe, expect, test } from 'vitest';
 import * as test_utils from '../test-utils/test-utils-index.js';
-
+import { TEST_TABLE } from './util.js';
 /**
 * Normalize data from OplogEntries for comparison in tests.
 * Tests typically expect the stringified result
@@ -70,331 +70,18 @@ const normalizeOplogData = (data) => {
 * ```TypeScript
 *
 * describe('store - mongodb', function () {
-*
+* registerDataStorageDataTests(MONGO_STORAGE_FACTORY);
 * });
 *
 * ```
 */
-export function
-test('
+export function registerDataStorageDataTests(generateStorageFactory) {
+test('removing row', async () => {
 const env_1 = { stack: [], error: void 0, hasError: false };
 try {
 const factory = __addDisposableResource(env_1, await generateStorageFactory(), true);
 const syncRules = await factory.updateSyncRules({
 content: `
-bucket_definitions:
-mybucket:
-parameters:
-- SELECT group_id FROM test WHERE id1 = token_parameters.user_id OR id2 = token_parameters.user_id
-data: []
-`
-});
-const bucketStorage = factory.getInstance(syncRules);
-await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-await batch.save({
-sourceTable: TEST_TABLE,
-tag: storage.SaveOperationTag.INSERT,
-after: {
-id: 't2',
-id1: 'user3',
-id2: 'user4',
-group_id: 'group2a'
-},
-afterReplicaId: test_utils.rid('t2')
-});
-await batch.save({
-sourceTable: TEST_TABLE,
-tag: storage.SaveOperationTag.INSERT,
-after: {
-id: 't1',
-id1: 'user1',
-id2: 'user2',
-group_id: 'group1a'
-},
-afterReplicaId: test_utils.rid('t1')
-});
-await batch.commit('1/1');
-});
-const checkpoint = await bucketStorage.getCheckpoint();
-const parameters = await checkpoint.getParameterSets([ParameterLookup.normalized('mybucket', '1', ['user1'])]);
-expect(parameters).toEqual([
-{
-group_id: 'group1a'
-}
-]);
-}
-catch (e_1) {
-env_1.error = e_1;
-env_1.hasError = true;
-}
-finally {
-const result_1 = __disposeResources(env_1);
-if (result_1)
-await result_1;
-}
-});
-test('it should use the latest version', async () => {
-const env_2 = { stack: [], error: void 0, hasError: false };
-try {
-const factory = __addDisposableResource(env_2, await generateStorageFactory(), true);
-const syncRules = await factory.updateSyncRules({
-content: `
-bucket_definitions:
-mybucket:
-parameters:
-- SELECT group_id FROM test WHERE id = token_parameters.user_id
-data: []
-`
-});
-const bucketStorage = factory.getInstance(syncRules);
-await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-await batch.save({
-sourceTable: TEST_TABLE,
-tag: storage.SaveOperationTag.INSERT,
-after: {
-id: 'user1',
-group_id: 'group1'
-},
-afterReplicaId: test_utils.rid('user1')
-});
-await batch.commit('1/1');
-});
-const checkpoint1 = await bucketStorage.getCheckpoint();
-await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-await batch.save({
-sourceTable: TEST_TABLE,
-tag: storage.SaveOperationTag.INSERT,
-after: {
-id: 'user1',
-group_id: 'group2'
-},
-afterReplicaId: test_utils.rid('user1')
-});
-await batch.commit('1/2');
-});
-const checkpoint2 = await bucketStorage.getCheckpoint();
-const parameters = await checkpoint2.getParameterSets([ParameterLookup.normalized('mybucket', '1', ['user1'])]);
-expect(parameters).toEqual([
-{
-group_id: 'group2'
-}
-]);
-// Use the checkpoint to get older data if relevant
-const parameters2 = await checkpoint1.getParameterSets([ParameterLookup.normalized('mybucket', '1', ['user1'])]);
-expect(parameters2).toEqual([
-{
-group_id: 'group1'
-}
-]);
-}
-catch (e_2) {
-env_2.error = e_2;
-env_2.hasError = true;
-}
-finally {
-const result_2 = __disposeResources(env_2);
-if (result_2)
-await result_2;
-}
-});
-test('it should use the latest version after updates', async () => {
-const env_3 = { stack: [], error: void 0, hasError: false };
-try {
-const factory = __addDisposableResource(env_3, await generateStorageFactory(), true);
-const syncRules = await factory.updateSyncRules({
-content: `
-bucket_definitions:
-mybucket:
-parameters:
-- SELECT id AS todo_id
-FROM todos
-WHERE list_id IN token_parameters.list_id
-data: []
-`
-});
-const bucketStorage = factory.getInstance(syncRules);
-const table = test_utils.makeTestTable('todos', ['id', 'list_id']);
-await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-// Create two todos which initially belong to different lists
-await batch.save({
-sourceTable: table,
-tag: storage.SaveOperationTag.INSERT,
-after: {
-id: 'todo1',
-list_id: 'list1'
-},
-afterReplicaId: test_utils.rid('todo1')
-});
-await batch.save({
-sourceTable: table,
-tag: storage.SaveOperationTag.INSERT,
-after: {
-id: 'todo2',
-list_id: 'list2'
-},
-afterReplicaId: test_utils.rid('todo2')
-});
-await batch.commit('1/1');
-});
-await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-// Update the second todo item to now belong to list 1
-await batch.save({
-sourceTable: table,
-tag: storage.SaveOperationTag.UPDATE,
-after: {
-id: 'todo2',
-list_id: 'list1'
-},
-afterReplicaId: test_utils.rid('todo2')
-});
-await batch.commit('1/1');
-});
-// We specifically request the todo_ids for both lists.
-// There removal operation for the association of `list2`::`todo2` should not interfere with the new
-// association of `list1`::`todo2`
-const checkpoint = await bucketStorage.getCheckpoint();
-const parameters = await checkpoint.getParameterSets([
-ParameterLookup.normalized('mybucket', '1', ['list1']),
-ParameterLookup.normalized('mybucket', '1', ['list2'])
-]);
-expect(parameters.sort((a, b) => a.todo_id.localeCompare(b.todo_id))).toEqual([
-{
-todo_id: 'todo1'
-},
-{
-todo_id: 'todo2'
-}
-]);
-}
-catch (e_3) {
-env_3.error = e_3;
-env_3.hasError = true;
-}
-finally {
-const result_3 = __disposeResources(env_3);
-if (result_3)
-await result_3;
-}
-});
-test('save and load parameters with different number types', async () => {
-const env_4 = { stack: [], error: void 0, hasError: false };
-try {
-const factory = __addDisposableResource(env_4, await generateStorageFactory(), true);
-const syncRules = await factory.updateSyncRules({
-content: `
-bucket_definitions:
-mybucket:
-parameters:
-- SELECT group_id FROM test WHERE n1 = token_parameters.n1 and f2 = token_parameters.f2 and f3 = token_parameters.f3
-data: []
-`
-});
-const bucketStorage = factory.getInstance(syncRules);
-await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-await batch.save({
-sourceTable: TEST_TABLE,
-tag: storage.SaveOperationTag.INSERT,
-after: {
-id: 't1',
-group_id: 'group1',
-n1: 314n,
-f2: 314,
-f3: 3.14
-},
-afterReplicaId: test_utils.rid('t1')
-});
-await batch.commit('1/1');
-});
-const TEST_PARAMS = { group_id: 'group1' };
-const checkpoint = await bucketStorage.getCheckpoint();
-const parameters1 = await checkpoint.getParameterSets([
-ParameterLookup.normalized('mybucket', '1', [314n, 314, 3.14])
-]);
-expect(parameters1).toEqual([TEST_PARAMS]);
-const parameters2 = await checkpoint.getParameterSets([
-ParameterLookup.normalized('mybucket', '1', [314, 314n, 3.14])
-]);
-expect(parameters2).toEqual([TEST_PARAMS]);
-const parameters3 = await checkpoint.getParameterSets([
-ParameterLookup.normalized('mybucket', '1', [314n, 314, 3])
-]);
-expect(parameters3).toEqual([]);
-}
-catch (e_4) {
-env_4.error = e_4;
-env_4.hasError = true;
-}
-finally {
-const result_4 = __disposeResources(env_4);
-if (result_4)
-await result_4;
-}
-});
-test('save and load parameters with large numbers', async () => {
-const env_5 = { stack: [], error: void 0, hasError: false };
-try {
-// This ensures serialization / deserialization of "current_data" is done correctly.
-// This specific case tested here cannot happen with postgres in practice, but we still
-// test this to ensure correct deserialization.
-const factory = __addDisposableResource(env_5, await generateStorageFactory(), true);
-const syncRules = await factory.updateSyncRules({
-content: `
-bucket_definitions:
-mybucket:
-parameters:
-- SELECT group_id FROM test WHERE n1 = token_parameters.n1
-data: []
-`
-});
-const bucketStorage = factory.getInstance(syncRules);
-await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-await batch.save({
-sourceTable: TEST_TABLE,
-tag: storage.SaveOperationTag.INSERT,
-after: {
-id: 't1',
-group_id: 'group1',
-n1: 1152921504606846976n // 2^60
-},
-afterReplicaId: test_utils.rid('t1')
-});
-await batch.save({
-sourceTable: TEST_TABLE,
-tag: storage.SaveOperationTag.UPDATE,
-after: {
-id: 't1',
-group_id: 'group1',
-// Simulate a TOAST value, even though it can't happen for values like this
-// in practice.
-n1: undefined
-},
-afterReplicaId: test_utils.rid('t1')
-});
-await batch.commit('1/1');
-});
-const TEST_PARAMS = { group_id: 'group1' };
-const checkpoint = await bucketStorage.getCheckpoint();
-const parameters1 = await checkpoint.getParameterSets([
-ParameterLookup.normalized('mybucket', '1', [1152921504606846976n])
-]);
-expect(parameters1).toEqual([TEST_PARAMS]);
-}
-catch (e_5) {
-env_5.error = e_5;
-env_5.hasError = true;
-}
-finally {
-const result_5 = __disposeResources(env_5);
-if (result_5)
-await result_5;
-}
-});
-test('removing row', async () => {
-const env_6 = { stack: [], error: void 0, hasError: false };
-try {
-const factory = __addDisposableResource(env_6, await generateStorageFactory(), true);
-const syncRules = await factory.updateSyncRules({
-content: `
 bucket_definitions:
 global:
 data:
@@ -444,261 +131,20 @@ bucket_definitions:
 }
 ]);
 }
-catch (
-
-
-}
-finally {
-const result_6 = __disposeResources(env_6);
-if (result_6)
-await result_6;
-}
-});
-test('save and load parameters with workspaceId', async () => {
-const env_7 = { stack: [], error: void 0, hasError: false };
-try {
-const WORKSPACE_TABLE = test_utils.makeTestTable('workspace', ['id']);
-const factory = __addDisposableResource(env_7, await generateStorageFactory(), true);
-const syncRules = await factory.updateSyncRules({
-content: `
-bucket_definitions:
-by_workspace:
-parameters:
-- SELECT id as workspace_id FROM workspace WHERE
-workspace."userId" = token_parameters.user_id
-data: []
-`
-});
-const sync_rules = syncRules.parsed(test_utils.PARSE_OPTIONS).sync_rules;
-const bucketStorage = factory.getInstance(syncRules);
-await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-await batch.save({
-sourceTable: WORKSPACE_TABLE,
-tag: storage.SaveOperationTag.INSERT,
-after: {
-id: 'workspace1',
-userId: 'u1'
-},
-afterReplicaId: test_utils.rid('workspace1')
-});
-await batch.commit('1/1');
-});
-const checkpoint = await bucketStorage.getCheckpoint();
-const parameters = new RequestParameters({ sub: 'u1' }, {});
-const q1 = sync_rules.bucketSources[0].parameterQueries[0];
-const lookups = q1.getLookups(parameters);
-expect(lookups).toEqual([ParameterLookup.normalized('by_workspace', '1', ['u1'])]);
-const parameter_sets = await checkpoint.getParameterSets(lookups);
-expect(parameter_sets).toEqual([{ workspace_id: 'workspace1' }]);
-const buckets = await sync_rules
-.getBucketParameterQuerier(test_utils.querierOptions(parameters))
-.querier.queryDynamicBucketDescriptions({
-getParameterSets(lookups) {
-return checkpoint.getParameterSets(lookups);
-}
-});
-expect(buckets).toEqual([
-{ bucket: 'by_workspace["workspace1"]', priority: 3, definition: 'by_workspace', inclusion_reasons: ['default'] }
-]);
-}
-catch (e_7) {
-env_7.error = e_7;
-env_7.hasError = true;
-}
-finally {
-const result_7 = __disposeResources(env_7);
-if (result_7)
-await result_7;
-}
-});
-test('save and load parameters with dynamic global buckets', async () => {
-const env_8 = { stack: [], error: void 0, hasError: false };
-try {
-const WORKSPACE_TABLE = test_utils.makeTestTable('workspace');
-const factory = __addDisposableResource(env_8, await generateStorageFactory(), true);
-const syncRules = await factory.updateSyncRules({
-content: `
-bucket_definitions:
-by_public_workspace:
-parameters:
-- SELECT id as workspace_id FROM workspace WHERE
-workspace.visibility = 'public'
-data: []
-`
-});
-const sync_rules = syncRules.parsed(test_utils.PARSE_OPTIONS).sync_rules;
-const bucketStorage = factory.getInstance(syncRules);
-await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-await batch.save({
-sourceTable: WORKSPACE_TABLE,
-tag: storage.SaveOperationTag.INSERT,
-after: {
-id: 'workspace1',
-visibility: 'public'
-},
-afterReplicaId: test_utils.rid('workspace1')
-});
-await batch.save({
-sourceTable: WORKSPACE_TABLE,
-tag: storage.SaveOperationTag.INSERT,
-after: {
-id: 'workspace2',
-visibility: 'private'
-},
-afterReplicaId: test_utils.rid('workspace2')
-});
-await batch.save({
-sourceTable: WORKSPACE_TABLE,
-tag: storage.SaveOperationTag.INSERT,
-after: {
-id: 'workspace3',
-visibility: 'public'
-},
-afterReplicaId: test_utils.rid('workspace3')
-});
-await batch.commit('1/1');
-});
-const checkpoint = await bucketStorage.getCheckpoint();
-const parameters = new RequestParameters({ sub: 'unknown' }, {});
-const q1 = sync_rules.bucketSources[0].parameterQueries[0];
-const lookups = q1.getLookups(parameters);
-expect(lookups).toEqual([ParameterLookup.normalized('by_public_workspace', '1', [])]);
-const parameter_sets = await checkpoint.getParameterSets(lookups);
-parameter_sets.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
-expect(parameter_sets).toEqual([{ workspace_id: 'workspace1' }, { workspace_id: 'workspace3' }]);
-const buckets = await sync_rules
-.getBucketParameterQuerier(test_utils.querierOptions(parameters))
-.querier.queryDynamicBucketDescriptions({
-getParameterSets(lookups) {
-return checkpoint.getParameterSets(lookups);
-}
-});
-buckets.sort((a, b) => a.bucket.localeCompare(b.bucket));
-expect(buckets).toEqual([
-{
-bucket: 'by_public_workspace["workspace1"]',
-priority: 3,
-definition: 'by_public_workspace',
-inclusion_reasons: ['default']
-},
-{
-bucket: 'by_public_workspace["workspace3"]',
-priority: 3,
-definition: 'by_public_workspace',
-inclusion_reasons: ['default']
-}
-]);
-}
-catch (e_8) {
-env_8.error = e_8;
-env_8.hasError = true;
-}
-finally {
-const result_8 = __disposeResources(env_8);
-if (result_8)
-await result_8;
-}
-});
-test('multiple parameter queries', async () => {
-const env_9 = { stack: [], error: void 0, hasError: false };
-try {
-const WORKSPACE_TABLE = test_utils.makeTestTable('workspace');
-const factory = __addDisposableResource(env_9, await generateStorageFactory(), true);
-const syncRules = await factory.updateSyncRules({
-content: `
-bucket_definitions:
-by_workspace:
-parameters:
-- SELECT id as workspace_id FROM workspace WHERE
-workspace.visibility = 'public'
-- SELECT id as workspace_id FROM workspace WHERE
-workspace.user_id = token_parameters.user_id
-data: []
-`
-});
-const sync_rules = syncRules.parsed(test_utils.PARSE_OPTIONS).sync_rules;
-const bucketStorage = factory.getInstance(syncRules);
-await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-await batch.save({
-sourceTable: WORKSPACE_TABLE,
-tag: storage.SaveOperationTag.INSERT,
-after: {
-id: 'workspace1',
-visibility: 'public'
-},
-afterReplicaId: test_utils.rid('workspace1')
-});
-await batch.save({
-sourceTable: WORKSPACE_TABLE,
-tag: storage.SaveOperationTag.INSERT,
-after: {
-id: 'workspace2',
-visibility: 'private'
-},
-afterReplicaId: test_utils.rid('workspace2')
-});
-await batch.save({
-sourceTable: WORKSPACE_TABLE,
-tag: storage.SaveOperationTag.INSERT,
-after: {
-id: 'workspace3',
-user_id: 'u1',
-visibility: 'private'
-},
-afterReplicaId: test_utils.rid('workspace3')
-});
-await batch.save({
-sourceTable: WORKSPACE_TABLE,
-tag: storage.SaveOperationTag.INSERT,
-after: {
-id: 'workspace4',
-user_id: 'u2',
-visibility: 'private'
-},
-afterReplicaId: test_utils.rid('workspace4')
-});
-await batch.commit('1/1');
-});
-const checkpoint = await bucketStorage.getCheckpoint();
-const parameters = new RequestParameters({ sub: 'u1' }, {});
-// Test intermediate values - could be moved to sync_rules.test.ts
-const q1 = sync_rules.bucketSources[0].parameterQueries[0];
-const lookups1 = q1.getLookups(parameters);
-expect(lookups1).toEqual([ParameterLookup.normalized('by_workspace', '1', [])]);
-const parameter_sets1 = await checkpoint.getParameterSets(lookups1);
-parameter_sets1.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
-expect(parameter_sets1).toEqual([{ workspace_id: 'workspace1' }]);
-const q2 = sync_rules.bucketSources[0].parameterQueries[1];
-const lookups2 = q2.getLookups(parameters);
-expect(lookups2).toEqual([ParameterLookup.normalized('by_workspace', '2', ['u1'])]);
-const parameter_sets2 = await checkpoint.getParameterSets(lookups2);
-parameter_sets2.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
-expect(parameter_sets2).toEqual([{ workspace_id: 'workspace3' }]);
-// Test final values - the important part
-const buckets = (await sync_rules
-.getBucketParameterQuerier(test_utils.querierOptions(parameters))
-.querier.queryDynamicBucketDescriptions({
-getParameterSets(lookups) {
-return checkpoint.getParameterSets(lookups);
-}
-})).map((e) => e.bucket);
-buckets.sort();
-expect(buckets).toEqual(['by_workspace["workspace1"]', 'by_workspace["workspace3"]']);
-}
-catch (e_9) {
-env_9.error = e_9;
-env_9.hasError = true;
+catch (e_1) {
+env_1.error = e_1;
+env_1.hasError = true;
 }
 finally {
-const
-if (
-await
+const result_1 = __disposeResources(env_1);
+if (result_1)
+await result_1;
 }
 });
 test('changing client ids', async () => {
-const
+const env_2 = { stack: [], error: void 0, hasError: false };
 try {
-const factory = __addDisposableResource(
+const factory = __addDisposableResource(env_2, await generateStorageFactory(), true);
 const syncRules = await factory.updateSyncRules({
 content: `
 bucket_definitions:
@@ -757,20 +203,20 @@ bucket_definitions:
 { op: 'PUT', object_id: 'client2' }
 ]);
 }
-catch (
-
-
+catch (e_2) {
+env_2.error = e_2;
+env_2.hasError = true;
 }
 finally {
-const
-if (
-await
+const result_2 = __disposeResources(env_2);
+if (result_2)
+await result_2;
 }
 });
 test('re-apply delete', async () => {
-const
+const env_3 = { stack: [], error: void 0, hasError: false };
 try {
-const factory = __addDisposableResource(
+const factory = __addDisposableResource(env_3, await generateStorageFactory(), true);
 const syncRules = await factory.updateSyncRules({
 content: `
 bucket_definitions:
@@ -833,20 +279,20 @@ bucket_definitions:
 }
 ]);
 }
-catch (
-
-
+catch (e_3) {
+env_3.error = e_3;
+env_3.hasError = true;
 }
 finally {
-const
-if (
-await
+const result_3 = __disposeResources(env_3);
+if (result_3)
+await result_3;
 }
 });
 test('re-apply update + delete', async () => {
-const
+const env_4 = { stack: [], error: void 0, hasError: false };
 try {
-const factory = __addDisposableResource(
+const factory = __addDisposableResource(env_4, await generateStorageFactory(), true);
 const syncRules = await factory.updateSyncRules({
 content: `
 bucket_definitions:
@@ -948,60 +394,18 @@ bucket_definitions:
 }
 ]);
 }
-catch (
-
-
-}
-finally {
-const result_12 = __disposeResources(env_12);
-if (result_12)
-await result_12;
-}
-});
-test('truncate parameters', async () => {
-const env_13 = { stack: [], error: void 0, hasError: false };
-try {
-const factory = __addDisposableResource(env_13, await generateStorageFactory(), true);
-const syncRules = await factory.updateSyncRules({
-content: `
-bucket_definitions:
-mybucket:
-parameters:
-- SELECT group_id FROM test WHERE id1 = token_parameters.user_id OR id2 = token_parameters.user_id
-data: []
-`
-});
-const bucketStorage = factory.getInstance(syncRules);
-await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-await batch.save({
-sourceTable: TEST_TABLE,
-tag: storage.SaveOperationTag.INSERT,
-after: {
-id: 't2',
-id1: 'user3',
-id2: 'user4',
-group_id: 'group2a'
-},
-afterReplicaId: test_utils.rid('t2')
-});
-await batch.truncate([TEST_TABLE]);
-});
-const checkpoint = await bucketStorage.getCheckpoint();
-const parameters = await checkpoint.getParameterSets([ParameterLookup.normalized('mybucket', '1', ['user1'])]);
-expect(parameters).toEqual([]);
-}
-catch (e_13) {
-env_13.error = e_13;
-env_13.hasError = true;
+catch (e_4) {
+env_4.error = e_4;
+env_4.hasError = true;
 }
 finally {
-const
-if (
-await
+const result_4 = __disposeResources(env_4);
+if (result_4)
+await result_4;
 }
 });
 test('batch with overlapping replica ids', async () => {
-const
+const env_5 = { stack: [], error: void 0, hasError: false };
 try {
 // This test checks that we get the correct output when processing rows with:
 // 1. changing replica ids
@@ -1010,7 +414,7 @@ bucket_definitions:
 // It can break at two places:
 // 1. Not getting the correct "current_data" state for each operation.
 // 2. Output order not being correct.
-const factory = __addDisposableResource(
+const factory = __addDisposableResource(env_5, await generateStorageFactory(), true);
 const syncRules = await factory.updateSyncRules({
 content: `
 bucket_definitions:
@@ -1141,18 +545,18 @@ bucket_definitions:
 { op: 'PUT', object_id: 'test5', data: JSON.stringify({ id: 'test5', description: 'test5d' }) }
 ]);
 }
-catch (
-
-
+catch (e_5) {
+env_5.error = e_5;
+env_5.hasError = true;
 }
 finally {
-const
-if (
-await
+const result_5 = __disposeResources(env_5);
+if (result_5)
+await result_5;
 }
 });
 test('changed data with replica identity full', async () => {
-const
+const env_6 = { stack: [], error: void 0, hasError: false };
 try {
 function rid2(id, description) {
 return getUuidReplicaIdentityBson({ id, description }, [
@@ -1160,7 +564,7 @@ bucket_definitions:
 { name: 'description', type: 'VARCHAR', typeId: 25 }
 ]);
 }
-const factory = __addDisposableResource(
+const factory = __addDisposableResource(env_6, await generateStorageFactory(), true);
 const syncRules = await factory.updateSyncRules({
 content: `
 bucket_definitions:
@@ -1249,18 +653,18 @@ bucket_definitions:
 }
 ]);
 }
-catch (
-
-
+catch (e_6) {
+env_6.error = e_6;
+env_6.hasError = true;
 }
 finally {
-const
-if (
-await
+const result_6 = __disposeResources(env_6);
+if (result_6)
+await result_6;
 }
 });
 test('unchanged data with replica identity full', async () => {
-const
+const env_7 = { stack: [], error: void 0, hasError: false };
 try {
 function rid2(id, description) {
 return getUuidReplicaIdentityBson({ id, description }, [
@@ -1268,7 +672,7 @@ bucket_definitions:
 { name: 'description', type: 'VARCHAR', typeId: 25 }
 ]);
 }
-const factory = __addDisposableResource(
+const factory = __addDisposableResource(env_7, await generateStorageFactory(), true);
 const syncRules = await factory.updateSyncRules({
 content: `
 bucket_definitions:
@@ -1350,24 +754,24 @@ bucket_definitions:
 }
 ]);
 }
-catch (
-
-
+catch (e_7) {
+env_7.error = e_7;
+env_7.hasError = true;
 }
 finally {
-const
-if (
-await
+const result_7 = __disposeResources(env_7);
+if (result_7)
+await result_7;
 }
 });
 test('large batch', async () => {
-const
+const env_8 = { stack: [], error: void 0, hasError: false };
 try {
 // Test syncing a batch of data that is small in count,
 // but large enough in size to be split over multiple returned batches.
 // The specific batch splits is an implementation detail of the storage driver,
 // and the test will have to updated when other implementations are added.
-const factory = __addDisposableResource(
+const factory = __addDisposableResource(env_8, await generateStorageFactory(), true);
 const syncRules = await factory.updateSyncRules({
 content: `
 bucket_definitions:
@@ -1447,21 +851,21 @@ bucket_definitions:
 expect(test_utils.getBatchData(batch3)).toEqual([]);
 expect(test_utils.getBatchMeta(batch3)).toEqual(null);
 }
-catch (
-
-
+catch (e_8) {
+env_8.error = e_8;
+env_8.hasError = true;
 }
 finally {
-const
-if (
-await
+const result_8 = __disposeResources(env_8);
+if (result_8)
+await result_8;
 }
 });
 test('long batch', async () => {
-const
+const env_9 = { stack: [], error: void 0, hasError: false };
 try {
 // Test syncing a batch of data that is limited by count.
-const factory = __addDisposableResource(
+const factory = __addDisposableResource(env_9, await generateStorageFactory(), true);
 const syncRules = await factory.updateSyncRules({
 content: `
 bucket_definitions:
@@ -1517,21 +921,21 @@ bucket_definitions:
 expect(test_utils.getBatchData(batch3)).toEqual([]);
 expect(test_utils.getBatchMeta(batch3)).toEqual(null);
 }
-catch (
-
-
+catch (e_9) {
+env_9.error = e_9;
+env_9.hasError = true;
 }
 finally {
-const
-if (
-await
+const result_9 = __disposeResources(env_9);
+if (result_9)
+await result_9;
 }
 });
 describe('batch has_more', () => {
 const setup = async (options) => {
-const
+const env_10 = { stack: [], error: void 0, hasError: false };
 try {
-const factory = __addDisposableResource(
+const factory = __addDisposableResource(env_10, await generateStorageFactory(), true);
 const syncRules = await factory.updateSyncRules({
 content: `
 bucket_definitions:
@@ -1566,14 +970,14 @@ bucket_definitions:
 ['global2[]', 0n]
 ]), options));
 }
-catch (
-
-
+catch (e_10) {
+env_10.error = e_10;
+env_10.hasError = true;
 }
 finally {
-const
-if (
-await
+const result_10 = __disposeResources(env_10);
+if (result_10)
+await result_10;
 }
 };
 test('batch has_more (1)', async () => {
@@ -1665,9 +1069,9 @@ bucket_definitions:
 });
 });
 test('empty storage metrics', async () => {
-const
+const env_11 = { stack: [], error: void 0, hasError: false };
 try {
-const f = __addDisposableResource(
+const f = __addDisposableResource(env_11, await generateStorageFactory({ dropAll: true }), true);
 const metrics = await f.getStorageMetrics();
 expect(metrics).toEqual({
 operations_size_bytes: 0,
@@ -1682,353 +1086,23 @@ bucket_definitions:
 const metrics2 = await f.getStorageMetrics();
 expect(metrics2).toMatchSnapshot();
 }
-catch (
-
-
-}
-finally {
-const result_20 = __disposeResources(env_20);
-if (result_20)
-await result_20;
-}
-});
-test('invalidate cached parsed sync rules', async () => {
-const env_21 = { stack: [], error: void 0, hasError: false };
-try {
-const bucketStorageFactory = __addDisposableResource(env_21, await generateStorageFactory(), true);
-const syncRules = await bucketStorageFactory.updateSyncRules({
-content: `
-bucket_definitions:
-by_workspace:
-parameters:
-- SELECT id as workspace_id FROM workspace WHERE
-workspace."userId" = token_parameters.user_id
-data: []
-`
-});
-const syncBucketStorage = bucketStorageFactory.getInstance(syncRules);
-const parsedSchema1 = syncBucketStorage.getParsedSyncRules({
-defaultSchema: 'public'
-});
-const parsedSchema2 = syncBucketStorage.getParsedSyncRules({
-defaultSchema: 'public'
-});
-// These should be cached, this will be the same instance
-expect(parsedSchema2).equals(parsedSchema1);
-expect(parsedSchema1.getSourceTables()[0].schema).equals('public');
-const parsedSchema3 = syncBucketStorage.getParsedSyncRules({
-defaultSchema: 'databasename'
-});
-// The cache should not be used
-expect(parsedSchema3).not.equals(parsedSchema2);
-expect(parsedSchema3.getSourceTables()[0].schema).equals('databasename');
-}
-catch (e_21) {
-env_21.error = e_21;
-env_21.hasError = true;
-}
-finally {
-const result_21 = __disposeResources(env_21);
-if (result_21)
-await result_21;
-}
-});
-test('managed write checkpoints - checkpoint after write', async (context) => {
-const env_22 = { stack: [], error: void 0, hasError: false };
-try {
-const factory = __addDisposableResource(env_22, await generateStorageFactory(), true);
-const r = await factory.configureSyncRules({
-content: `
-bucket_definitions:
-mybucket:
-data: []
-`,
-validate: false
-});
-const bucketStorage = factory.getInstance(r.persisted_sync_rules);
-const abortController = new AbortController();
-context.onTestFinished(() => abortController.abort());
-const iter = bucketStorage
-.watchCheckpointChanges({ user_id: 'user1', signal: abortController.signal })[Symbol.asyncIterator]();
-const writeCheckpoint = await bucketStorage.createManagedWriteCheckpoint({
-heads: { '1': '5/0' },
-user_id: 'user1'
-});
-await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-await batch.keepalive('5/0');
-});
-const result = await iter.next();
-expect(result).toMatchObject({
-done: false,
-value: {
-base: {
-checkpoint: 0n,
-lsn: '5/0'
-},
-writeCheckpoint: writeCheckpoint
-}
-});
-}
-catch (e_22) {
-env_22.error = e_22;
-env_22.hasError = true;
-}
-finally {
-const result_22 = __disposeResources(env_22);
-if (result_22)
-await result_22;
-}
-});
-test('managed write checkpoints - write after checkpoint', async (context) => {
-const env_23 = { stack: [], error: void 0, hasError: false };
-try {
-const factory = __addDisposableResource(env_23, await generateStorageFactory(), true);
-const r = await factory.configureSyncRules({
-content: `
-bucket_definitions:
-mybucket:
-data: []
-`,
-validate: false
-});
-const bucketStorage = factory.getInstance(r.persisted_sync_rules);
-const abortController = new AbortController();
-context.onTestFinished(() => abortController.abort());
-const iter = bucketStorage
-.watchCheckpointChanges({ user_id: 'user1', signal: abortController.signal })[Symbol.asyncIterator]();
-await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-await batch.keepalive('5/0');
-});
-const result = await iter.next();
-expect(result).toMatchObject({
-done: false,
-value: {
-base: {
-checkpoint: 0n,
-lsn: '5/0'
-},
-writeCheckpoint: null
-}
-});
-const writeCheckpoint = await bucketStorage.createManagedWriteCheckpoint({
-heads: { '1': '6/0' },
-user_id: 'user1'
-});
-// We have to trigger a new keepalive after the checkpoint, at least to cover postgres storage.
-// This is what is effetively triggered with RouteAPI.createReplicationHead().
-// MongoDB storage doesn't explicitly need this anymore.
-await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-await batch.keepalive('6/0');
-});
-let result2 = await iter.next();
-if (result2.value?.base?.lsn == '5/0') {
-// Events could arrive in a different order in some cases - this caters for it
-result2 = await iter.next();
-}
-expect(result2).toMatchObject({
-done: false,
-value: {
-base: {
-checkpoint: 0n,
-lsn: '6/0'
-},
-writeCheckpoint: writeCheckpoint
-}
-});
-}
-catch (e_23) {
-env_23.error = e_23;
-env_23.hasError = true;
-}
-finally {
-const result_23 = __disposeResources(env_23);
-if (result_23)
-await result_23;
-}
-});
-test('custom write checkpoints - checkpoint after write', async (context) => {
-const env_24 = { stack: [], error: void 0, hasError: false };
-try {
-const factory = __addDisposableResource(env_24, await generateStorageFactory(), true);
-const r = await factory.configureSyncRules({
-content: `
-bucket_definitions:
-mybucket:
-data: []
-`,
-validate: false
-});
-const bucketStorage = factory.getInstance(r.persisted_sync_rules);
-bucketStorage.setWriteCheckpointMode(storage.WriteCheckpointMode.CUSTOM);
-const abortController = new AbortController();
-context.onTestFinished(() => abortController.abort());
-const iter = bucketStorage
-.watchCheckpointChanges({ user_id: 'user1', signal: abortController.signal })[Symbol.asyncIterator]();
-await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-await batch.addCustomWriteCheckpoint({
-checkpoint: 5n,
-user_id: 'user1'
-});
-await batch.flush();
-await batch.keepalive('5/0');
-});
-const result = await iter.next();
-expect(result).toMatchObject({
-done: false,
-value: {
-base: {
-lsn: '5/0'
-},
-writeCheckpoint: 5n
-}
-});
-}
-catch (e_24) {
-env_24.error = e_24;
-env_24.hasError = true;
-}
-finally {
-const result_24 = __disposeResources(env_24);
-if (result_24)
-await result_24;
-}
-});
-test('custom write checkpoints - standalone checkpoint', async (context) => {
-const env_25 = { stack: [], error: void 0, hasError: false };
-try {
-const factory = __addDisposableResource(env_25, await generateStorageFactory(), true);
-const r = await factory.configureSyncRules({
-content: `
-bucket_definitions:
-mybucket:
-data: []
-`,
-validate: false
-});
-const bucketStorage = factory.getInstance(r.persisted_sync_rules);
-bucketStorage.setWriteCheckpointMode(storage.WriteCheckpointMode.CUSTOM);
-const abortController = new AbortController();
-context.onTestFinished(() => abortController.abort());
-const iter = bucketStorage
-.watchCheckpointChanges({ user_id: 'user1', signal: abortController.signal })[Symbol.asyncIterator]();
-await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-// Flush to clear state
-await batch.flush();
-await batch.addCustomWriteCheckpoint({
-checkpoint: 5n,
-user_id: 'user1'
-});
-await batch.flush();
-await batch.keepalive('5/0');
-});
-const result = await iter.next();
-expect(result).toMatchObject({
-done: false,
-value: {
-base: {
-lsn: '5/0'
-},
-writeCheckpoint: 5n
-}
-});
-}
-catch (e_25) {
-env_25.error = e_25;
-env_25.hasError = true;
-}
-finally {
-const result_25 = __disposeResources(env_25);
-if (result_25)
-await result_25;
-}
-});
-test('custom write checkpoints - write after checkpoint', async (context) => {
-const env_26 = { stack: [], error: void 0, hasError: false };
-try {
-const factory = __addDisposableResource(env_26, await generateStorageFactory(), true);
-const r = await factory.configureSyncRules({
-content: `
-bucket_definitions:
-mybucket:
-data: []
-`,
-validate: false
-});
-const bucketStorage = factory.getInstance(r.persisted_sync_rules);
-bucketStorage.setWriteCheckpointMode(storage.WriteCheckpointMode.CUSTOM);
-const abortController = new AbortController();
-context.onTestFinished(() => abortController.abort());
-const iter = bucketStorage
-.watchCheckpointChanges({ user_id: 'user1', signal: abortController.signal })[Symbol.asyncIterator]();
-await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-await batch.keepalive('5/0');
-});
-const result = await iter.next();
-expect(result).toMatchObject({
-done: false,
-value: {
-base: {
-lsn: '5/0'
-},
-writeCheckpoint: null
-}
-});
-await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-batch.addCustomWriteCheckpoint({
-checkpoint: 6n,
-user_id: 'user1'
-});
-await batch.flush();
-await batch.keepalive('6/0');
-});
-let result2 = await iter.next();
-expect(result2).toMatchObject({
-done: false,
-value: {
-base: {
-// can be 5/0 or 6/0 - actual value not relevant for custom write checkpoints
-// lsn: '6/0'
-},
-writeCheckpoint: 6n
-}
-});
-await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
-batch.addCustomWriteCheckpoint({
-checkpoint: 7n,
-user_id: 'user1'
-});
-await batch.flush();
-await batch.keepalive('7/0');
-});
-let result3 = await iter.next();
-expect(result3).toMatchObject({
-done: false,
-value: {
-base: {
-// can be 5/0, 6/0 or 7/0 - actual value not relevant for custom write checkpoints
-// lsn: '7/0'
-},
-writeCheckpoint: 7n
-}
-});
-}
-catch (e_26) {
-env_26.error = e_26;
-env_26.hasError = true;
+catch (e_11) {
+env_11.error = e_11;
+env_11.hasError = true;
 }
 finally {
-const
-if (
-await
+const result_11 = __disposeResources(env_11);
+if (result_11)
+await result_11;
 }
 });
 test('op_id initialization edge case', async () => {
-const
+const env_12 = { stack: [], error: void 0, hasError: false };
 try {
 // Test syncing a batch of data that is small in count,
 // but large enough in size to be split over multiple returned chunks.
 // Similar to the above test, but splits over 1MB chunks.
-const factory = __addDisposableResource(
+const factory = __addDisposableResource(env_12, await generateStorageFactory(), true);
 const syncRules = await factory.updateSyncRules({
 content: `
 bucket_definitions:
@@ -2068,20 +1142,20 @@ bucket_definitions:
 // we expect 0n and 1n, or 1n and 2n.
 expect(checkpoint2).toBeGreaterThan(checkpoint1);
 }
-catch (
-
-
+catch (e_12) {
+env_12.error = e_12;
+env_12.hasError = true;
 }
 finally {
-const
-if (
-await
+const result_12 = __disposeResources(env_12);
+if (result_12)
+await result_12;
 }
 });
 test('data with custom types', async () => {
-const
+const env_13 = { stack: [], error: void 0, hasError: false };
 try {
-const factory = __addDisposableResource(
+const factory = __addDisposableResource(env_13, await generateStorageFactory(), true);
 const testValue = {
 sourceTable: TEST_TABLE,
 tag: storage.SaveOperationTag.INSERT,
@@ -2138,15 +1212,57 @@ bucket_definitions:
 }
 ]);
 }
-catch (
-
-
+catch (e_13) {
+env_13.error = e_13;
+env_13.hasError = true;
+}
+finally {
+const result_13 = __disposeResources(env_13);
+if (result_13)
+await result_13;
+}
+});
+test('unchanged checksums', async () => {
+const env_14 = { stack: [], error: void 0, hasError: false };
+try {
+const factory = __addDisposableResource(env_14, await generateStorageFactory(), true);
+const syncRules = await factory.updateSyncRules({
+content: `
+bucket_definitions:
+global:
+data:
+- SELECT client_id as id, description FROM "%"
+`
+});
+const bucketStorage = factory.getInstance(syncRules);
+const sourceTable = TEST_TABLE;
+await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+await batch.save({
+sourceTable,
+tag: storage.SaveOperationTag.INSERT,
+after: {
+id: 'test1',
+description: 'test1a'
+},
+afterReplicaId: test_utils.rid('test1')
+});
+await batch.commit('1/1');
+});
+const { checkpoint } = await bucketStorage.getCheckpoint();
+const checksums = [...(await bucketStorage.getChecksums(checkpoint, ['global[]'])).values()];
+expect(checksums).toEqual([{ bucket: 'global[]', checksum: 1917136889, count: 1 }]);
+const checksums2 = [...(await bucketStorage.getChecksums(checkpoint + 1n, ['global[]'])).values()];
+expect(checksums2).toEqual([{ bucket: 'global[]', checksum: 1917136889, count: 1 }]);
+}
+catch (e_14) {
+env_14.error = e_14;
+env_14.hasError = true;
 }
 finally {
-const
-if (
-await
+const result_14 = __disposeResources(env_14);
+if (result_14)
+await result_14;
 }
 });
 }
-//# sourceMappingURL=register-data-storage-tests.js.map
+//# sourceMappingURL=register-data-storage-data-tests.js.map