@powersync/service-core-tests 0.12.1 → 0.12.3
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only.
- package/CHANGELOG.md +18 -0
- package/dist/tests/register-compacting-tests.js +68 -0
- package/dist/tests/register-compacting-tests.js.map +1 -1
- package/dist/tests/register-data-storage-checkpoint-tests.d.ts +12 -0
- package/dist/tests/register-data-storage-checkpoint-tests.js +357 -0
- package/dist/tests/register-data-storage-checkpoint-tests.js.map +1 -0
- package/dist/tests/register-data-storage-data-tests.d.ts +18 -0
- package/dist/tests/{register-data-storage-tests.js → register-data-storage-data-tests.js} +216 -1037
- package/dist/tests/register-data-storage-data-tests.js.map +1 -0
- package/dist/tests/{register-data-storage-tests.d.ts → register-data-storage-parameter-tests.d.ts} +1 -2
- package/dist/tests/register-data-storage-parameter-tests.js +707 -0
- package/dist/tests/register-data-storage-parameter-tests.js.map +1 -0
- package/dist/tests/register-sync-tests.js +2 -1
- package/dist/tests/register-sync-tests.js.map +1 -1
- package/dist/tests/tests-index.d.ts +4 -1
- package/dist/tests/tests-index.js +4 -1
- package/dist/tests/tests-index.js.map +1 -1
- package/dist/tests/util.d.ts +1 -0
- package/dist/tests/util.js +3 -0
- package/dist/tests/util.js.map +1 -0
- package/package.json +3 -3
- package/src/tests/register-compacting-tests.ts +63 -0
- package/src/tests/register-data-storage-checkpoint-tests.ts +277 -0
- package/src/tests/{register-data-storage-tests.ts → register-data-storage-data-tests.ts} +93 -865
- package/src/tests/register-data-storage-parameter-tests.ts +613 -0
- package/src/tests/register-sync-tests.ts +2 -1
- package/src/tests/tests-index.ts +4 -1
- package/src/tests/util.ts +3 -0
- package/tsconfig.tsbuildinfo +1 -1
- package/dist/tests/register-data-storage-tests.js.map +0 -1
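
The headline change in this release is the split of the former `register-data-storage-tests` module into separate data, parameter, and checkpoint suites, re-exported through `tests-index`. As a minimal sketch of how a storage driver's test file might register the split suites: the only export name confirmed by this diff is `registerDataStorageParameterTests`; the other two function names, the root re-export, and `MONGO_STORAGE_FACTORY` are assumptions based on the new file names and the JSDoc example in the diff below.

```TypeScript
import { describe } from 'vitest';
// Assumed export names, derived from the new file names; only
// registerDataStorageParameterTests is visible in this diff.
import {
  registerDataStorageCheckpointTests,
  registerDataStorageDataTests,
  registerDataStorageParameterTests
} from '@powersync/service-core-tests';
// Hypothetical consumer-defined storage factory for the store under test.
import { MONGO_STORAGE_FACTORY } from './util.js';

describe('store - mongodb', () => {
  registerDataStorageDataTests(MONGO_STORAGE_FACTORY);
  registerDataStorageParameterTests(MONGO_STORAGE_FACTORY);
  registerDataStorageCheckpointTests(MONGO_STORAGE_FACTORY);
});
```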
@@ -0,0 +1,707 @@
+var __addDisposableResource = (this && this.__addDisposableResource) || function (env, value, async) {
+    if (value !== null && value !== void 0) {
+        if (typeof value !== "object" && typeof value !== "function") throw new TypeError("Object expected.");
+        var dispose, inner;
+        if (async) {
+            if (!Symbol.asyncDispose) throw new TypeError("Symbol.asyncDispose is not defined.");
+            dispose = value[Symbol.asyncDispose];
+        }
+        if (dispose === void 0) {
+            if (!Symbol.dispose) throw new TypeError("Symbol.dispose is not defined.");
+            dispose = value[Symbol.dispose];
+            if (async) inner = dispose;
+        }
+        if (typeof dispose !== "function") throw new TypeError("Object not disposable.");
+        if (inner) dispose = function() { try { inner.call(this); } catch (e) { return Promise.reject(e); } };
+        env.stack.push({ value: value, dispose: dispose, async: async });
+    }
+    else if (async) {
+        env.stack.push({ async: true });
+    }
+    return value;
+};
+var __disposeResources = (this && this.__disposeResources) || (function (SuppressedError) {
+    return function (env) {
+        function fail(e) {
+            env.error = env.hasError ? new SuppressedError(e, env.error, "An error was suppressed during disposal.") : e;
+            env.hasError = true;
+        }
+        var r, s = 0;
+        function next() {
+            while (r = env.stack.pop()) {
+                try {
+                    if (!r.async && s === 1) return s = 0, env.stack.push(r), Promise.resolve().then(next);
+                    if (r.dispose) {
+                        var result = r.dispose.call(r.value);
+                        if (r.async) return s |= 2, Promise.resolve(result).then(next, function(e) { fail(e); return next(); });
+                    }
+                    else s |= 1;
+                }
+                catch (e) {
+                    fail(e);
+                }
+            }
+            if (s === 1) return env.hasError ? Promise.reject(env.error) : Promise.resolve();
+            if (env.hasError) throw env.error;
+        }
+        return next();
+    };
+})(typeof SuppressedError === "function" ? SuppressedError : function (error, suppressed, message) {
+    var e = new Error(message);
+    return e.name = "SuppressedError", e.error = error, e.suppressed = suppressed, e;
+});
+import { storage } from '@powersync/service-core';
+import { ParameterLookup, RequestParameters } from '@powersync/service-sync-rules';
+import { expect, test } from 'vitest';
+import * as test_utils from '../test-utils/test-utils-index.js';
+import { TEST_TABLE } from './util.js';
+/**
+ * @example
+ * ```TypeScript
+ *
+ * describe('store - mongodb', function () {
+ *   registerDataStorageParameterTests(MONGO_STORAGE_FACTORY);
+ * });
+ *
+ * ```
+ */
+export function registerDataStorageParameterTests(generateStorageFactory) {
+    test('save and load parameters', async () => {
+        const env_1 = { stack: [], error: void 0, hasError: false };
+        try {
+            const factory = __addDisposableResource(env_1, await generateStorageFactory(), true);
+            const syncRules = await factory.updateSyncRules({
+                content: `
+bucket_definitions:
+  mybucket:
+    parameters:
+      - SELECT group_id FROM test WHERE id1 = token_parameters.user_id OR id2 = token_parameters.user_id
+    data: []
+`
+            });
+            const bucketStorage = factory.getInstance(syncRules);
+            await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+                await batch.save({
+                    sourceTable: TEST_TABLE,
+                    tag: storage.SaveOperationTag.INSERT,
+                    after: {
+                        id: 't2',
+                        id1: 'user3',
+                        id2: 'user4',
+                        group_id: 'group2a'
+                    },
+                    afterReplicaId: test_utils.rid('t2')
+                });
+                await batch.save({
+                    sourceTable: TEST_TABLE,
+                    tag: storage.SaveOperationTag.INSERT,
+                    after: {
+                        id: 't1',
+                        id1: 'user1',
+                        id2: 'user2',
+                        group_id: 'group1a'
+                    },
+                    afterReplicaId: test_utils.rid('t1')
+                });
+                await batch.commit('1/1');
+            });
+            const checkpoint = await bucketStorage.getCheckpoint();
+            const parameters = await checkpoint.getParameterSets([ParameterLookup.normalized('mybucket', '1', ['user1'])]);
+            expect(parameters).toEqual([
+                {
+                    group_id: 'group1a'
+                }
+            ]);
+        }
+        catch (e_1) {
+            env_1.error = e_1;
+            env_1.hasError = true;
+        }
+        finally {
+            const result_1 = __disposeResources(env_1);
+            if (result_1)
+                await result_1;
+        }
+    });
+    test('it should use the latest version', async () => {
+        const env_2 = { stack: [], error: void 0, hasError: false };
+        try {
+            const factory = __addDisposableResource(env_2, await generateStorageFactory(), true);
+            const syncRules = await factory.updateSyncRules({
+                content: `
+bucket_definitions:
+  mybucket:
+    parameters:
+      - SELECT group_id FROM test WHERE id = token_parameters.user_id
+    data: []
+`
+            });
+            const bucketStorage = factory.getInstance(syncRules);
+            await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+                await batch.save({
+                    sourceTable: TEST_TABLE,
+                    tag: storage.SaveOperationTag.INSERT,
+                    after: {
+                        id: 'user1',
+                        group_id: 'group1'
+                    },
+                    afterReplicaId: test_utils.rid('user1')
+                });
+                await batch.commit('1/1');
+            });
+            const checkpoint1 = await bucketStorage.getCheckpoint();
+            await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+                await batch.save({
+                    sourceTable: TEST_TABLE,
+                    tag: storage.SaveOperationTag.INSERT,
+                    after: {
+                        id: 'user1',
+                        group_id: 'group2'
+                    },
+                    afterReplicaId: test_utils.rid('user1')
+                });
+                await batch.commit('1/2');
+            });
+            const checkpoint2 = await bucketStorage.getCheckpoint();
+            const parameters = await checkpoint2.getParameterSets([ParameterLookup.normalized('mybucket', '1', ['user1'])]);
+            expect(parameters).toEqual([
+                {
+                    group_id: 'group2'
+                }
+            ]);
+            // Use the checkpoint to get older data if relevant
+            const parameters2 = await checkpoint1.getParameterSets([ParameterLookup.normalized('mybucket', '1', ['user1'])]);
+            expect(parameters2).toEqual([
+                {
+                    group_id: 'group1'
+                }
+            ]);
+        }
+        catch (e_2) {
+            env_2.error = e_2;
+            env_2.hasError = true;
+        }
+        finally {
+            const result_2 = __disposeResources(env_2);
+            if (result_2)
+                await result_2;
+        }
+    });
+    test('it should use the latest version after updates', async () => {
+        const env_3 = { stack: [], error: void 0, hasError: false };
+        try {
+            const factory = __addDisposableResource(env_3, await generateStorageFactory(), true);
+            const syncRules = await factory.updateSyncRules({
+                content: `
+bucket_definitions:
+  mybucket:
+    parameters:
+      - SELECT id AS todo_id
+        FROM todos
+        WHERE list_id IN token_parameters.list_id
+    data: []
+`
+            });
+            const bucketStorage = factory.getInstance(syncRules);
+            const table = test_utils.makeTestTable('todos', ['id', 'list_id']);
+            await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+                // Create two todos which initially belong to different lists
+                await batch.save({
+                    sourceTable: table,
+                    tag: storage.SaveOperationTag.INSERT,
+                    after: {
+                        id: 'todo1',
+                        list_id: 'list1'
+                    },
+                    afterReplicaId: test_utils.rid('todo1')
+                });
+                await batch.save({
+                    sourceTable: table,
+                    tag: storage.SaveOperationTag.INSERT,
+                    after: {
+                        id: 'todo2',
+                        list_id: 'list2'
+                    },
+                    afterReplicaId: test_utils.rid('todo2')
+                });
+                await batch.commit('1/1');
+            });
+            await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+                // Update the second todo item to now belong to list 1
+                await batch.save({
+                    sourceTable: table,
+                    tag: storage.SaveOperationTag.UPDATE,
+                    after: {
+                        id: 'todo2',
+                        list_id: 'list1'
+                    },
+                    afterReplicaId: test_utils.rid('todo2')
+                });
+                await batch.commit('1/1');
+            });
+            // We specifically request the todo_ids for both lists.
+            // The removal operation for the association of `list2`::`todo2` should not interfere with the new
+            // association of `list1`::`todo2`
+            const checkpoint = await bucketStorage.getCheckpoint();
+            const parameters = await checkpoint.getParameterSets([
+                ParameterLookup.normalized('mybucket', '1', ['list1']),
+                ParameterLookup.normalized('mybucket', '1', ['list2'])
+            ]);
+            expect(parameters.sort((a, b) => a.todo_id.localeCompare(b.todo_id))).toEqual([
+                {
+                    todo_id: 'todo1'
+                },
+                {
+                    todo_id: 'todo2'
+                }
+            ]);
+        }
+        catch (e_3) {
+            env_3.error = e_3;
+            env_3.hasError = true;
+        }
+        finally {
+            const result_3 = __disposeResources(env_3);
+            if (result_3)
+                await result_3;
+        }
+    });
+    test('save and load parameters with different number types', async () => {
+        const env_4 = { stack: [], error: void 0, hasError: false };
+        try {
+            const factory = __addDisposableResource(env_4, await generateStorageFactory(), true);
+            const syncRules = await factory.updateSyncRules({
+                content: `
+bucket_definitions:
+  mybucket:
+    parameters:
+      - SELECT group_id FROM test WHERE n1 = token_parameters.n1 and f2 = token_parameters.f2 and f3 = token_parameters.f3
+    data: []
+`
+            });
+            const bucketStorage = factory.getInstance(syncRules);
+            await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+                await batch.save({
+                    sourceTable: TEST_TABLE,
+                    tag: storage.SaveOperationTag.INSERT,
+                    after: {
+                        id: 't1',
+                        group_id: 'group1',
+                        n1: 314n,
+                        f2: 314,
+                        f3: 3.14
+                    },
+                    afterReplicaId: test_utils.rid('t1')
+                });
+                await batch.commit('1/1');
+            });
+            const TEST_PARAMS = { group_id: 'group1' };
+            const checkpoint = await bucketStorage.getCheckpoint();
+            const parameters1 = await checkpoint.getParameterSets([
+                ParameterLookup.normalized('mybucket', '1', [314n, 314, 3.14])
+            ]);
+            expect(parameters1).toEqual([TEST_PARAMS]);
+            const parameters2 = await checkpoint.getParameterSets([
+                ParameterLookup.normalized('mybucket', '1', [314, 314n, 3.14])
+            ]);
+            expect(parameters2).toEqual([TEST_PARAMS]);
+            const parameters3 = await checkpoint.getParameterSets([
+                ParameterLookup.normalized('mybucket', '1', [314n, 314, 3])
+            ]);
+            expect(parameters3).toEqual([]);
+        }
+        catch (e_4) {
+            env_4.error = e_4;
+            env_4.hasError = true;
+        }
+        finally {
+            const result_4 = __disposeResources(env_4);
+            if (result_4)
+                await result_4;
+        }
+    });
+    test('save and load parameters with large numbers', async () => {
+        const env_5 = { stack: [], error: void 0, hasError: false };
+        try {
+            // This ensures serialization / deserialization of "current_data" is done correctly.
+            // The specific case tested here cannot happen with postgres in practice, but we still
+            // test this to ensure correct deserialization.
+            const factory = __addDisposableResource(env_5, await generateStorageFactory(), true);
+            const syncRules = await factory.updateSyncRules({
+                content: `
+bucket_definitions:
+  mybucket:
+    parameters:
+      - SELECT group_id FROM test WHERE n1 = token_parameters.n1
+    data: []
+`
+            });
+            const bucketStorage = factory.getInstance(syncRules);
+            await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+                await batch.save({
+                    sourceTable: TEST_TABLE,
+                    tag: storage.SaveOperationTag.INSERT,
+                    after: {
+                        id: 't1',
+                        group_id: 'group1',
+                        n1: 1152921504606846976n // 2^60
+                    },
+                    afterReplicaId: test_utils.rid('t1')
+                });
+                await batch.save({
+                    sourceTable: TEST_TABLE,
+                    tag: storage.SaveOperationTag.UPDATE,
+                    after: {
+                        id: 't1',
+                        group_id: 'group1',
+                        // Simulate a TOAST value, even though it can't happen for values like this
+                        // in practice.
+                        n1: undefined
+                    },
+                    afterReplicaId: test_utils.rid('t1')
+                });
+                await batch.commit('1/1');
+            });
+            const TEST_PARAMS = { group_id: 'group1' };
+            const checkpoint = await bucketStorage.getCheckpoint();
+            const parameters1 = await checkpoint.getParameterSets([
+                ParameterLookup.normalized('mybucket', '1', [1152921504606846976n])
+            ]);
+            expect(parameters1).toEqual([TEST_PARAMS]);
+        }
+        catch (e_5) {
+            env_5.error = e_5;
+            env_5.hasError = true;
+        }
+        finally {
+            const result_5 = __disposeResources(env_5);
+            if (result_5)
+                await result_5;
+        }
+    });
+    test('save and load parameters with workspaceId', async () => {
+        const env_6 = { stack: [], error: void 0, hasError: false };
+        try {
+            const WORKSPACE_TABLE = test_utils.makeTestTable('workspace', ['id']);
+            const factory = __addDisposableResource(env_6, await generateStorageFactory(), true);
+            const syncRules = await factory.updateSyncRules({
+                content: `
+bucket_definitions:
+  by_workspace:
+    parameters:
+      - SELECT id as workspace_id FROM workspace WHERE
+        workspace."userId" = token_parameters.user_id
+    data: []
+`
+            });
+            const sync_rules = syncRules.parsed(test_utils.PARSE_OPTIONS).sync_rules;
+            const bucketStorage = factory.getInstance(syncRules);
+            await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+                await batch.save({
+                    sourceTable: WORKSPACE_TABLE,
+                    tag: storage.SaveOperationTag.INSERT,
+                    after: {
+                        id: 'workspace1',
+                        userId: 'u1'
+                    },
+                    afterReplicaId: test_utils.rid('workspace1')
+                });
+                await batch.commit('1/1');
+            });
+            const checkpoint = await bucketStorage.getCheckpoint();
+            const parameters = new RequestParameters({ sub: 'u1' }, {});
+            const q1 = sync_rules.bucketSources[0].parameterQueries[0];
+            const lookups = q1.getLookups(parameters);
+            expect(lookups).toEqual([ParameterLookup.normalized('by_workspace', '1', ['u1'])]);
+            const parameter_sets = await checkpoint.getParameterSets(lookups);
+            expect(parameter_sets).toEqual([{ workspace_id: 'workspace1' }]);
+            const buckets = await sync_rules
+                .getBucketParameterQuerier(test_utils.querierOptions(parameters))
+                .querier.queryDynamicBucketDescriptions({
+                    getParameterSets(lookups) {
+                        return checkpoint.getParameterSets(lookups);
+                    }
+                });
+            expect(buckets).toEqual([
+                { bucket: 'by_workspace["workspace1"]', priority: 3, definition: 'by_workspace', inclusion_reasons: ['default'] }
+            ]);
+        }
+        catch (e_6) {
+            env_6.error = e_6;
+            env_6.hasError = true;
+        }
+        finally {
+            const result_6 = __disposeResources(env_6);
+            if (result_6)
+                await result_6;
+        }
+    });
+    test('save and load parameters with dynamic global buckets', async () => {
+        const env_7 = { stack: [], error: void 0, hasError: false };
+        try {
+            const WORKSPACE_TABLE = test_utils.makeTestTable('workspace');
+            const factory = __addDisposableResource(env_7, await generateStorageFactory(), true);
+            const syncRules = await factory.updateSyncRules({
+                content: `
+bucket_definitions:
+  by_public_workspace:
+    parameters:
+      - SELECT id as workspace_id FROM workspace WHERE
+        workspace.visibility = 'public'
+    data: []
+`
+            });
+            const sync_rules = syncRules.parsed(test_utils.PARSE_OPTIONS).sync_rules;
+            const bucketStorage = factory.getInstance(syncRules);
+            await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+                await batch.save({
+                    sourceTable: WORKSPACE_TABLE,
+                    tag: storage.SaveOperationTag.INSERT,
+                    after: {
+                        id: 'workspace1',
+                        visibility: 'public'
+                    },
+                    afterReplicaId: test_utils.rid('workspace1')
+                });
+                await batch.save({
+                    sourceTable: WORKSPACE_TABLE,
+                    tag: storage.SaveOperationTag.INSERT,
+                    after: {
+                        id: 'workspace2',
+                        visibility: 'private'
+                    },
+                    afterReplicaId: test_utils.rid('workspace2')
+                });
+                await batch.save({
+                    sourceTable: WORKSPACE_TABLE,
+                    tag: storage.SaveOperationTag.INSERT,
+                    after: {
+                        id: 'workspace3',
+                        visibility: 'public'
+                    },
+                    afterReplicaId: test_utils.rid('workspace3')
+                });
+                await batch.commit('1/1');
+            });
+            const checkpoint = await bucketStorage.getCheckpoint();
+            const parameters = new RequestParameters({ sub: 'unknown' }, {});
+            const q1 = sync_rules.bucketSources[0].parameterQueries[0];
+            const lookups = q1.getLookups(parameters);
+            expect(lookups).toEqual([ParameterLookup.normalized('by_public_workspace', '1', [])]);
+            const parameter_sets = await checkpoint.getParameterSets(lookups);
+            parameter_sets.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
+            expect(parameter_sets).toEqual([{ workspace_id: 'workspace1' }, { workspace_id: 'workspace3' }]);
+            const buckets = await sync_rules
+                .getBucketParameterQuerier(test_utils.querierOptions(parameters))
+                .querier.queryDynamicBucketDescriptions({
+                    getParameterSets(lookups) {
+                        return checkpoint.getParameterSets(lookups);
+                    }
+                });
+            buckets.sort((a, b) => a.bucket.localeCompare(b.bucket));
+            expect(buckets).toEqual([
+                {
+                    bucket: 'by_public_workspace["workspace1"]',
+                    priority: 3,
+                    definition: 'by_public_workspace',
+                    inclusion_reasons: ['default']
+                },
+                {
+                    bucket: 'by_public_workspace["workspace3"]',
+                    priority: 3,
+                    definition: 'by_public_workspace',
+                    inclusion_reasons: ['default']
+                }
+            ]);
+        }
+        catch (e_7) {
+            env_7.error = e_7;
+            env_7.hasError = true;
+        }
+        finally {
+            const result_7 = __disposeResources(env_7);
+            if (result_7)
+                await result_7;
+        }
+    });
+    test('multiple parameter queries', async () => {
+        const env_8 = { stack: [], error: void 0, hasError: false };
+        try {
+            const WORKSPACE_TABLE = test_utils.makeTestTable('workspace');
+            const factory = __addDisposableResource(env_8, await generateStorageFactory(), true);
+            const syncRules = await factory.updateSyncRules({
+                content: `
+bucket_definitions:
+  by_workspace:
+    parameters:
+      - SELECT id as workspace_id FROM workspace WHERE
+        workspace.visibility = 'public'
+      - SELECT id as workspace_id FROM workspace WHERE
+        workspace.user_id = token_parameters.user_id
+    data: []
+`
+            });
+            const sync_rules = syncRules.parsed(test_utils.PARSE_OPTIONS).sync_rules;
+            const bucketStorage = factory.getInstance(syncRules);
+            await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+                await batch.save({
+                    sourceTable: WORKSPACE_TABLE,
+                    tag: storage.SaveOperationTag.INSERT,
+                    after: {
+                        id: 'workspace1',
+                        visibility: 'public'
+                    },
+                    afterReplicaId: test_utils.rid('workspace1')
+                });
+                await batch.save({
+                    sourceTable: WORKSPACE_TABLE,
+                    tag: storage.SaveOperationTag.INSERT,
+                    after: {
+                        id: 'workspace2',
+                        visibility: 'private'
+                    },
+                    afterReplicaId: test_utils.rid('workspace2')
+                });
+                await batch.save({
+                    sourceTable: WORKSPACE_TABLE,
+                    tag: storage.SaveOperationTag.INSERT,
+                    after: {
+                        id: 'workspace3',
+                        user_id: 'u1',
+                        visibility: 'private'
+                    },
+                    afterReplicaId: test_utils.rid('workspace3')
+                });
+                await batch.save({
+                    sourceTable: WORKSPACE_TABLE,
+                    tag: storage.SaveOperationTag.INSERT,
+                    after: {
+                        id: 'workspace4',
+                        user_id: 'u2',
+                        visibility: 'private'
+                    },
+                    afterReplicaId: test_utils.rid('workspace4')
+                });
+                await batch.commit('1/1');
+            });
+            const checkpoint = await bucketStorage.getCheckpoint();
+            const parameters = new RequestParameters({ sub: 'u1' }, {});
+            // Test intermediate values - could be moved to sync_rules.test.ts
+            const q1 = sync_rules.bucketSources[0].parameterQueries[0];
+            const lookups1 = q1.getLookups(parameters);
+            expect(lookups1).toEqual([ParameterLookup.normalized('by_workspace', '1', [])]);
+            const parameter_sets1 = await checkpoint.getParameterSets(lookups1);
+            parameter_sets1.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
+            expect(parameter_sets1).toEqual([{ workspace_id: 'workspace1' }]);
+            const q2 = sync_rules.bucketSources[0].parameterQueries[1];
+            const lookups2 = q2.getLookups(parameters);
+            expect(lookups2).toEqual([ParameterLookup.normalized('by_workspace', '2', ['u1'])]);
+            const parameter_sets2 = await checkpoint.getParameterSets(lookups2);
+            parameter_sets2.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
+            expect(parameter_sets2).toEqual([{ workspace_id: 'workspace3' }]);
+            // Test final values - the important part
+            const buckets = (await sync_rules
+                .getBucketParameterQuerier(test_utils.querierOptions(parameters))
+                .querier.queryDynamicBucketDescriptions({
+                    getParameterSets(lookups) {
+                        return checkpoint.getParameterSets(lookups);
+                    }
+                })).map((e) => e.bucket);
+            buckets.sort();
+            expect(buckets).toEqual(['by_workspace["workspace1"]', 'by_workspace["workspace3"]']);
+        }
+        catch (e_8) {
+            env_8.error = e_8;
+            env_8.hasError = true;
+        }
+        finally {
+            const result_8 = __disposeResources(env_8);
+            if (result_8)
+                await result_8;
+        }
+    });
+    test('truncate parameters', async () => {
+        const env_9 = { stack: [], error: void 0, hasError: false };
+        try {
+            const factory = __addDisposableResource(env_9, await generateStorageFactory(), true);
+            const syncRules = await factory.updateSyncRules({
+                content: `
+bucket_definitions:
+  mybucket:
+    parameters:
+      - SELECT group_id FROM test WHERE id1 = token_parameters.user_id OR id2 = token_parameters.user_id
+    data: []
+`
+            });
+            const bucketStorage = factory.getInstance(syncRules);
+            await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+                await batch.save({
+                    sourceTable: TEST_TABLE,
+                    tag: storage.SaveOperationTag.INSERT,
+                    after: {
+                        id: 't2',
+                        id1: 'user3',
+                        id2: 'user4',
+                        group_id: 'group2a'
+                    },
+                    afterReplicaId: test_utils.rid('t2')
+                });
+                await batch.truncate([TEST_TABLE]);
+            });
+            const checkpoint = await bucketStorage.getCheckpoint();
+            const parameters = await checkpoint.getParameterSets([ParameterLookup.normalized('mybucket', '1', ['user1'])]);
+            expect(parameters).toEqual([]);
+        }
+        catch (e_9) {
+            env_9.error = e_9;
+            env_9.hasError = true;
+        }
+        finally {
+            const result_9 = __disposeResources(env_9);
+            if (result_9)
+                await result_9;
+        }
+    });
+    test('invalidate cached parsed sync rules', async () => {
+        const env_10 = { stack: [], error: void 0, hasError: false };
+        try {
+            const bucketStorageFactory = __addDisposableResource(env_10, await generateStorageFactory(), true);
+            const syncRules = await bucketStorageFactory.updateSyncRules({
+                content: `
+bucket_definitions:
+  by_workspace:
+    parameters:
+      - SELECT id as workspace_id FROM workspace WHERE
+        workspace."userId" = token_parameters.user_id
+    data: []
+`
+            });
+            const syncBucketStorage = bucketStorageFactory.getInstance(syncRules);
+            const parsedSchema1 = syncBucketStorage.getParsedSyncRules({
+                defaultSchema: 'public'
+            });
+            const parsedSchema2 = syncBucketStorage.getParsedSyncRules({
+                defaultSchema: 'public'
+            });
+            // These should be cached, this will be the same instance
+            expect(parsedSchema2).equals(parsedSchema1);
+            expect(parsedSchema1.getSourceTables()[0].schema).equals('public');
+            const parsedSchema3 = syncBucketStorage.getParsedSyncRules({
+                defaultSchema: 'databasename'
+            });
+            // The cache should not be used
+            expect(parsedSchema3).not.equals(parsedSchema2);
+            expect(parsedSchema3.getSourceTables()[0].schema).equals('databasename');
+        }
+        catch (e_10) {
+            env_10.error = e_10;
+            env_10.hasError = true;
+        }
+        finally {
+            const result_10 = __disposeResources(env_10);
+            if (result_10)
+                await result_10;
+        }
+    });
+}
+//# sourceMappingURL=register-data-storage-parameter-tests.js.map
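
The `__addDisposableResource` / `__disposeResources` helpers and the per-test `env_N` try/catch/finally scaffolding above are TypeScript's emitted form of explicit resource management. As a minimal sketch of the source-level shape each compiled test corresponds to, assuming the factory returned by `generateStorageFactory` implements `Symbol.asyncDispose`:

```TypeScript
import { test } from 'vitest';

// generateStorageFactory is the suite parameter shown in the diff above.
test('save and load parameters', async () => {
  // `await using` disposes the factory via factory[Symbol.asyncDispose]() when
  // the scope exits, normally or on error; tsc compiles this declaration into
  // the __addDisposableResource / __disposeResources pattern seen in the diff.
  await using factory = await generateStorageFactory();
  // ... exercise the factory as the compiled tests above do ...
});
```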