@powersync/service-core-tests 0.15.1 → 0.15.3

This diff compares the content of publicly released versions of this package, as published to a supported registry, and is provided for informational purposes only.
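In short, this release changes how the compiled test module handles resource cleanup: the TypeScript-emitted __addDisposableResource / __disposeResources helpers and the per-test env_N try/catch/finally scaffolding are removed, and each test instead declares its storage factory and writer with native "await using" (TC39 explicit resource management), so disposal runs automatically when the test scope exits. The sketch below illustrates the pattern the new output relies on; TempResource and its cleanup body are illustrative assumptions, not code from this package.

// Minimal sketch of the `await using` pattern the new output relies on.
// `TempResource` is hypothetical, not part of @powersync/service-core-tests.
class TempResource implements AsyncDisposable {
  async [Symbol.asyncDispose](): Promise<void> {
    // Cleanup runs when the enclosing scope exits, even if it exits
    // via an exception.
  }
}

async function example(): Promise<void> {
  await using resource = new TempResource();
  // ... use `resource`; its [Symbol.asyncDispose]() is awaited at scope
  // exit, with multiple resources disposed in reverse declaration order.
}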
@@ -1,55 +1,3 @@
- var __addDisposableResource = (this && this.__addDisposableResource) || function (env, value, async) {
- if (value !== null && value !== void 0) {
- if (typeof value !== "object" && typeof value !== "function") throw new TypeError("Object expected.");
- var dispose, inner;
- if (async) {
- if (!Symbol.asyncDispose) throw new TypeError("Symbol.asyncDispose is not defined.");
- dispose = value[Symbol.asyncDispose];
- }
- if (dispose === void 0) {
- if (!Symbol.dispose) throw new TypeError("Symbol.dispose is not defined.");
- dispose = value[Symbol.dispose];
- if (async) inner = dispose;
- }
- if (typeof dispose !== "function") throw new TypeError("Object not disposable.");
- if (inner) dispose = function() { try { inner.call(this); } catch (e) { return Promise.reject(e); } };
- env.stack.push({ value: value, dispose: dispose, async: async });
- }
- else if (async) {
- env.stack.push({ async: true });
- }
- return value;
- };
- var __disposeResources = (this && this.__disposeResources) || (function (SuppressedError) {
- return function (env) {
- function fail(e) {
- env.error = env.hasError ? new SuppressedError(e, env.error, "An error was suppressed during disposal.") : e;
- env.hasError = true;
- }
- var r, s = 0;
- function next() {
- while (r = env.stack.pop()) {
- try {
- if (!r.async && s === 1) return s = 0, env.stack.push(r), Promise.resolve().then(next);
- if (r.dispose) {
- var result = r.dispose.call(r.value);
- if (r.async) return s |= 2, Promise.resolve(result).then(next, function(e) { fail(e); return next(); });
- }
- else s |= 1;
- }
- catch (e) {
- fail(e);
- }
- }
- if (s === 1) return env.hasError ? Promise.reject(env.error) : Promise.resolve();
- if (env.hasError) throw env.error;
- }
- return next();
- };
- })(typeof SuppressedError === "function" ? SuppressedError : function (error, suppressed, message) {
- var e = new Error(message);
- return e.name = "SuppressedError", e.error = error, e.suppressed = suppressed, e;
- });
  import { CURRENT_STORAGE_VERSION, JwtPayload, storage, updateSyncRulesFromYaml } from '@powersync/service-core';
  import { RequestParameters, ScopedParameterLookup } from '@powersync/service-sync-rules';
  import { expect, test } from 'vitest';
@@ -71,131 +19,105 @@ export function registerDataStorageParameterTests(config) {
  const storageVersion = config.storageVersion ?? CURRENT_STORAGE_VERSION;
  const MYBUCKET_1 = parameterLookupScope('mybucket', '1');
  test('save and load parameters', async () => {
- const env_1 = { stack: [], error: void 0, hasError: false };
- try {
- const factory = __addDisposableResource(env_1, await generateStorageFactory(), true);
- const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
+ await using factory = await generateStorageFactory();
+ const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
  bucket_definitions:
  mybucket:
  parameters:
  - SELECT group_id FROM test WHERE id1 = token_parameters.user_id OR id2 = token_parameters.user_id
  data: []
  `, {
- storageVersion
- }));
- const bucketStorage = factory.getInstance(syncRules);
- const writer = __addDisposableResource(env_1, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
- const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
- await writer.markAllSnapshotDone('1/1');
- await writer.save({
- sourceTable: testTable,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 't2',
- id1: 'user3',
- id2: 'user4',
- group_id: 'group2a'
- },
- afterReplicaId: test_utils.rid('t2')
- });
- await writer.save({
- sourceTable: testTable,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 't1',
- id1: 'user1',
- id2: 'user2',
- group_id: 'group1a'
- },
- afterReplicaId: test_utils.rid('t1')
- });
- await writer.commit('1/1');
- const checkpoint = await bucketStorage.getCheckpoint();
- const parameters = await checkpoint.getParameterSets([ScopedParameterLookup.direct(MYBUCKET_1, ['user1'])]);
- expect(parameters).toEqual([
- {
- group_id: 'group1a'
- }
- ]);
- }
- catch (e_1) {
- env_1.error = e_1;
- env_1.hasError = true;
- }
- finally {
- const result_1 = __disposeResources(env_1);
- if (result_1)
- await result_1;
- }
+ storageVersion
+ }));
+ const bucketStorage = factory.getInstance(syncRules);
+ await using writer = await bucketStorage.createWriter(test_utils.BATCH_OPTIONS);
+ const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+ await writer.markAllSnapshotDone('1/1');
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 't2',
+ id1: 'user3',
+ id2: 'user4',
+ group_id: 'group2a'
+ },
+ afterReplicaId: test_utils.rid('t2')
+ });
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 't1',
+ id1: 'user1',
+ id2: 'user2',
+ group_id: 'group1a'
+ },
+ afterReplicaId: test_utils.rid('t1')
+ });
+ await writer.commit('1/1');
+ const checkpoint = await bucketStorage.getCheckpoint();
+ const parameters = await checkpoint.getParameterSets([ScopedParameterLookup.direct(MYBUCKET_1, ['user1'])]);
+ expect(parameters).toEqual([
+ {
+ group_id: 'group1a'
+ }
+ ]);
  });
  test('it should use the latest version', async () => {
- const env_2 = { stack: [], error: void 0, hasError: false };
- try {
- const factory = __addDisposableResource(env_2, await generateStorageFactory(), true);
- const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
+ await using factory = await generateStorageFactory();
+ const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
  bucket_definitions:
  mybucket:
  parameters:
  - SELECT group_id FROM test WHERE id = token_parameters.user_id
  data: []
  `, {
- storageVersion
- }));
- const bucketStorage = factory.getInstance(syncRules);
- const writer = __addDisposableResource(env_2, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
- const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
- await writer.markAllSnapshotDone('1/1');
- await writer.save({
- sourceTable: testTable,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 'user1',
- group_id: 'group1'
- },
- afterReplicaId: test_utils.rid('user1')
- });
- await writer.commit('1/1');
- const checkpoint1 = await bucketStorage.getCheckpoint();
- await writer.save({
- sourceTable: testTable,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 'user1',
- group_id: 'group2'
- },
- afterReplicaId: test_utils.rid('user1')
- });
- await writer.commit('1/2');
- const checkpoint2 = await bucketStorage.getCheckpoint();
- const parameters = await checkpoint2.getParameterSets([ScopedParameterLookup.direct(MYBUCKET_1, ['user1'])]);
- expect(parameters).toEqual([
- {
- group_id: 'group2'
- }
- ]);
- // Use the checkpoint to get older data if relevant
- const parameters2 = await checkpoint1.getParameterSets([ScopedParameterLookup.direct(MYBUCKET_1, ['user1'])]);
- expect(parameters2).toEqual([
- {
- group_id: 'group1'
- }
- ]);
- }
- catch (e_2) {
- env_2.error = e_2;
- env_2.hasError = true;
- }
- finally {
- const result_2 = __disposeResources(env_2);
- if (result_2)
- await result_2;
- }
+ storageVersion
+ }));
+ const bucketStorage = factory.getInstance(syncRules);
+ await using writer = await bucketStorage.createWriter(test_utils.BATCH_OPTIONS);
+ const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+ await writer.markAllSnapshotDone('1/1');
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'user1',
+ group_id: 'group1'
+ },
+ afterReplicaId: test_utils.rid('user1')
+ });
+ await writer.commit('1/1');
+ const checkpoint1 = await bucketStorage.getCheckpoint();
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'user1',
+ group_id: 'group2'
+ },
+ afterReplicaId: test_utils.rid('user1')
+ });
+ await writer.commit('1/2');
+ const checkpoint2 = await bucketStorage.getCheckpoint();
+ const parameters = await checkpoint2.getParameterSets([ScopedParameterLookup.direct(MYBUCKET_1, ['user1'])]);
+ expect(parameters).toEqual([
+ {
+ group_id: 'group2'
+ }
+ ]);
+ // Use the checkpoint to get older data if relevant
+ const parameters2 = await checkpoint1.getParameterSets([ScopedParameterLookup.direct(MYBUCKET_1, ['user1'])]);
+ expect(parameters2).toEqual([
+ {
+ group_id: 'group1'
+ }
+ ]);
  });
  test('it should use the latest version after updates', async () => {
- const env_3 = { stack: [], error: void 0, hasError: false };
- try {
- const factory = __addDisposableResource(env_3, await generateStorageFactory(), true);
- const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
+ await using factory = await generateStorageFactory();
+ const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
  bucket_definitions:
  mybucket:
  parameters:
@@ -204,186 +126,150 @@ bucket_definitions:
  WHERE list_id IN token_parameters.list_id
  data: []
  `, { storageVersion }));
- const bucketStorage = factory.getInstance(syncRules);
- const writer = __addDisposableResource(env_3, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
- const table = await test_utils.resolveTestTable(writer, 'todos', ['id', 'list_id'], config);
- await writer.markAllSnapshotDone('1/1');
- // Create two todos which initially belong to different lists
- await writer.save({
- sourceTable: table,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 'todo1',
- list_id: 'list1'
- },
- afterReplicaId: test_utils.rid('todo1')
- });
- await writer.save({
- sourceTable: table,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 'todo2',
- list_id: 'list2'
- },
- afterReplicaId: test_utils.rid('todo2')
- });
- await writer.commit('1/1');
- // Update the second todo item to now belong to list 1
- await writer.save({
- sourceTable: table,
- tag: storage.SaveOperationTag.UPDATE,
- after: {
- id: 'todo2',
- list_id: 'list1'
- },
- afterReplicaId: test_utils.rid('todo2')
- });
- await writer.commit('1/1');
- // We specifically request the todo_ids for both lists.
- // There removal operation for the association of `list2`::`todo2` should not interfere with the new
- // association of `list1`::`todo2`
- const checkpoint = await bucketStorage.getCheckpoint();
- const parameters = await checkpoint.getParameterSets([
- ScopedParameterLookup.direct(MYBUCKET_1, ['list1']),
- ScopedParameterLookup.direct(MYBUCKET_1, ['list2'])
- ]);
- expect(parameters.sort((a, b) => a.todo_id.localeCompare(b.todo_id))).toEqual([
- {
- todo_id: 'todo1'
- },
- {
- todo_id: 'todo2'
- }
- ]);
- }
- catch (e_3) {
- env_3.error = e_3;
- env_3.hasError = true;
- }
- finally {
- const result_3 = __disposeResources(env_3);
- if (result_3)
- await result_3;
- }
+ const bucketStorage = factory.getInstance(syncRules);
+ await using writer = await bucketStorage.createWriter(test_utils.BATCH_OPTIONS);
+ const table = await test_utils.resolveTestTable(writer, 'todos', ['id', 'list_id'], config);
+ await writer.markAllSnapshotDone('1/1');
+ // Create two todos which initially belong to different lists
+ await writer.save({
+ sourceTable: table,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'todo1',
+ list_id: 'list1'
+ },
+ afterReplicaId: test_utils.rid('todo1')
+ });
+ await writer.save({
+ sourceTable: table,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'todo2',
+ list_id: 'list2'
+ },
+ afterReplicaId: test_utils.rid('todo2')
+ });
+ await writer.commit('1/1');
+ // Update the second todo item to now belong to list 1
+ await writer.save({
+ sourceTable: table,
+ tag: storage.SaveOperationTag.UPDATE,
+ after: {
+ id: 'todo2',
+ list_id: 'list1'
+ },
+ afterReplicaId: test_utils.rid('todo2')
+ });
+ await writer.commit('1/1');
+ // We specifically request the todo_ids for both lists.
+ // There removal operation for the association of `list2`::`todo2` should not interfere with the new
+ // association of `list1`::`todo2`
+ const checkpoint = await bucketStorage.getCheckpoint();
+ const parameters = await checkpoint.getParameterSets([
+ ScopedParameterLookup.direct(MYBUCKET_1, ['list1']),
+ ScopedParameterLookup.direct(MYBUCKET_1, ['list2'])
+ ]);
+ expect(parameters.sort((a, b) => a.todo_id.localeCompare(b.todo_id))).toEqual([
+ {
+ todo_id: 'todo1'
+ },
+ {
+ todo_id: 'todo2'
+ }
+ ]);
  });
  test('save and load parameters with different number types', async () => {
- const env_4 = { stack: [], error: void 0, hasError: false };
- try {
- const factory = __addDisposableResource(env_4, await generateStorageFactory(), true);
- const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
+ await using factory = await generateStorageFactory();
+ const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
  bucket_definitions:
  mybucket:
  parameters:
  - SELECT group_id FROM test WHERE n1 = token_parameters.n1 and f2 = token_parameters.f2 and f3 = token_parameters.f3
  data: []
  `, {
- storageVersion
- }));
- const bucketStorage = factory.getInstance(syncRules);
- const writer = __addDisposableResource(env_4, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
- const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
- await writer.markAllSnapshotDone('1/1');
- await writer.save({
- sourceTable: testTable,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 't1',
- group_id: 'group1',
- n1: 314n,
- f2: 314,
- f3: 3.14
- },
- afterReplicaId: test_utils.rid('t1')
- });
- await writer.commit('1/1');
- const TEST_PARAMS = { group_id: 'group1' };
- const checkpoint = await bucketStorage.getCheckpoint();
- const parameters1 = await checkpoint.getParameterSets([
- ScopedParameterLookup.direct(MYBUCKET_1, [314n, 314, 3.14])
- ]);
- expect(parameters1).toEqual([TEST_PARAMS]);
- const parameters2 = await checkpoint.getParameterSets([
- ScopedParameterLookup.direct(MYBUCKET_1, [314, 314n, 3.14])
- ]);
- expect(parameters2).toEqual([TEST_PARAMS]);
- const parameters3 = await checkpoint.getParameterSets([ScopedParameterLookup.direct(MYBUCKET_1, [314n, 314, 3])]);
- expect(parameters3).toEqual([]);
- }
- catch (e_4) {
- env_4.error = e_4;
- env_4.hasError = true;
- }
- finally {
- const result_4 = __disposeResources(env_4);
- if (result_4)
- await result_4;
- }
+ storageVersion
+ }));
+ const bucketStorage = factory.getInstance(syncRules);
+ await using writer = await bucketStorage.createWriter(test_utils.BATCH_OPTIONS);
+ const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+ await writer.markAllSnapshotDone('1/1');
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 't1',
+ group_id: 'group1',
+ n1: 314n,
+ f2: 314,
+ f3: 3.14
+ },
+ afterReplicaId: test_utils.rid('t1')
+ });
+ await writer.commit('1/1');
+ const TEST_PARAMS = { group_id: 'group1' };
+ const checkpoint = await bucketStorage.getCheckpoint();
+ const parameters1 = await checkpoint.getParameterSets([
+ ScopedParameterLookup.direct(MYBUCKET_1, [314n, 314, 3.14])
+ ]);
+ expect(parameters1).toEqual([TEST_PARAMS]);
+ const parameters2 = await checkpoint.getParameterSets([
+ ScopedParameterLookup.direct(MYBUCKET_1, [314, 314n, 3.14])
+ ]);
+ expect(parameters2).toEqual([TEST_PARAMS]);
+ const parameters3 = await checkpoint.getParameterSets([ScopedParameterLookup.direct(MYBUCKET_1, [314n, 314, 3])]);
+ expect(parameters3).toEqual([]);
  });
  test('save and load parameters with large numbers', async () => {
- const env_5 = { stack: [], error: void 0, hasError: false };
- try {
- // This ensures serialization / deserialization of "current_data" is done correctly.
- // This specific case tested here cannot happen with postgres in practice, but we still
- // test this to ensure correct deserialization.
- const factory = __addDisposableResource(env_5, await generateStorageFactory(), true);
- const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
+ // This ensures serialization / deserialization of "current_data" is done correctly.
+ // This specific case tested here cannot happen with postgres in practice, but we still
+ // test this to ensure correct deserialization.
+ await using factory = await generateStorageFactory();
+ const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
  bucket_definitions:
  mybucket:
  parameters:
  - SELECT group_id FROM test WHERE n1 = token_parameters.n1
  data: []
  `, {
- storageVersion
- }));
- const bucketStorage = factory.getInstance(syncRules);
- const writer = __addDisposableResource(env_5, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
- const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
- await writer.markAllSnapshotDone('1/1');
- await writer.save({
- sourceTable: testTable,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 't1',
- group_id: 'group1',
- n1: 1152921504606846976n // 2^60
- },
- afterReplicaId: test_utils.rid('t1')
- });
- await writer.save({
- sourceTable: testTable,
- tag: storage.SaveOperationTag.UPDATE,
- after: {
- id: 't1',
- group_id: 'group1',
- // Simulate a TOAST value, even though it can't happen for values like this
- // in practice.
- n1: undefined
- },
- afterReplicaId: test_utils.rid('t1')
- });
- await writer.commit('1/1');
- const TEST_PARAMS = { group_id: 'group1' };
- const checkpoint = await bucketStorage.getCheckpoint();
- const parameters1 = await checkpoint.getParameterSets([
- ScopedParameterLookup.direct(MYBUCKET_1, [1152921504606846976n])
- ]);
- expect(parameters1).toEqual([TEST_PARAMS]);
- }
- catch (e_5) {
- env_5.error = e_5;
- env_5.hasError = true;
- }
- finally {
- const result_5 = __disposeResources(env_5);
- if (result_5)
- await result_5;
- }
+ storageVersion
+ }));
+ const bucketStorage = factory.getInstance(syncRules);
+ await using writer = await bucketStorage.createWriter(test_utils.BATCH_OPTIONS);
+ const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+ await writer.markAllSnapshotDone('1/1');
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 't1',
+ group_id: 'group1',
+ n1: 1152921504606846976n // 2^60
+ },
+ afterReplicaId: test_utils.rid('t1')
+ });
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.UPDATE,
+ after: {
+ id: 't1',
+ group_id: 'group1',
+ // Simulate a TOAST value, even though it can't happen for values like this
+ // in practice.
+ n1: undefined
+ },
+ afterReplicaId: test_utils.rid('t1')
+ });
+ await writer.commit('1/1');
+ const TEST_PARAMS = { group_id: 'group1' };
+ const checkpoint = await bucketStorage.getCheckpoint();
+ const parameters1 = await checkpoint.getParameterSets([
+ ScopedParameterLookup.direct(MYBUCKET_1, [1152921504606846976n])
+ ]);
+ expect(parameters1).toEqual([TEST_PARAMS]);
  });
  test('save and load parameters with workspaceId', async () => {
- const env_6 = { stack: [], error: void 0, hasError: false };
- try {
- const factory = __addDisposableResource(env_6, await generateStorageFactory(), true);
- const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
+ await using factory = await generateStorageFactory();
+ const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
  bucket_definitions:
  by_workspace:
  parameters:
@@ -391,58 +277,46 @@ bucket_definitions:
  workspace."userId" = token_parameters.user_id
  data: []
  `, {
- storageVersion
- }));
- const sync_rules = syncRules.parsed(test_utils.PARSE_OPTIONS).hydratedSyncRules();
- const bucketStorage = factory.getInstance(syncRules);
- const writer = __addDisposableResource(env_6, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
- const workspaceTable = await test_utils.resolveTestTable(writer, 'workspace', ['id'], config);
- await writer.markAllSnapshotDone('1/1');
- await writer.save({
- sourceTable: workspaceTable,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 'workspace1',
- userId: 'u1'
- },
- afterReplicaId: test_utils.rid('workspace1')
- });
- await writer.commit('1/1');
- const checkpoint = await bucketStorage.getCheckpoint();
- const parameters = new RequestParameters(new JwtPayload({ sub: 'u1' }), {});
- const querier = sync_rules.getBucketParameterQuerier(test_utils.querierOptions(parameters)).querier;
- const buckets = await querier.queryDynamicBucketDescriptions({
- async getParameterSets(lookups) {
- expect(lookups).toEqual([ScopedParameterLookup.direct(parameterLookupScope('by_workspace', '1'), ['u1'])]);
- const parameter_sets = await checkpoint.getParameterSets(lookups);
- expect(parameter_sets).toEqual([{ workspace_id: 'workspace1' }]);
- return parameter_sets;
- }
- });
- expect(buckets).toEqual([
- {
- bucket: bucketRequest(syncRules, 'by_workspace["workspace1"]').bucket,
- priority: 3,
- definition: 'by_workspace',
- inclusion_reasons: ['default']
- }
- ]);
- }
- catch (e_6) {
- env_6.error = e_6;
- env_6.hasError = true;
- }
- finally {
- const result_6 = __disposeResources(env_6);
- if (result_6)
- await result_6;
- }
+ storageVersion
+ }));
+ const sync_rules = syncRules.parsed(test_utils.PARSE_OPTIONS).hydratedSyncRules();
+ const bucketStorage = factory.getInstance(syncRules);
+ await using writer = await bucketStorage.createWriter(test_utils.BATCH_OPTIONS);
+ const workspaceTable = await test_utils.resolveTestTable(writer, 'workspace', ['id'], config);
+ await writer.markAllSnapshotDone('1/1');
+ await writer.save({
+ sourceTable: workspaceTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'workspace1',
+ userId: 'u1'
+ },
+ afterReplicaId: test_utils.rid('workspace1')
+ });
+ await writer.commit('1/1');
+ const checkpoint = await bucketStorage.getCheckpoint();
+ const parameters = new RequestParameters(new JwtPayload({ sub: 'u1' }), {});
+ const querier = sync_rules.getBucketParameterQuerier(test_utils.querierOptions(parameters)).querier;
+ const buckets = await querier.queryDynamicBucketDescriptions({
+ async getParameterSets(lookups) {
+ expect(lookups).toEqual([ScopedParameterLookup.direct(parameterLookupScope('by_workspace', '1'), ['u1'])]);
+ const parameter_sets = await checkpoint.getParameterSets(lookups);
+ expect(parameter_sets).toEqual([{ workspace_id: 'workspace1' }]);
+ return parameter_sets;
+ }
+ });
+ expect(buckets).toEqual([
+ {
+ bucket: bucketRequest(syncRules, 'by_workspace["workspace1"]').bucket,
+ priority: 3,
+ definition: 'by_workspace',
+ inclusion_reasons: ['default']
+ }
+ ]);
  });
  test('save and load parameters with dynamic global buckets', async () => {
- const env_7 = { stack: [], error: void 0, hasError: false };
- try {
- const factory = __addDisposableResource(env_7, await generateStorageFactory(), true);
- const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
+ await using factory = await generateStorageFactory();
+ const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
  bucket_definitions:
  by_public_workspace:
  parameters:
@@ -450,84 +324,72 @@ bucket_definitions:
  workspace.visibility = 'public'
  data: []
  `, {
- storageVersion
- }));
- const sync_rules = syncRules.parsed(test_utils.PARSE_OPTIONS).hydratedSyncRules();
- const bucketStorage = factory.getInstance(syncRules);
- const writer = __addDisposableResource(env_7, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
- const workspaceTable = await test_utils.resolveTestTable(writer, 'workspace', undefined, config);
- await writer.markAllSnapshotDone('1/1');
- await writer.save({
- sourceTable: workspaceTable,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 'workspace1',
- visibility: 'public'
- },
- afterReplicaId: test_utils.rid('workspace1')
- });
- await writer.save({
- sourceTable: workspaceTable,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 'workspace2',
- visibility: 'private'
- },
- afterReplicaId: test_utils.rid('workspace2')
- });
- await writer.save({
- sourceTable: workspaceTable,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 'workspace3',
- visibility: 'public'
- },
- afterReplicaId: test_utils.rid('workspace3')
- });
- await writer.commit('1/1');
- const checkpoint = await bucketStorage.getCheckpoint();
- const parameters = new RequestParameters(new JwtPayload({ sub: 'unknown' }), {});
- const querier = sync_rules.getBucketParameterQuerier(test_utils.querierOptions(parameters)).querier;
- const buckets = await querier.queryDynamicBucketDescriptions({
- async getParameterSets(lookups) {
- expect(lookups).toEqual([ScopedParameterLookup.direct(parameterLookupScope('by_public_workspace', '1'), [])]);
- const parameter_sets = await checkpoint.getParameterSets(lookups);
- parameter_sets.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
- expect(parameter_sets).toEqual([{ workspace_id: 'workspace1' }, { workspace_id: 'workspace3' }]);
- return parameter_sets;
- }
- });
- buckets.sort((a, b) => a.bucket.localeCompare(b.bucket));
- expect(buckets).toEqual([
- {
- bucket: bucketRequest(syncRules, 'by_public_workspace["workspace1"]').bucket,
- priority: 3,
- definition: 'by_public_workspace',
- inclusion_reasons: ['default']
- },
- {
- bucket: bucketRequest(syncRules, 'by_public_workspace["workspace3"]').bucket,
- priority: 3,
- definition: 'by_public_workspace',
- inclusion_reasons: ['default']
- }
- ]);
- }
- catch (e_7) {
- env_7.error = e_7;
- env_7.hasError = true;
- }
- finally {
- const result_7 = __disposeResources(env_7);
- if (result_7)
- await result_7;
- }
+ storageVersion
+ }));
+ const sync_rules = syncRules.parsed(test_utils.PARSE_OPTIONS).hydratedSyncRules();
+ const bucketStorage = factory.getInstance(syncRules);
+ await using writer = await bucketStorage.createWriter(test_utils.BATCH_OPTIONS);
+ const workspaceTable = await test_utils.resolveTestTable(writer, 'workspace', undefined, config);
+ await writer.markAllSnapshotDone('1/1');
+ await writer.save({
+ sourceTable: workspaceTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'workspace1',
+ visibility: 'public'
+ },
+ afterReplicaId: test_utils.rid('workspace1')
+ });
+ await writer.save({
+ sourceTable: workspaceTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'workspace2',
+ visibility: 'private'
+ },
+ afterReplicaId: test_utils.rid('workspace2')
+ });
+ await writer.save({
+ sourceTable: workspaceTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'workspace3',
+ visibility: 'public'
+ },
+ afterReplicaId: test_utils.rid('workspace3')
+ });
+ await writer.commit('1/1');
+ const checkpoint = await bucketStorage.getCheckpoint();
+ const parameters = new RequestParameters(new JwtPayload({ sub: 'unknown' }), {});
+ const querier = sync_rules.getBucketParameterQuerier(test_utils.querierOptions(parameters)).querier;
+ const buckets = await querier.queryDynamicBucketDescriptions({
+ async getParameterSets(lookups) {
+ expect(lookups).toEqual([ScopedParameterLookup.direct(parameterLookupScope('by_public_workspace', '1'), [])]);
+ const parameter_sets = await checkpoint.getParameterSets(lookups);
+ parameter_sets.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
+ expect(parameter_sets).toEqual([{ workspace_id: 'workspace1' }, { workspace_id: 'workspace3' }]);
+ return parameter_sets;
+ }
+ });
+ buckets.sort((a, b) => a.bucket.localeCompare(b.bucket));
+ expect(buckets).toEqual([
+ {
+ bucket: bucketRequest(syncRules, 'by_public_workspace["workspace1"]').bucket,
+ priority: 3,
+ definition: 'by_public_workspace',
+ inclusion_reasons: ['default']
+ },
+ {
+ bucket: bucketRequest(syncRules, 'by_public_workspace["workspace3"]').bucket,
+ priority: 3,
+ definition: 'by_public_workspace',
+ inclusion_reasons: ['default']
+ }
+ ]);
  });
  test('multiple parameter queries', async () => {
- const env_8 = { stack: [], error: void 0, hasError: false };
- try {
- const factory = __addDisposableResource(env_8, await generateStorageFactory(), true);
- const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
+ await using factory = await generateStorageFactory();
+ const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
  bucket_definitions:
  by_workspace:
  parameters:
@@ -537,138 +399,114 @@ bucket_definitions:
  workspace.user_id = token_parameters.user_id
  data: []
  `, {
- storageVersion
- }));
- const sync_rules = syncRules.parsed(test_utils.PARSE_OPTIONS).hydratedSyncRules();
- const bucketStorage = factory.getInstance(syncRules);
- const writer = __addDisposableResource(env_8, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
- const workspaceTable = await test_utils.resolveTestTable(writer, 'workspace', undefined, config);
- await writer.markAllSnapshotDone('1/1');
- await writer.save({
- sourceTable: workspaceTable,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 'workspace1',
- visibility: 'public'
- },
- afterReplicaId: test_utils.rid('workspace1')
- });
- await writer.save({
- sourceTable: workspaceTable,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 'workspace2',
- visibility: 'private'
- },
- afterReplicaId: test_utils.rid('workspace2')
- });
- await writer.save({
- sourceTable: workspaceTable,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 'workspace3',
- user_id: 'u1',
- visibility: 'private'
- },
- afterReplicaId: test_utils.rid('workspace3')
- });
- await writer.save({
- sourceTable: workspaceTable,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 'workspace4',
- user_id: 'u2',
- visibility: 'private'
- },
- afterReplicaId: test_utils.rid('workspace4')
- });
- await writer.commit('1/1');
- const checkpoint = await bucketStorage.getCheckpoint();
- const parameters = new RequestParameters(new JwtPayload({ sub: 'u1' }), {});
- // Test intermediate values - could be moved to sync_rules.test.ts
- const querier = sync_rules.getBucketParameterQuerier(test_utils.querierOptions(parameters)).querier;
- // Test final values - the important part
- const foundLookups = [];
- const parameter_sets = [];
- const buckets = (await querier.queryDynamicBucketDescriptions({
- async getParameterSets(lookups) {
- foundLookups.push(...lookups);
- const output = await checkpoint.getParameterSets(lookups);
- parameter_sets.push(...output);
- return output;
- }
- })).map((e) => e.bucket);
- expect(foundLookups).toEqual([
- ScopedParameterLookup.direct(parameterLookupScope('by_workspace', '1'), []),
- ScopedParameterLookup.direct(parameterLookupScope('by_workspace', '2'), ['u1'])
- ]);
- parameter_sets.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
- expect(parameter_sets).toEqual([{ workspace_id: 'workspace1' }, { workspace_id: 'workspace3' }]);
- buckets.sort();
- expect(buckets).toEqual([
- bucketRequest(syncRules, 'by_workspace["workspace1"]').bucket,
- bucketRequest(syncRules, 'by_workspace["workspace3"]').bucket
- ]);
- }
- catch (e_8) {
- env_8.error = e_8;
- env_8.hasError = true;
- }
- finally {
- const result_8 = __disposeResources(env_8);
- if (result_8)
- await result_8;
- }
+ storageVersion
+ }));
+ const sync_rules = syncRules.parsed(test_utils.PARSE_OPTIONS).hydratedSyncRules();
+ const bucketStorage = factory.getInstance(syncRules);
+ await using writer = await bucketStorage.createWriter(test_utils.BATCH_OPTIONS);
+ const workspaceTable = await test_utils.resolveTestTable(writer, 'workspace', undefined, config);
+ await writer.markAllSnapshotDone('1/1');
+ await writer.save({
+ sourceTable: workspaceTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'workspace1',
+ visibility: 'public'
+ },
+ afterReplicaId: test_utils.rid('workspace1')
+ });
+ await writer.save({
+ sourceTable: workspaceTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'workspace2',
+ visibility: 'private'
+ },
+ afterReplicaId: test_utils.rid('workspace2')
+ });
+ await writer.save({
+ sourceTable: workspaceTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'workspace3',
+ user_id: 'u1',
+ visibility: 'private'
+ },
+ afterReplicaId: test_utils.rid('workspace3')
+ });
+ await writer.save({
+ sourceTable: workspaceTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 'workspace4',
+ user_id: 'u2',
+ visibility: 'private'
+ },
+ afterReplicaId: test_utils.rid('workspace4')
+ });
+ await writer.commit('1/1');
+ const checkpoint = await bucketStorage.getCheckpoint();
+ const parameters = new RequestParameters(new JwtPayload({ sub: 'u1' }), {});
+ // Test intermediate values - could be moved to sync_rules.test.ts
+ const querier = sync_rules.getBucketParameterQuerier(test_utils.querierOptions(parameters)).querier;
+ // Test final values - the important part
+ const foundLookups = [];
+ const parameter_sets = [];
+ const buckets = (await querier.queryDynamicBucketDescriptions({
+ async getParameterSets(lookups) {
+ foundLookups.push(...lookups);
+ const output = await checkpoint.getParameterSets(lookups);
+ parameter_sets.push(...output);
+ return output;
+ }
+ })).map((e) => e.bucket);
+ expect(foundLookups).toEqual([
+ ScopedParameterLookup.direct(parameterLookupScope('by_workspace', '1'), []),
+ ScopedParameterLookup.direct(parameterLookupScope('by_workspace', '2'), ['u1'])
+ ]);
+ parameter_sets.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
+ expect(parameter_sets).toEqual([{ workspace_id: 'workspace1' }, { workspace_id: 'workspace3' }]);
+ buckets.sort();
+ expect(buckets).toEqual([
+ bucketRequest(syncRules, 'by_workspace["workspace1"]').bucket,
+ bucketRequest(syncRules, 'by_workspace["workspace3"]').bucket
+ ]);
  });
  test('truncate parameters', async () => {
- const env_9 = { stack: [], error: void 0, hasError: false };
- try {
- const factory = __addDisposableResource(env_9, await generateStorageFactory(), true);
- const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
+ await using factory = await generateStorageFactory();
+ const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
  bucket_definitions:
  mybucket:
  parameters:
  - SELECT group_id FROM test WHERE id1 = token_parameters.user_id OR id2 = token_parameters.user_id
  data: []
  `, {
- storageVersion
- }));
- const bucketStorage = factory.getInstance(syncRules);
- const writer = __addDisposableResource(env_9, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
- const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
- await writer.markAllSnapshotDone('1/1');
- await writer.save({
- sourceTable: testTable,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- id: 't2',
- id1: 'user3',
- id2: 'user4',
- group_id: 'group2a'
- },
- afterReplicaId: test_utils.rid('t2')
- });
- await writer.truncate([testTable]);
- await writer.flush();
- const checkpoint = await bucketStorage.getCheckpoint();
- const parameters = await checkpoint.getParameterSets([ScopedParameterLookup.direct(MYBUCKET_1, ['user1'])]);
- expect(parameters).toEqual([]);
- }
- catch (e_9) {
- env_9.error = e_9;
- env_9.hasError = true;
- }
- finally {
- const result_9 = __disposeResources(env_9);
- if (result_9)
- await result_9;
- }
+ storageVersion
+ }));
+ const bucketStorage = factory.getInstance(syncRules);
+ await using writer = await bucketStorage.createWriter(test_utils.BATCH_OPTIONS);
+ const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+ await writer.markAllSnapshotDone('1/1');
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ id: 't2',
+ id1: 'user3',
+ id2: 'user4',
+ group_id: 'group2a'
+ },
+ afterReplicaId: test_utils.rid('t2')
+ });
+ await writer.truncate([testTable]);
+ await writer.flush();
+ const checkpoint = await bucketStorage.getCheckpoint();
+ const parameters = await checkpoint.getParameterSets([ScopedParameterLookup.direct(MYBUCKET_1, ['user1'])]);
+ expect(parameters).toEqual([]);
  });
  test('invalidate cached parsed sync rules', async () => {
- const env_10 = { stack: [], error: void 0, hasError: false };
- try {
- const bucketStorageFactory = __addDisposableResource(env_10, await generateStorageFactory(), true);
- const syncRules = await bucketStorageFactory.updateSyncRules(updateSyncRulesFromYaml(`
+ await using bucketStorageFactory = await generateStorageFactory();
+ const syncRules = await bucketStorageFactory.updateSyncRules(updateSyncRulesFromYaml(`
  bucket_definitions:
  by_workspace:
  parameters:
@@ -676,40 +514,28 @@ bucket_definitions:
  workspace."userId" = token_parameters.user_id
  data: []
  `, {
- storageVersion
- }));
- const syncBucketStorage = bucketStorageFactory.getInstance(syncRules);
- const parsedSchema1 = syncBucketStorage.getParsedSyncRules({
- defaultSchema: 'public'
- });
- const parsedSchema2 = syncBucketStorage.getParsedSyncRules({
- defaultSchema: 'public'
- });
- // These should be cached, this will be the same instance
- expect(parsedSchema2).equals(parsedSchema1);
- expect(parsedSchema1.getSourceTables()[0].schema).equals('public');
- const parsedSchema3 = syncBucketStorage.getParsedSyncRules({
- defaultSchema: 'databasename'
- });
- // The cache should not be used
- expect(parsedSchema3).not.equals(parsedSchema2);
- expect(parsedSchema3.getSourceTables()[0].schema).equals('databasename');
- }
- catch (e_10) {
- env_10.error = e_10;
- env_10.hasError = true;
- }
- finally {
- const result_10 = __disposeResources(env_10);
- if (result_10)
- await result_10;
- }
+ storageVersion
+ }));
+ const syncBucketStorage = bucketStorageFactory.getInstance(syncRules);
+ const parsedSchema1 = syncBucketStorage.getParsedSyncRules({
+ defaultSchema: 'public'
+ });
+ const parsedSchema2 = syncBucketStorage.getParsedSyncRules({
+ defaultSchema: 'public'
+ });
+ // These should be cached, this will be the same instance
+ expect(parsedSchema2).equals(parsedSchema1);
+ expect(parsedSchema1.getSourceTables()[0].schema).equals('public');
+ const parsedSchema3 = syncBucketStorage.getParsedSyncRules({
+ defaultSchema: 'databasename'
+ });
+ // The cache should not be used
+ expect(parsedSchema3).not.equals(parsedSchema2);
+ expect(parsedSchema3.getSourceTables()[0].schema).equals('databasename');
  });
  test('sync streams smoke test', async () => {
- const env_11 = { stack: [], error: void 0, hasError: false };
- try {
- const factory = __addDisposableResource(env_11, await generateStorageFactory(), true);
- const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
+ await using factory = await generateStorageFactory();
+ const syncRules = await factory.updateSyncRules(updateSyncRulesFromYaml(`
  config:
  edition: 3

@@ -719,39 +545,29 @@ streams:
  SELECT data.* FROM test AS data, test AS param
  WHERE data.foo = param.bar AND param.baz = auth.user_id()
  `));
- const bucketStorage = factory.getInstance(syncRules);
- const writer = __addDisposableResource(env_11, await bucketStorage.createWriter(test_utils.BATCH_OPTIONS), true);
- const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
- await writer.markAllSnapshotDone('1/1');
- await writer.save({
- sourceTable: testTable,
- tag: storage.SaveOperationTag.INSERT,
- after: {
- baz: 'baz',
- bar: 'bar'
- },
- afterReplicaId: test_utils.rid('t1')
- });
- await writer.commit('1/1');
- const checkpoint = await bucketStorage.getCheckpoint();
- const parameters = await checkpoint.getParameterSets([
- ScopedParameterLookup.direct(parameterLookupScope('lookup', '0'), ['baz'])
- ]);
- expect(parameters).toEqual([
- {
- '0': 'bar'
- }
- ]);
- }
- catch (e_11) {
- env_11.error = e_11;
- env_11.hasError = true;
- }
- finally {
- const result_11 = __disposeResources(env_11);
- if (result_11)
- await result_11;
- }
+ const bucketStorage = factory.getInstance(syncRules);
+ await using writer = await bucketStorage.createWriter(test_utils.BATCH_OPTIONS);
+ const testTable = await test_utils.resolveTestTable(writer, 'test', ['id'], config);
+ await writer.markAllSnapshotDone('1/1');
+ await writer.save({
+ sourceTable: testTable,
+ tag: storage.SaveOperationTag.INSERT,
+ after: {
+ baz: 'baz',
+ bar: 'bar'
+ },
+ afterReplicaId: test_utils.rid('t1')
+ });
+ await writer.commit('1/1');
+ const checkpoint = await bucketStorage.getCheckpoint();
+ const parameters = await checkpoint.getParameterSets([
+ ScopedParameterLookup.direct(parameterLookupScope('lookup', '0'), ['baz'])
+ ]);
+ expect(parameters).toEqual([
+ {
+ '0': 'bar'
+ }
+ ]);
  });
  }
  //# sourceMappingURL=register-data-storage-parameter-tests.js.map
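A closing note on error semantics: the removed __disposeResources helper shows the contract that native "await using" preserves. If disposal itself throws while another error is already propagating, the two are combined into a SuppressedError (the disposal failure in .error, the original in .suppressed) rather than one silently replacing the other. A minimal sketch, assuming a runtime with explicit resource management support; the inline resource literal is hypothetical, not from this package:

// Both the removed helper and native `await using` aggregate errors this way.
async function demo(): Promise<void> {
  try {
    await using r = {
      async [Symbol.asyncDispose]() {
        throw new Error('dispose failed');
      }
    };
    throw new Error('body failed');
  } catch (e) {
    const s = e as SuppressedError;
    // s.error is the disposal failure; s.suppressed is the original
    // 'body failed' error.
    console.log(s.error, s.suppressed);
  }
}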