@powersync/service-core-tests 0.7.2 → 0.8.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +22 -0
- package/dist/test-utils/general-utils.d.ts +1 -1
- package/dist/test-utils/general-utils.js +3 -3
- package/dist/test-utils/general-utils.js.map +1 -1
- package/dist/tests/register-data-storage-tests.js +285 -15
- package/dist/tests/register-data-storage-tests.js.map +1 -1
- package/dist/tests/register-sync-tests.js +246 -26
- package/dist/tests/register-sync-tests.js.map +1 -1
- package/package.json +3 -3
- package/src/test-utils/general-utils.ts +3 -3
- package/src/tests/register-data-storage-tests.ts +265 -15
- package/src/tests/register-sync-tests.ts +231 -5
- package/tsconfig.tsbuildinfo +1 -1
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,27 @@
 # @powersync/service-core-tests
 
+## 0.8.1
+
+### Patch Changes
+
+- @powersync/service-core@1.10.1
+
+## 0.8.0
+
+### Minor Changes
+
+- 833e8f2: [MongoDB Storage] Stream write checkpoint changes instead of polling, reducing overhead for large numbers of concurrent connections
+- bfece49: Cache parameter queries and buckets to reduce incremental sync overhead
+
+### Patch Changes
+
+- Updated dependencies [833e8f2]
+- Updated dependencies [833e8f2]
+- Updated dependencies [bfece49]
+- Updated dependencies [2cb5252]
+- @powersync/service-core@1.10.0
+- @powersync/service-sync-rules@0.25.0
+
 ## 0.7.2
 
 ### Patch Changes
package/dist/test-utils/general-utils.d.ts
CHANGED
@@ -4,7 +4,7 @@ export declare const ZERO_LSN = "0/0";
 export declare const PARSE_OPTIONS: storage.ParseSyncRulesOptions;
 export declare const BATCH_OPTIONS: storage.StartBatchOptions;
 export declare function testRules(content: string): storage.PersistedSyncRulesContent;
-export declare function makeTestTable(name: string,
+export declare function makeTestTable(name: string, replicaIdColumns?: string[] | undefined): storage.SourceTable;
 export declare function getBatchData(batch: utils.SyncBucketData[] | storage.SyncBucketDataBatch[] | storage.SyncBucketDataBatch): {
     op_id: string;
     op: "PUT" | "REMOVE" | "MOVE" | "CLEAR";
package/dist/test-utils/general-utils.js
CHANGED
@@ -27,10 +27,10 @@ export function testRules(content) {
         }
     };
 }
-export function makeTestTable(name,
-    const relId = utils.hashData('table', name, (
+export function makeTestTable(name, replicaIdColumns) {
+    const relId = utils.hashData('table', name, (replicaIdColumns ?? ['id']).join(','));
     const id = new bson.ObjectId('6544e3899293153fa7b38331');
-    return new storage.SourceTable(id, storage.SourceTable.DEFAULT_TAG, relId, 'public', name, (
+    return new storage.SourceTable(id, storage.SourceTable.DEFAULT_TAG, relId, 'public', name, (replicaIdColumns ?? ['id']).map((column) => ({ name: column, type: 'VARCHAR', typeId: 25 })), true);
 }
 export function getBatchData(batch) {
     const first = getFirst(batch);
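For reference, the widened `makeTestTable` helper above keeps its old default: when no column list is passed, the replica identity is still `['id']`. A minimal usage sketch, assuming the helper is reachable from the package entry point (the compiled tests reach it via `test_utils.makeTestTable`):

```ts
// Sketch only - the import path is an assumption; the compiled tests import
// the helper through '../test-utils/test-utils-index.js' as test_utils.
import { makeTestTable } from '@powersync/service-core-tests';

// Unchanged behaviour: omitting the second argument defaults the replica
// identity columns to ['id'], exactly as before 0.8.0.
const usersTable = makeTestTable('users');

// New in 0.8.x: replica-identity columns can be passed explicitly, matching
// the updated TEST_TABLE declaration makeTestTable('test', ['id']).
const todosTable = makeTestTable('todos', ['id', 'list_id']);
```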
package/dist/test-utils/general-utils.js.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"general-utils.js","sourceRoot":"","sources":["../../src/test-utils/general-utils.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,OAAO,EAAE,KAAK,EAAE,MAAM,yBAAyB,CAAC;AACzD,OAAO,EAAE,YAAY,EAAE,MAAM,+BAA+B,CAAC;AAC7D,OAAO,KAAK,IAAI,MAAM,MAAM,CAAC;AAE7B,MAAM,CAAC,MAAM,QAAQ,GAAG,KAAK,CAAC;AAE9B,MAAM,CAAC,MAAM,aAAa,GAAkC;IAC1D,aAAa,EAAE,QAAQ;CACxB,CAAC;AAEF,MAAM,CAAC,MAAM,aAAa,GAA8B;IACtD,GAAG,aAAa;IAChB,OAAO,EAAE,QAAQ;IACjB,gBAAgB,EAAE,IAAI;CACvB,CAAC;AAEF,MAAM,UAAU,SAAS,CAAC,OAAe;IACvC,OAAO;QACL,EAAE,EAAE,CAAC;QACL,kBAAkB,EAAE,OAAO;QAC3B,SAAS,EAAE,MAAM;QACjB,MAAM,CAAC,OAAO;YACZ,OAAO;gBACL,EAAE,EAAE,CAAC;gBACL,UAAU,EAAE,YAAY,CAAC,QAAQ,CAAC,OAAO,EAAE,OAAO,CAAC;gBACnD,SAAS,EAAE,MAAM;aAClB,CAAC;QACJ,CAAC;QACD,IAAI;YACF,MAAM,IAAI,KAAK,CAAC,iBAAiB,CAAC,CAAC;QACrC,CAAC;KACF,CAAC;AACJ,CAAC;AAED,MAAM,UAAU,aAAa,CAAC,IAAY,EAAE,
+{"version":3,"file":"general-utils.js","sourceRoot":"","sources":["../../src/test-utils/general-utils.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,OAAO,EAAE,KAAK,EAAE,MAAM,yBAAyB,CAAC;AACzD,OAAO,EAAE,YAAY,EAAE,MAAM,+BAA+B,CAAC;AAC7D,OAAO,KAAK,IAAI,MAAM,MAAM,CAAC;AAE7B,MAAM,CAAC,MAAM,QAAQ,GAAG,KAAK,CAAC;AAE9B,MAAM,CAAC,MAAM,aAAa,GAAkC;IAC1D,aAAa,EAAE,QAAQ;CACxB,CAAC;AAEF,MAAM,CAAC,MAAM,aAAa,GAA8B;IACtD,GAAG,aAAa;IAChB,OAAO,EAAE,QAAQ;IACjB,gBAAgB,EAAE,IAAI;CACvB,CAAC;AAEF,MAAM,UAAU,SAAS,CAAC,OAAe;IACvC,OAAO;QACL,EAAE,EAAE,CAAC;QACL,kBAAkB,EAAE,OAAO;QAC3B,SAAS,EAAE,MAAM;QACjB,MAAM,CAAC,OAAO;YACZ,OAAO;gBACL,EAAE,EAAE,CAAC;gBACL,UAAU,EAAE,YAAY,CAAC,QAAQ,CAAC,OAAO,EAAE,OAAO,CAAC;gBACnD,SAAS,EAAE,MAAM;aAClB,CAAC;QACJ,CAAC;QACD,IAAI;YACF,MAAM,IAAI,KAAK,CAAC,iBAAiB,CAAC,CAAC;QACrC,CAAC;KACF,CAAC;AACJ,CAAC;AAED,MAAM,UAAU,aAAa,CAAC,IAAY,EAAE,gBAAuC;IACjF,MAAM,KAAK,GAAG,KAAK,CAAC,QAAQ,CAAC,OAAO,EAAE,IAAI,EAAE,CAAC,gBAAgB,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC;IACpF,MAAM,EAAE,GAAG,IAAI,IAAI,CAAC,QAAQ,CAAC,0BAA0B,CAAC,CAAC;IACzD,OAAO,IAAI,OAAO,CAAC,WAAW,CAC5B,EAAE,EACF,OAAO,CAAC,WAAW,CAAC,WAAW,EAC/B,KAAK,EACL,QAAQ,EACR,IAAI,EACJ,CAAC,gBAAgB,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,MAAM,EAAE,EAAE,CAAC,CAAC,EAAE,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,SAAS,EAAE,MAAM,EAAE,EAAE,EAAE,CAAC,CAAC,EAC7F,IAAI,CACL,CAAC;AACJ,CAAC;AAED,MAAM,UAAU,YAAY,CAC1B,KAA2F;IAE3F,MAAM,KAAK,GAAG,QAAQ,CAAC,KAAK,CAAC,CAAC;IAC9B,IAAI,KAAK,IAAI,IAAI,EAAE,CAAC;QAClB,OAAO,EAAE,CAAC;IACZ,CAAC;IACD,OAAO,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE;QAC1B,OAAO;YACL,KAAK,EAAE,CAAC,CAAC,KAAK;YACd,EAAE,EAAE,CAAC,CAAC,EAAE;YACR,SAAS,EAAE,CAAC,CAAC,SAAS;YACtB,QAAQ,EAAE,CAAC,CAAC,QAAQ;SACrB,CAAC;IACJ,CAAC,CAAC,CAAC;AACL,CAAC;AAED,MAAM,UAAU,YAAY,CAC1B,KAA2F;IAE3F,MAAM,KAAK,GAAG,QAAQ,CAAC,KAAK,CAAC,CAAC;IAC9B,IAAI,KAAK,IAAI,IAAI,EAAE,CAAC;QAClB,OAAO,IAAI,CAAC;IACd,CAAC;IACD,OAAO;QACL,QAAQ,EAAE,KAAK,CAAC,QAAQ;QACxB,KAAK,EAAE,KAAK,CAAC,KAAK;QAClB,UAAU,EAAE,KAAK,CAAC,UAAU;KAC7B,CAAC;AACJ,CAAC;AAED,SAAS,QAAQ,CACf,KAA2F;IAE3F,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC,KAAK,CAAC,EAAE,CAAC;QAC1B,OAAO,KAAK,CAAC,KAAK,CAAC;IACrB,CAAC;IACD,IAAI,KAAK,CAAC,MAAM,IAAI,CAAC,EAAE,CAAC;QACtB,OAAO,IAAI,CAAC;IACd,CAAC;IACD,IAAI,KAAK,GAAG,KAAK,CAAC,CAAC,CAAC,CAAC;IACrB,IAAK,KAAqC,CAAC,KAAK,IAAI,IAAI,EAAE,CAAC;QACzD,OAAQ,KAAqC,CAAC,KAAK,CAAC;IACtD,CAAC;SAAM,CAAC;QACN,OAAO,KAA6B,CAAC;IACvC,CAAC;AACH,CAAC;AAED;;GAEG;AACH,MAAM,UAAU,GAAG,CAAC,EAAU;IAC5B,OAAO,KAAK,CAAC,0BAA0B,CAAC,EAAE,EAAE,EAAE,EAAE,EAAE,EAAE,CAAC,EAAE,IAAI,EAAE,IAAI,EAAE,IAAI,EAAE,SAAS,EAAE,MAAM,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC;AACrG,CAAC"}
package/dist/tests/register-data-storage-tests.js
CHANGED
@@ -51,7 +51,7 @@ var __disposeResources = (this && this.__disposeResources) || (function (Suppres
     return e.name = "SuppressedError", e.error = error, e.suppressed = suppressed, e;
 });
 import { getUuidReplicaIdentityBson, storage } from '@powersync/service-core';
-import { RequestParameters } from '@powersync/service-sync-rules';
+import { ParameterLookup, RequestParameters } from '@powersync/service-sync-rules';
 import { expect, test } from 'vitest';
 import * as test_utils from '../test-utils/test-utils-index.js';
 export const TEST_TABLE = test_utils.makeTestTable('test', ['id']);
@@ -112,7 +112,9 @@ bucket_definitions:
                     afterReplicaId: test_utils.rid('t1')
                 });
             });
-            const parameters = await bucketStorage.getParameterSets(result.flushed_op, [
+            const parameters = await bucketStorage.getParameterSets(result.flushed_op, [
+                ParameterLookup.normalized('mybucket', '1', ['user1'])
+            ]);
             expect(parameters).toEqual([
                 {
                     group_id: 'group1a'
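The change in this hunk repeats throughout the file: lookups passed to `getParameterSets` are now built with `ParameterLookup.normalized(bucketDefinition, queryIndex, values)` instead of being passed as plain serialized arrays. A hedged sketch of the new shape, with the bucket name, query index and values mirroring the test above (the `SyncRulesBucketStorage` type name is an assumption, not something shown in this diff):

```ts
import { storage } from '@powersync/service-core';
import { ParameterLookup } from '@powersync/service-sync-rules';

// Hypothetical helper mirroring the updated test: resolve the parameter sets
// for one user from parameter query '1' of the 'mybucket' definition.
async function lookupUserParameters(
  bucketStorage: storage.SyncRulesBucketStorage,
  checkpoint: Parameters<storage.SyncRulesBucketStorage['getParameterSets']>[0],
  userId: string
) {
  return bucketStorage.getParameterSets(checkpoint, [
    // 'mybucket' = bucket definition name, '1' = parameter query index,
    // [userId] = the lookup values being matched.
    ParameterLookup.normalized('mybucket', '1', [userId])
  ]);
}
```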
@@ -163,14 +165,18 @@ bucket_definitions:
                     afterReplicaId: test_utils.rid('user1')
                 });
             });
-            const parameters = await bucketStorage.getParameterSets(result2.flushed_op, [
+            const parameters = await bucketStorage.getParameterSets(result2.flushed_op, [
+                ParameterLookup.normalized('mybucket', '1', ['user1'])
+            ]);
             expect(parameters).toEqual([
                 {
                     group_id: 'group2'
                 }
             ]);
             // Use the checkpoint to get older data if relevant
-            const parameters2 = await bucketStorage.getParameterSets(result1.flushed_op, [
+            const parameters2 = await bucketStorage.getParameterSets(result1.flushed_op, [
+                ParameterLookup.normalized('mybucket', '1', ['user1'])
+            ]);
             expect(parameters2).toEqual([
                 {
                     group_id: 'group1'
@@ -239,8 +245,8 @@ bucket_definitions:
             // There removal operation for the association of `list2`::`todo2` should not interfere with the new
             // association of `list1`::`todo2`
             const parameters = await bucketStorage.getParameterSets(result2.flushed_op, [
-
-
+                ParameterLookup.normalized('mybucket', '1', ['list1']),
+                ParameterLookup.normalized('mybucket', '1', ['list2'])
             ]);
             expect(parameters.sort((a, b) => a.todo_id.localeCompare(b.todo_id))).toEqual([
                 {
@@ -289,11 +295,17 @@ bucket_definitions:
             });
             const TEST_PARAMS = { group_id: 'group1' };
             const checkpoint = result.flushed_op;
-            const parameters1 = await bucketStorage.getParameterSets(checkpoint, [
+            const parameters1 = await bucketStorage.getParameterSets(checkpoint, [
+                ParameterLookup.normalized('mybucket', '1', [314n, 314, 3.14])
+            ]);
             expect(parameters1).toEqual([TEST_PARAMS]);
-            const parameters2 = await bucketStorage.getParameterSets(checkpoint, [
+            const parameters2 = await bucketStorage.getParameterSets(checkpoint, [
+                ParameterLookup.normalized('mybucket', '1', [314, 314n, 3.14])
+            ]);
             expect(parameters2).toEqual([TEST_PARAMS]);
-            const parameters3 = await bucketStorage.getParameterSets(checkpoint, [
+            const parameters3 = await bucketStorage.getParameterSets(checkpoint, [
+                ParameterLookup.normalized('mybucket', '1', [314n, 314, 3])
+            ]);
             expect(parameters3).toEqual([]);
         }
         catch (e_4) {
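The three lookups above also document how numeric lookup values are normalized: the bigint `314n` and the whole-number double `314` resolve to the same parameter set, while `3` does not match a row saved with `3.14`. A small illustrative sketch of the same three lookups in isolation (the equality claims come from the test expectations above, not from any documented API guarantee):

```ts
import { ParameterLookup } from '@powersync/service-sync-rules';

// Both of these address the same stored parameter row: whole-number bigints
// and doubles normalize to the same lookup value in the test above.
const lookupA = ParameterLookup.normalized('mybucket', '1', [314n, 314, 3.14]);
const lookupB = ParameterLookup.normalized('mybucket', '1', [314, 314n, 3.14]);

// This one does not match: 3 is not the same normalized value as 3.14, so the
// test above expects getParameterSets(checkpoint, [lookupC]) to return [].
const lookupC = ParameterLookup.normalized('mybucket', '1', [314n, 314, 3]);
```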
@@ -347,7 +359,9 @@ bucket_definitions:
             });
             const TEST_PARAMS = { group_id: 'group1' };
             const checkpoint = result.flushed_op;
-            const parameters1 = await bucketStorage.getParameterSets(checkpoint, [
+            const parameters1 = await bucketStorage.getParameterSets(checkpoint, [
+                ParameterLookup.normalized('mybucket', '1', [1152921504606846976n])
+            ]);
             expect(parameters1).toEqual([TEST_PARAMS]);
         }
         catch (e_5) {
@@ -452,7 +466,7 @@ bucket_definitions:
             const parameters = new RequestParameters({ sub: 'u1' }, {});
             const q1 = sync_rules.bucket_descriptors[0].parameter_queries[0];
             const lookups = q1.getLookups(parameters);
-            expect(lookups).toEqual([
+            expect(lookups).toEqual([ParameterLookup.normalized('by_workspace', '1', ['u1'])]);
             const parameter_sets = await bucketStorage.getParameterSets(checkpoint, lookups);
             expect(parameter_sets).toEqual([{ workspace_id: 'workspace1' }]);
             const buckets = await sync_rules.getBucketParameterQuerier(parameters).queryDynamicBucketDescriptions({
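This hunk shows the producer side of the same change: `getLookups` on a parameter query now returns `ParameterLookup` values, which can be handed to `getParameterSets` unchanged. A hedged sketch of that flow; the type names and the shape of `sync_rules` are assumptions inferred from the compiled test above:

```ts
import { storage } from '@powersync/service-core';
import { RequestParameters, SqlSyncRules } from '@powersync/service-sync-rules';

// Hypothetical glue code: derive lookups for one authenticated request and
// resolve them against bucket storage, as the test above does for sub 'u1'.
async function resolveDynamicParameters(
  syncRules: SqlSyncRules,
  bucketStorage: storage.SyncRulesBucketStorage,
  checkpoint: Parameters<storage.SyncRulesBucketStorage['getParameterSets']>[0],
  sub: string
) {
  const requestParameters = new RequestParameters({ sub }, {});
  const query = syncRules.bucket_descriptors[0].parameter_queries[0];
  // Returns e.g. [ParameterLookup.normalized('by_workspace', '1', [sub])].
  const lookups = query.getLookups(requestParameters);
  return bucketStorage.getParameterSets(checkpoint, lookups);
}
```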
@@ -520,7 +534,7 @@ bucket_definitions:
             const parameters = new RequestParameters({ sub: 'unknown' }, {});
             const q1 = sync_rules.bucket_descriptors[0].parameter_queries[0];
             const lookups = q1.getLookups(parameters);
-            expect(lookups).toEqual([
+            expect(lookups).toEqual([ParameterLookup.normalized('by_public_workspace', '1', [])]);
             const parameter_sets = await bucketStorage.getParameterSets(checkpoint, lookups);
             parameter_sets.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
             expect(parameter_sets).toEqual([{ workspace_id: 'workspace1' }, { workspace_id: 'workspace3' }]);
@@ -607,13 +621,13 @@ bucket_definitions:
             // Test intermediate values - could be moved to sync_rules.test.ts
             const q1 = sync_rules.bucket_descriptors[0].parameter_queries[0];
             const lookups1 = q1.getLookups(parameters);
-            expect(lookups1).toEqual([
+            expect(lookups1).toEqual([ParameterLookup.normalized('by_workspace', '1', [])]);
             const parameter_sets1 = await bucketStorage.getParameterSets(checkpoint, lookups1);
             parameter_sets1.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
             expect(parameter_sets1).toEqual([{ workspace_id: 'workspace1' }]);
             const q2 = sync_rules.bucket_descriptors[0].parameter_queries[1];
             const lookups2 = q2.getLookups(parameters);
-            expect(lookups2).toEqual([
+            expect(lookups2).toEqual([ParameterLookup.normalized('by_workspace', '2', ['u1'])]);
             const parameter_sets2 = await bucketStorage.getParameterSets(checkpoint, lookups2);
             parameter_sets2.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
             expect(parameter_sets2).toEqual([{ workspace_id: 'workspace3' }]);
@@ -916,7 +930,9 @@ bucket_definitions:
                 await batch.truncate([TEST_TABLE]);
             });
             const { checkpoint } = await bucketStorage.getCheckpoint();
-            const parameters = await bucketStorage.getParameterSets(checkpoint, [
+            const parameters = await bucketStorage.getParameterSets(checkpoint, [
+                ParameterLookup.normalized('mybucket', '1', ['user1'])
+            ]);
             expect(parameters).toEqual([]);
         }
         catch (e_13) {
@@ -1509,5 +1525,259 @@ bucket_definitions:
                 await result_20;
             }
         });
+    test('managed write checkpoints - checkpoint after write', async (context) => {
+        const env_21 = { stack: [], error: void 0, hasError: false };
+        try {
+            const factory = __addDisposableResource(env_21, await generateStorageFactory(), true);
+            const r = await factory.configureSyncRules({
+                content: `
+bucket_definitions:
+  mybucket:
+    data: []
+`,
+                validate: false
+            });
+            const bucketStorage = factory.getInstance(r.persisted_sync_rules);
+            await bucketStorage.autoActivate();
+            const abortController = new AbortController();
+            context.onTestFinished(() => abortController.abort());
+            const iter = bucketStorage
+                .watchCheckpointChanges({ user_id: 'user1', signal: abortController.signal })[Symbol.asyncIterator]();
+            const writeCheckpoint = await bucketStorage.createManagedWriteCheckpoint({
+                heads: { '1': '5/0' },
+                user_id: 'user1'
+            });
+            await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+                await batch.keepalive('5/0');
+            });
+            const result = await iter.next();
+            expect(result).toMatchObject({
+                done: false,
+                value: {
+                    base: {
+                        checkpoint: 0n,
+                        lsn: '5/0'
+                    },
+                    writeCheckpoint: writeCheckpoint
+                }
+            });
+        }
+        catch (e_21) {
+            env_21.error = e_21;
+            env_21.hasError = true;
+        }
+        finally {
+            const result_21 = __disposeResources(env_21);
+            if (result_21)
+                await result_21;
+        }
+    });
+    test('managed write checkpoints - write after checkpoint', async (context) => {
+        const env_22 = { stack: [], error: void 0, hasError: false };
+        try {
+            const factory = __addDisposableResource(env_22, await generateStorageFactory(), true);
+            const r = await factory.configureSyncRules({
+                content: `
+bucket_definitions:
+  mybucket:
+    data: []
+`,
+                validate: false
+            });
+            const bucketStorage = factory.getInstance(r.persisted_sync_rules);
+            await bucketStorage.autoActivate();
+            const abortController = new AbortController();
+            context.onTestFinished(() => abortController.abort());
+            const iter = bucketStorage
+                .watchCheckpointChanges({ user_id: 'user1', signal: abortController.signal })[Symbol.asyncIterator]();
+            await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+                await batch.keepalive('5/0');
+            });
+            const result = await iter.next();
+            expect(result).toMatchObject({
+                done: false,
+                value: {
+                    base: {
+                        checkpoint: 0n,
+                        lsn: '5/0'
+                    },
+                    writeCheckpoint: null
+                }
+            });
+            const writeCheckpoint = await bucketStorage.createManagedWriteCheckpoint({
+                heads: { '1': '6/0' },
+                user_id: 'user1'
+            });
+            // We have to trigger a new keepalive after the checkpoint, at least to cover postgres storage.
+            // This is what is effetively triggered with RouteAPI.createReplicationHead().
+            // MongoDB storage doesn't explicitly need this anymore.
+            await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+                await batch.keepalive('6/0');
+            });
+            let result2 = await iter.next();
+            if (result2.value?.base?.lsn == '5/0') {
+                // Events could arrive in a different order in some cases - this caters for it
+                result2 = await iter.next();
+            }
+            expect(result2).toMatchObject({
+                done: false,
+                value: {
+                    base: {
+                        checkpoint: 0n,
+                        lsn: '6/0'
+                    },
+                    writeCheckpoint: writeCheckpoint
+                }
+            });
+        }
+        catch (e_22) {
+            env_22.error = e_22;
+            env_22.hasError = true;
+        }
+        finally {
+            const result_22 = __disposeResources(env_22);
+            if (result_22)
+                await result_22;
+        }
+    });
+    test('custom write checkpoints - checkpoint after write', async (context) => {
+        const env_23 = { stack: [], error: void 0, hasError: false };
+        try {
+            const factory = __addDisposableResource(env_23, await generateStorageFactory(), true);
+            const r = await factory.configureSyncRules({
+                content: `
+bucket_definitions:
+  mybucket:
+    data: []
+`,
+                validate: false
+            });
+            const bucketStorage = factory.getInstance(r.persisted_sync_rules);
+            await bucketStorage.autoActivate();
+            bucketStorage.setWriteCheckpointMode(storage.WriteCheckpointMode.CUSTOM);
+            const abortController = new AbortController();
+            context.onTestFinished(() => abortController.abort());
+            const iter = bucketStorage
+                .watchCheckpointChanges({ user_id: 'user1', signal: abortController.signal })[Symbol.asyncIterator]();
+            await bucketStorage.batchCreateCustomWriteCheckpoints([
+                {
+                    checkpoint: 5n,
+                    user_id: 'user1'
+                }
+            ]);
+            await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+                await batch.keepalive('5/0');
+            });
+            const result = await iter.next();
+            expect(result).toMatchObject({
+                done: false,
+                value: {
+                    base: {
+                        checkpoint: 0n,
+                        lsn: '5/0'
+                    },
+                    writeCheckpoint: 5n
+                }
+            });
+        }
+        catch (e_23) {
+            env_23.error = e_23;
+            env_23.hasError = true;
+        }
+        finally {
+            const result_23 = __disposeResources(env_23);
+            if (result_23)
+                await result_23;
+        }
+    });
+    test('custom write checkpoints - write after checkpoint', async (context) => {
+        const env_24 = { stack: [], error: void 0, hasError: false };
+        try {
+            const factory = __addDisposableResource(env_24, await generateStorageFactory(), true);
+            const r = await factory.configureSyncRules({
+                content: `
+bucket_definitions:
+  mybucket:
+    data: []
+`,
+                validate: false
+            });
+            const bucketStorage = factory.getInstance(r.persisted_sync_rules);
+            await bucketStorage.autoActivate();
+            bucketStorage.setWriteCheckpointMode(storage.WriteCheckpointMode.CUSTOM);
+            const abortController = new AbortController();
+            context.onTestFinished(() => abortController.abort());
+            const iter = bucketStorage
+                .watchCheckpointChanges({ user_id: 'user1', signal: abortController.signal })[Symbol.asyncIterator]();
+            await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+                await batch.keepalive('5/0');
+            });
+            const result = await iter.next();
+            expect(result).toMatchObject({
+                done: false,
+                value: {
+                    base: {
+                        checkpoint: 0n,
+                        lsn: '5/0'
+                    },
+                    writeCheckpoint: null
+                }
+            });
+            await bucketStorage.batchCreateCustomWriteCheckpoints([
+                {
+                    checkpoint: 6n,
+                    user_id: 'user1'
+                }
+            ]);
+            // We have to trigger a new keepalive after the checkpoint, at least to cover postgres storage.
+            // This is what is effetively triggered with RouteAPI.createReplicationHead().
+            // MongoDB storage doesn't explicitly need this anymore.
+            await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+                await batch.keepalive('6/0');
+            });
+            let result2 = await iter.next();
+            expect(result2).toMatchObject({
+                done: false,
+                value: {
+                    base: {
+                        checkpoint: 0n
+                        // can be 5/0 or 6/0 - actual value not relevant for custom write checkpoints
+                        // lsn: '6/0'
+                    },
+                    writeCheckpoint: 6n
+                }
+            });
+            await bucketStorage.batchCreateCustomWriteCheckpoints([
+                {
+                    checkpoint: 7n,
+                    user_id: 'user1'
+                }
+            ]);
+            await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+                await batch.keepalive('7/0');
+            });
+            let result3 = await iter.next();
+            expect(result3).toMatchObject({
+                done: false,
+                value: {
+                    base: {
+                        checkpoint: 0n
+                        // can be 5/0, 6/0 or 7/0 - actual value not relevant for custom write checkpoints
+                        // lsn: '7/0'
+                    },
+                    writeCheckpoint: 7n
+                }
+            });
+        }
+        catch (e_24) {
+            env_24.error = e_24;
+            env_24.hasError = true;
+        }
+        finally {
+            const result_24 = __disposeResources(env_24);
+            if (result_24)
+                await result_24;
+        }
+    });
 }
 //# sourceMappingURL=register-data-storage-tests.js.map
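All four added tests follow the same pattern: consume `watchCheckpointChanges` as an async iterator, record a write checkpoint (managed via `createManagedWriteCheckpoint`, or custom via `batchCreateCustomWriteCheckpoints` after `setWriteCheckpointMode(storage.WriteCheckpointMode.CUSTOM)`), advance the replication head with `batch.keepalive(lsn)`, and assert that the next emitted checkpoint carries the expected `writeCheckpoint`. A condensed sketch of the managed flow, using only calls that appear in the compiled tests (the storage type names are assumptions):

```ts
import { storage } from '@powersync/service-core';

// Hypothetical, trimmed-down version of the 'managed write checkpoints -
// checkpoint after write' test body above.
async function emitsWriteCheckpointAfterKeepalive(
  bucketStorage: storage.SyncRulesBucketStorage,
  batchOptions: storage.StartBatchOptions,
  signal: AbortSignal
): Promise<boolean> {
  // Checkpoint changes are streamed (see the 833e8f2 changelog entry), so the
  // watcher is consumed as an async iterator rather than polled.
  const iter = bucketStorage
    .watchCheckpointChanges({ user_id: 'user1', signal })[Symbol.asyncIterator]();

  // Record a managed write checkpoint at LSN 5/0 for this user...
  const writeCheckpoint = await bucketStorage.createManagedWriteCheckpoint({
    heads: { '1': '5/0' },
    user_id: 'user1'
  });

  // ...then advance the replication head past that LSN with a keepalive.
  await bucketStorage.startBatch(batchOptions, async (batch) => {
    await batch.keepalive('5/0');
  });

  // The next emitted checkpoint should now include the write checkpoint.
  const { value } = await iter.next();
  return value?.writeCheckpoint === writeCheckpoint;
}
```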