@powersync/service-module-postgres-storage 0.11.2 → 0.13.0
This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
- package/CHANGELOG.md +60 -0
- package/dist/.tsbuildinfo +1 -1
- package/dist/@types/migrations/scripts/1771232439485-storage-version.d.ts +3 -0
- package/dist/@types/migrations/scripts/1771424826685-current-data-pending-deletes.d.ts +3 -0
- package/dist/@types/migrations/scripts/1771491856000-sync-plan.d.ts +3 -0
- package/dist/@types/storage/PostgresBucketStorageFactory.d.ts +6 -10
- package/dist/@types/storage/PostgresCompactor.d.ts +10 -3
- package/dist/@types/storage/PostgresSyncRulesStorage.d.ts +5 -3
- package/dist/@types/storage/batch/OperationBatch.d.ts +2 -2
- package/dist/@types/storage/batch/PostgresBucketBatch.d.ts +12 -9
- package/dist/@types/storage/batch/PostgresPersistedBatch.d.ts +17 -5
- package/dist/@types/storage/current-data-store.d.ts +85 -0
- package/dist/@types/storage/current-data-table.d.ts +9 -0
- package/dist/@types/storage/sync-rules/PostgresPersistedSyncRulesContent.d.ts +1 -10
- package/dist/@types/storage/table-id.d.ts +2 -0
- package/dist/@types/types/models/CurrentData.d.ts +18 -3
- package/dist/@types/types/models/SyncRules.d.ts +12 -2
- package/dist/@types/types/models/json.d.ts +11 -0
- package/dist/@types/types/types.d.ts +2 -0
- package/dist/@types/utils/bson.d.ts +1 -1
- package/dist/@types/utils/db.d.ts +9 -0
- package/dist/@types/utils/test-utils.d.ts +1 -1
- package/dist/migrations/scripts/1771232439485-storage-version.js +111 -0
- package/dist/migrations/scripts/1771232439485-storage-version.js.map +1 -0
- package/dist/migrations/scripts/1771424826685-current-data-pending-deletes.js +8 -0
- package/dist/migrations/scripts/1771424826685-current-data-pending-deletes.js.map +1 -0
- package/dist/migrations/scripts/1771491856000-sync-plan.js +91 -0
- package/dist/migrations/scripts/1771491856000-sync-plan.js.map +1 -0
- package/dist/storage/PostgresBucketStorageFactory.js +56 -58
- package/dist/storage/PostgresBucketStorageFactory.js.map +1 -1
- package/dist/storage/PostgresCompactor.js +55 -66
- package/dist/storage/PostgresCompactor.js.map +1 -1
- package/dist/storage/PostgresSyncRulesStorage.js +23 -15
- package/dist/storage/PostgresSyncRulesStorage.js.map +1 -1
- package/dist/storage/batch/OperationBatch.js +2 -1
- package/dist/storage/batch/OperationBatch.js.map +1 -1
- package/dist/storage/batch/PostgresBucketBatch.js +286 -213
- package/dist/storage/batch/PostgresBucketBatch.js.map +1 -1
- package/dist/storage/batch/PostgresPersistedBatch.js +86 -81
- package/dist/storage/batch/PostgresPersistedBatch.js.map +1 -1
- package/dist/storage/current-data-store.js +270 -0
- package/dist/storage/current-data-store.js.map +1 -0
- package/dist/storage/current-data-table.js +22 -0
- package/dist/storage/current-data-table.js.map +1 -0
- package/dist/storage/sync-rules/PostgresPersistedSyncRulesContent.js +14 -30
- package/dist/storage/sync-rules/PostgresPersistedSyncRulesContent.js.map +1 -1
- package/dist/storage/table-id.js +8 -0
- package/dist/storage/table-id.js.map +1 -0
- package/dist/types/models/CurrentData.js +11 -2
- package/dist/types/models/CurrentData.js.map +1 -1
- package/dist/types/models/SyncRules.js +12 -1
- package/dist/types/models/SyncRules.js.map +1 -1
- package/dist/types/models/json.js +21 -0
- package/dist/types/models/json.js.map +1 -0
- package/dist/utils/bson.js.map +1 -1
- package/dist/utils/db.js +41 -0
- package/dist/utils/db.js.map +1 -1
- package/dist/utils/test-utils.js +50 -14
- package/dist/utils/test-utils.js.map +1 -1
- package/package.json +9 -9
- package/src/migrations/scripts/1771232439485-storage-version.ts +44 -0
- package/src/migrations/scripts/1771424826685-current-data-pending-deletes.ts +10 -0
- package/src/migrations/scripts/1771491856000-sync-plan.ts +21 -0
- package/src/storage/PostgresBucketStorageFactory.ts +69 -68
- package/src/storage/PostgresCompactor.ts +63 -72
- package/src/storage/PostgresSyncRulesStorage.ts +30 -17
- package/src/storage/batch/OperationBatch.ts +4 -3
- package/src/storage/batch/PostgresBucketBatch.ts +306 -238
- package/src/storage/batch/PostgresPersistedBatch.ts +92 -84
- package/src/storage/current-data-store.ts +326 -0
- package/src/storage/current-data-table.ts +26 -0
- package/src/storage/sync-rules/PostgresPersistedSyncRulesContent.ts +13 -33
- package/src/storage/table-id.ts +9 -0
- package/src/types/models/CurrentData.ts +17 -4
- package/src/types/models/SyncRules.ts +16 -1
- package/src/types/models/json.ts +26 -0
- package/src/utils/bson.ts +1 -1
- package/src/utils/db.ts +47 -0
- package/src/utils/test-utils.ts +42 -15
- package/test/src/__snapshots__/storage.test.ts.snap +148 -6
- package/test/src/__snapshots__/storage_compacting.test.ts.snap +17 -0
- package/test/src/__snapshots__/storage_sync.test.ts.snap +2211 -21
- package/test/src/migrations.test.ts +9 -2
- package/test/src/storage.test.ts +137 -131
- package/test/src/storage_compacting.test.ts +113 -2
- package/test/src/storage_sync.test.ts +148 -4
- package/test/src/util.ts +5 -2

package/test/src/migrations.test.ts
CHANGED

@@ -2,9 +2,10 @@ import { beforeEach, describe, expect, it } from 'vitest';
 
 import { Direction } from '@powersync/lib-services-framework';
 import { register } from '@powersync/service-core-tests';
+import { dropTables, PostgresBucketStorageFactory } from '../../src/index.js';
 import { PostgresMigrationAgent } from '../../src/migrations/PostgresMigrationAgent.js';
 import { env } from './env.js';
-import { POSTGRES_STORAGE_FACTORY, POSTGRES_STORAGE_SETUP } from './util.js';
+import { POSTGRES_STORAGE_FACTORY, POSTGRES_STORAGE_SETUP, TEST_CONNECTION_OPTIONS } from './util.js';
 
 const MIGRATION_AGENT_FACTORY = () => {
   return new PostgresMigrationAgent({ type: 'postgresql', uri: env.PG_STORAGE_TEST_URL, sslmode: 'disable' });
@@ -15,13 +16,19 @@ describe('Migrations', () => {
     // The migration tests clear the migration store, without running the down migrations.
     // This ensures all the down migrations have been run before.
     const setup = POSTGRES_STORAGE_SETUP;
+    await using factory = new PostgresBucketStorageFactory({
+      config: TEST_CONNECTION_OPTIONS,
+      slot_name_prefix: 'test_'
+    });
+
+    await dropTables(factory.db);
     await setup.migrate(Direction.Down);
   });
 
   register.registerMigrationTests(MIGRATION_AGENT_FACTORY);
 
   it('Should have tables declared', async () => {
-    const { db } = await POSTGRES_STORAGE_FACTORY();
+    const { db } = await POSTGRES_STORAGE_FACTORY.factory();
 
     const tables = await db.sql`
       SELECT
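
The `await using` declaration added above uses TypeScript's explicit resource management: when the enclosing scope exits, normally or via a throw, the runtime awaits the binding's `[Symbol.asyncDispose]()` method, so the factory's resources are released when the `beforeEach` block finishes even if `dropTables` or the down-migration throws. A minimal sketch of that contract, assuming TS 5.2+ with the `esnext.disposable` lib; `ExampleFactory` is a hypothetical stand-in, not the real `PostgresBucketStorageFactory` API:

```ts
// Sketch of the `await using` disposal contract; ExampleFactory is hypothetical.
class ExampleFactory implements AsyncDisposable {
  async [Symbol.asyncDispose](): Promise<void> {
    // Close connection pools / handles here. Runs automatically when the
    // `await using` binding goes out of scope, even if the block throws.
  }
}

async function resetStorage(): Promise<void> {
  await using factory = new ExampleFactory();
  // ... use the factory, e.g. drop tables through its connection ...
} // <- [Symbol.asyncDispose]() is awaited here
```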
package/test/src/storage.test.ts
CHANGED

@@ -1,149 +1,155 @@
-import { storage } from '@powersync/service-core';
-import {
+import { storage, updateSyncRulesFromYaml } from '@powersync/service-core';
+import { bucketRequestMap, register, test_utils } from '@powersync/service-core-tests';
 import { describe, expect, test } from 'vitest';
-import { POSTGRES_STORAGE_FACTORY } from './util.js';
-
-describe('Postgres Sync Bucket Storage - Parameters', () =>
-  register.registerDataStorageParameterTests(POSTGRES_STORAGE_FACTORY));
-
-describe('Postgres Sync Bucket Storage - Data', () => register.registerDataStorageDataTests(POSTGRES_STORAGE_FACTORY));
-
-describe('Postgres Sync Bucket Storage - Checkpoints', () =>
-  register.registerDataStorageCheckpointTests(POSTGRES_STORAGE_FACTORY));
+import { POSTGRES_STORAGE_FACTORY, TEST_STORAGE_VERSIONS } from './util.js';
 
 describe('Sync Bucket Validation', register.registerBucketValidationTests);
 
-[13 lines not captured]
+for (let storageVersion of TEST_STORAGE_VERSIONS) {
+  describe(`Postgres Sync Bucket Storage - Parameters - v${storageVersion}`, () =>
+    register.registerDataStorageParameterTests({ ...POSTGRES_STORAGE_FACTORY, storageVersion }));
+
+  describe(`Postgres Sync Bucket Storage - Data - v${storageVersion}`, () =>
+    register.registerDataStorageDataTests({ ...POSTGRES_STORAGE_FACTORY, storageVersion }));
+
+  describe(`Postgres Sync Bucket Storage - Checkpoints - v${storageVersion}`, () =>
+    register.registerDataStorageCheckpointTests({ ...POSTGRES_STORAGE_FACTORY, storageVersion }));
+
+  describe(`Postgres Sync Bucket Storage - pg-specific - v${storageVersion}`, () => {
+    /**
+     * The split of returned results can vary depending on storage drivers.
+     * The large rows here are 2MB large while the default chunk limit is 1mb.
+     * The Postgres storage driver will detect if the next row will increase the batch
+     * over the limit and separate that row into a new batch (or single row batch) if applicable.
+     */
+    test('large batch (2)', async () => {
+      // Test syncing a batch of data that is small in count,
+      // but large enough in size to be split over multiple returned chunks.
+      // Similar to the above test, but splits over 1MB chunks.
+      await using factory = await POSTGRES_STORAGE_FACTORY.factory();
+      const syncRules = await factory.updateSyncRules(
+        updateSyncRulesFromYaml(
+          `
 bucket_definitions:
   global:
     data:
      - SELECT id, description FROM "%"
-[6 lines not captured]
-const
-[11 lines not captured]
+`,
+          { storageVersion }
+        )
+      );
+      const bucketStorage = factory.getInstance(syncRules);
+
+      const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+        const sourceTable = test_utils.makeTestTable('test', ['id'], POSTGRES_STORAGE_FACTORY);
+
+        const largeDescription = '0123456789'.repeat(2_000_00);
+
+        await batch.save({
+          sourceTable,
+          tag: storage.SaveOperationTag.INSERT,
+          after: {
+            id: 'test1',
+            description: 'test1'
+          },
+          afterReplicaId: test_utils.rid('test1')
+        });
+
+        await batch.save({
+          sourceTable,
+          tag: storage.SaveOperationTag.INSERT,
+          after: {
+            id: 'large1',
+            description: largeDescription
+          },
+          afterReplicaId: test_utils.rid('large1')
+        });
+
+        // Large enough to split the returned batch
+        await batch.save({
+          sourceTable,
+          tag: storage.SaveOperationTag.INSERT,
+          after: {
+            id: 'large2',
+            description: largeDescription
+          },
+          afterReplicaId: test_utils.rid('large2')
+        });
+
+        await batch.save({
+          sourceTable,
+          tag: storage.SaveOperationTag.INSERT,
+          after: {
+            id: 'test3',
+            description: 'test3'
+          },
+          afterReplicaId: test_utils.rid('test3')
+        });
       });
 
-[1 line not captured]
-          sourceTable,
-          tag: storage.SaveOperationTag.INSERT,
-          after: {
-            id: 'large1',
-            description: largeDescription
-          },
-          afterReplicaId: test_utils.rid('large1')
-        });
+      const checkpoint = result!.flushed_op;
 
-[1 line not captured]
-        await batch.save({
-          sourceTable,
-          tag: storage.SaveOperationTag.INSERT,
-          after: {
-            id: 'large2',
-            description: largeDescription
-          },
-          afterReplicaId: test_utils.rid('large2')
-        });
+      const options: storage.BucketDataBatchOptions = {};
 
-await
-[7 lines not captured]
+      const batch1 = await test_utils.fromAsync(
+        bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]]), options)
+      );
+      expect(test_utils.getBatchData(batch1)).toEqual([
+        { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 }
+      ]);
+      expect(test_utils.getBatchMeta(batch1)).toEqual({
+        after: '0',
+        has_more: true,
+        next_after: '1'
       });
-    });
-
-    const checkpoint = result!.flushed_op;
-
-    const options: storage.BucketDataBatchOptions = {};
 
-[15 lines not captured]
-        new Map([['global[]', BigInt(batch1[0].chunkData.next_after)]]),
-        options
-      )
-    );
-    expect(test_utils.getBatchData(batch2)).toEqual([
-      { op_id: '2', op: 'PUT', object_id: 'large1', checksum: 1178768505 }
-    ]);
-    expect(test_utils.getBatchMeta(batch2)).toEqual({
-      after: '1',
-      has_more: true,
-      next_after: '2'
-    });
+      const batch2 = await test_utils.fromAsync(
+        bucketStorage.getBucketDataBatch(
+          checkpoint,
+          bucketRequestMap(syncRules, [['global[]', BigInt(batch1[0].chunkData.next_after)]]),
+          options
+        )
+      );
+      expect(test_utils.getBatchData(batch2)).toEqual([
+        { op_id: '2', op: 'PUT', object_id: 'large1', checksum: 1178768505 }
+      ]);
+      expect(test_utils.getBatchMeta(batch2)).toEqual({
+        after: '1',
+        has_more: true,
+        next_after: '2'
+      });
 
-[15 lines not captured]
+      const batch3 = await test_utils.fromAsync(
+        bucketStorage.getBucketDataBatch(
+          checkpoint,
+          bucketRequestMap(syncRules, [['global[]', BigInt(batch2[0].chunkData.next_after)]]),
+          options
+        )
+      );
+      expect(test_utils.getBatchData(batch3)).toEqual([
+        { op_id: '3', op: 'PUT', object_id: 'large2', checksum: 1607205872 }
+      ]);
+      expect(test_utils.getBatchMeta(batch3)).toEqual({
+        after: '2',
+        has_more: true,
+        next_after: '3'
+      });
 
-[14 lines not captured]
+      const batch4 = await test_utils.fromAsync(
+        bucketStorage.getBucketDataBatch(
+          checkpoint,
+          bucketRequestMap(syncRules, [['global[]', BigInt(batch3[0].chunkData.next_after)]]),
+          options
+        )
+      );
+      expect(test_utils.getBatchData(batch4)).toEqual([
+        { op_id: '4', op: 'PUT', object_id: 'test3', checksum: 1359888332 }
+      ]);
+      expect(test_utils.getBatchMeta(batch4)).toEqual({
+        after: '3',
+        has_more: false,
+        next_after: '4'
+      });
     });
   });
-}
+}
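
The doc comment in the new pg-specific suite pins down the chunking rule the four-batch assertions exercise: results stream back in chunks of roughly 1MB, and a row that would push the current chunk over the limit starts a new chunk, so each 2MB `description` row comes back as a single-row chunk. A rough sketch of that rule in isolation; the constant and function names are illustrative, not the storage driver's actual code:

```ts
// Illustrative only: assumes the 1MB default limit the test comment describes.
const CHUNK_LIMIT_BYTES = 1024 * 1024;

function* splitIntoChunks(rows: { data: string }[]): Generator<{ data: string }[]> {
  let current: { data: string }[] = [];
  let size = 0;
  for (const row of rows) {
    const rowSize = Buffer.byteLength(row.data, 'utf8');
    // If this row would push the batch over the limit, flush the current chunk
    // first; an over-sized row therefore becomes a single-row chunk.
    if (current.length > 0 && size + rowSize > CHUNK_LIMIT_BYTES) {
      yield current;
      current = [];
      size = 0;
    }
    current.push(row);
    size += rowSize;
  }
  if (current.length > 0) yield current;
}
```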
package/test/src/storage_compacting.test.ts
CHANGED

@@ -1,5 +1,116 @@
-import {
-import {
+import { storage, updateSyncRulesFromYaml } from '@powersync/service-core';
+import { bucketRequest, bucketRequestMap, register, test_utils } from '@powersync/service-core-tests';
+import { describe, expect, test } from 'vitest';
+import { PostgresCompactor } from '../../src/storage/PostgresCompactor.js';
 import { POSTGRES_STORAGE_FACTORY } from './util.js';
 
 describe('Postgres Sync Bucket Storage Compact', () => register.registerCompactTests(POSTGRES_STORAGE_FACTORY));
+
+describe('Postgres Compact - explicit bucket name', () => {
+  const TEST_TABLE = test_utils.makeTestTable('test', ['id'], POSTGRES_STORAGE_FACTORY);
+  test('compacts a specific bucket by exact name', async () => {
+    await using factory = await POSTGRES_STORAGE_FACTORY.factory();
+    const syncRules = await factory.updateSyncRules(
+      updateSyncRulesFromYaml(`
+bucket_definitions:
+  global:
+    data: [select * from test]
+`)
+    );
+    const bucketStorage = factory.getInstance(syncRules);
+
+    const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.save({
+        sourceTable: TEST_TABLE,
+        tag: storage.SaveOperationTag.INSERT,
+        after: { id: 't1' },
+        afterReplicaId: test_utils.rid('t1')
+      });
+      await batch.save({
+        sourceTable: TEST_TABLE,
+        tag: storage.SaveOperationTag.UPDATE,
+        after: { id: 't1' },
+        afterReplicaId: test_utils.rid('t1')
+      });
+      await batch.markAllSnapshotDone('1/1');
+      await batch.commit('1/1');
+    });
+
+    const checkpoint = result!.flushed_op;
+
+    // Compact with an explicit bucket name — exercises the this.buckets
+    // iteration path, NOT the compactAllBuckets discovery path.
+    await bucketStorage.compact({
+      compactBuckets: [bucketRequest(syncRules, 'global[]').bucket],
+      minBucketChanges: 1
+    });
+
+    const batch = await test_utils.oneFromAsync(
+      bucketStorage.getBucketDataBatch(checkpoint, bucketRequestMap(syncRules, [['global[]', 0n]]))
+    );
+
+    expect(batch.chunkData.data).toMatchObject([
+      { op_id: '1', op: 'MOVE' },
+      { op_id: '2', op: 'PUT', object_id: 't1' }
+    ]);
+  });
+
+  test('clearBucket fails fast when prefix includes PUT', async () => {
+    // This tests the specific implementation, to check that our operation type guard is working
+    // for CLEAR compacting.
+    await using factory = await POSTGRES_STORAGE_FACTORY.factory();
+    const syncRules = await factory.updateSyncRules(
+      updateSyncRulesFromYaml(`
+bucket_definitions:
+  global:
+    data: [select * from test]
+`)
+    );
+    const bucketStorage = factory.getInstance(syncRules);
+    const request = bucketRequest(syncRules, 'global[]');
+
+    const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.markAllSnapshotDone('1/1');
+      await batch.save({
+        sourceTable: TEST_TABLE,
+        tag: storage.SaveOperationTag.INSERT,
+        after: { id: 't1' },
+        afterReplicaId: test_utils.rid('t1')
+      });
+      await batch.save({
+        sourceTable: TEST_TABLE,
+        tag: storage.SaveOperationTag.DELETE,
+        before: { id: 't1' },
+        beforeReplicaId: test_utils.rid('t1')
+      });
+      await batch.save({
+        sourceTable: TEST_TABLE,
+        tag: storage.SaveOperationTag.INSERT,
+        after: { id: 't2' },
+        afterReplicaId: test_utils.rid('t2')
+      });
+      await batch.save({
+        sourceTable: TEST_TABLE,
+        tag: storage.SaveOperationTag.DELETE,
+        before: { id: 't2' },
+        beforeReplicaId: test_utils.rid('t2')
+      });
+      await batch.commit('1/1');
+    });
+
+    const checkpoint = result!.flushed_op;
+    const rowsBefore = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint, [request]));
+    const dataBefore = test_utils.getBatchData(rowsBefore);
+    const clearToOpId = BigInt(dataBefore[2].op_id);
+
+    const compactor = new PostgresCompactor(factory.db, bucketStorage.group_id, {});
+    // Trigger the private method directly
+    await expect(compactor.clearBucketForTests(request.bucket, clearToOpId)).rejects.toThrow(
+      /Unexpected PUT operation/
+    );
+
+    // The method wraps in a transaction; on assertion error the bucket must remain unchanged.
+    const rowsAfter = await test_utils.oneFromAsync(bucketStorage.getBucketDataBatch(checkpoint, [request]));
+    expect(test_utils.getBatchData(rowsAfter)).toEqual(dataBefore);
+  });
+});
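
The `clearBucket` test above fixes the guard behaviour: when compacting collapses a bucket prefix into a CLEAR operation, every operation at or below the target op-id must be a non-PUT (REMOVE, MOVE or CLEAR), and hitting a PUT must abort the surrounding transaction so the bucket is left untouched. A hedged sketch of such a guard; the real logic lives in `package/src/storage/PostgresCompactor.ts` and may differ in detail:

```ts
// Sketch of the operation-type guard the test exercises; names are illustrative.
type OpType = 'PUT' | 'REMOVE' | 'MOVE' | 'CLEAR';

function assertClearablePrefix(ops: { op: OpType; op_id: bigint }[], clearToOpId: bigint): void {
  for (const op of ops) {
    if (op.op_id > clearToOpId) break; // only the prefix being collapsed is inspected
    if (op.op === 'PUT') {
      // Fail fast: a PUT below the CLEAR boundary means collapsing the prefix
      // would drop live data. Throwing rolls back the caller's transaction,
      // which is what the test's before/after comparison verifies.
      throw new Error(`Unexpected PUT operation at op ${op.op_id}`);
    }
  }
}
```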
package/test/src/storage_sync.test.ts
CHANGED

@@ -1,12 +1,156 @@
-import {
-import {
-import {
+import { storage, updateSyncRulesFromYaml } from '@powersync/service-core';
+import { bucketRequest, register, test_utils } from '@powersync/service-core-tests';
+import { describe, expect, test } from 'vitest';
+import { POSTGRES_STORAGE_FACTORY, TEST_STORAGE_VERSIONS } from './util.js';
 
 /**
  * Bucket compacting is not yet implemented.
  * This causes the internal compacting test to fail.
  * Other tests have been verified manually.
  */
+function registerStorageVersionTests(storageVersion: number) {
+  describe(`storage v${storageVersion}`, () => {
+    const storageFactory = POSTGRES_STORAGE_FACTORY;
+    const TEST_TABLE = test_utils.makeTestTable('test', ['id'], storageFactory);
+
+    register.registerSyncTests(storageFactory.factory, {
+      storageVersion,
+      tableIdStrings: storageFactory.tableIdStrings
+    });
+
+    test('large batch (2)', async () => {
+      // Test syncing a batch of data that is small in count,
+      // but large enough in size to be split over multiple returned chunks.
+      // Similar to the above test, but splits over 1MB chunks.
+      await using factory = await storageFactory.factory();
+      const syncRules = await factory.updateSyncRules(
+        updateSyncRulesFromYaml(
+          `
+bucket_definitions:
+  global:
+    data:
+      - SELECT id, description FROM "%"
+`,
+          { storageVersion }
+        )
+      );
+      const bucketStorage = factory.getInstance(syncRules);
+      const globalBucket = bucketRequest(syncRules, 'global[]');
+
+      const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+        const sourceTable = TEST_TABLE;
+
+        const largeDescription = '0123456789'.repeat(2_000_00);
+
+        await batch.save({
+          sourceTable,
+          tag: storage.SaveOperationTag.INSERT,
+          after: {
+            id: 'test1',
+            description: 'test1'
+          },
+          afterReplicaId: test_utils.rid('test1')
+        });
+
+        await batch.save({
+          sourceTable,
+          tag: storage.SaveOperationTag.INSERT,
+          after: {
+            id: 'large1',
+            description: largeDescription
+          },
+          afterReplicaId: test_utils.rid('large1')
+        });
+
+        // Large enough to split the returned batch
+        await batch.save({
+          sourceTable,
+          tag: storage.SaveOperationTag.INSERT,
+          after: {
+            id: 'large2',
+            description: largeDescription
+          },
+          afterReplicaId: test_utils.rid('large2')
+        });
+
+        await batch.save({
+          sourceTable,
+          tag: storage.SaveOperationTag.INSERT,
+          after: {
+            id: 'test3',
+            description: 'test3'
+          },
+          afterReplicaId: test_utils.rid('test3')
+        });
+      });
+
+      const checkpoint = result!.flushed_op;
+
+      const options: storage.BucketDataBatchOptions = {};
+
+      const batch1 = await test_utils.fromAsync(bucketStorage.getBucketDataBatch(checkpoint, [globalBucket], options));
+      expect(test_utils.getBatchData(batch1)).toEqual([
+        { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 }
+      ]);
+      expect(test_utils.getBatchMeta(batch1)).toEqual({
+        after: '0',
+        has_more: true,
+        next_after: '1'
+      });
+
+      const batch2 = await test_utils.fromAsync(
+        bucketStorage.getBucketDataBatch(
+          checkpoint,
+          [{ ...globalBucket, start: BigInt(batch1[0].chunkData.next_after) }],
+          options
+        )
+      );
+      expect(test_utils.getBatchData(batch2)).toEqual([
+        { op_id: '2', op: 'PUT', object_id: 'large1', checksum: 1178768505 }
+      ]);
+      expect(test_utils.getBatchMeta(batch2)).toEqual({
+        after: '1',
+        has_more: true,
+        next_after: '2'
+      });
+
+      const batch3 = await test_utils.fromAsync(
+        bucketStorage.getBucketDataBatch(
+          checkpoint,
+          [{ ...globalBucket, start: BigInt(batch2[0].chunkData.next_after) }],
+          options
+        )
+      );
+      expect(test_utils.getBatchData(batch3)).toEqual([
+        { op_id: '3', op: 'PUT', object_id: 'large2', checksum: 1607205872 }
+      ]);
+      expect(test_utils.getBatchMeta(batch3)).toEqual({
+        after: '2',
+        has_more: true,
+        next_after: '3'
+      });
+
+      const batch4 = await test_utils.fromAsync(
+        bucketStorage.getBucketDataBatch(
+          checkpoint,
+          [{ ...globalBucket, start: BigInt(batch3[0].chunkData.next_after) }],
+          options
+        )
+      );
+      expect(test_utils.getBatchData(batch4)).toEqual([
+        { op_id: '4', op: 'PUT', object_id: 'test3', checksum: 1359888332 }
+      ]);
+      expect(test_utils.getBatchMeta(batch4)).toEqual({
+        after: '3',
+        has_more: false,
+        next_after: '4'
+      });
+    });
+  });
+}
+
 describe('sync - postgres', () => {
-
+  for (const storageVersion of TEST_STORAGE_VERSIONS) {
+    registerStorageVersionTests(storageVersion);
+  }
 });
package/test/src/util.ts
CHANGED

@@ -1,8 +1,9 @@
+import { SUPPORTED_STORAGE_VERSIONS } from '@powersync/service-core';
 import path from 'path';
 import { fileURLToPath } from 'url';
 import { normalizePostgresStorageConfig, PostgresMigrationAgent } from '../../src/index.js';
-import { env } from './env.js';
 import { postgresTestSetup } from '../../src/utils/test-utils.js';
+import { env } from './env.js';
 
 const __filename = fileURLToPath(import.meta.url);
 const __dirname = path.dirname(__filename);
@@ -32,5 +33,7 @@ export const POSTGRES_STORAGE_SETUP = postgresTestSetup({
   migrationAgent: (config) => new TestPostgresMigrationAgent(config)
 });
 
-export const POSTGRES_STORAGE_FACTORY = POSTGRES_STORAGE_SETUP
+export const POSTGRES_STORAGE_FACTORY = POSTGRES_STORAGE_SETUP;
 export const POSTGRES_REPORT_STORAGE_FACTORY = POSTGRES_STORAGE_SETUP.reportFactory;
+
+export const TEST_STORAGE_VERSIONS = SUPPORTED_STORAGE_VERSIONS;