@powersync/service-module-postgres 0.0.3 → 0.1.0
This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as published.
- package/CHANGELOG.md +24 -0
- package/dist/api/PostgresRouteAPIAdapter.d.ts +6 -2
- package/dist/api/PostgresRouteAPIAdapter.js +24 -10
- package/dist/api/PostgresRouteAPIAdapter.js.map +1 -1
- package/dist/module/PostgresModule.js +9 -2
- package/dist/module/PostgresModule.js.map +1 -1
- package/dist/replication/PgManager.js +5 -1
- package/dist/replication/PgManager.js.map +1 -1
- package/dist/replication/WalStream.d.ts +9 -2
- package/dist/replication/WalStream.js +120 -114
- package/dist/replication/WalStream.js.map +1 -1
- package/package.json +4 -4
- package/src/api/PostgresRouteAPIAdapter.ts +31 -12
- package/src/module/PostgresModule.ts +11 -2
- package/src/replication/PgManager.ts +5 -1
- package/src/replication/WalStream.ts +136 -125
- package/test/src/large_batch.test.ts +268 -148
- package/test/src/schema_changes.test.ts +562 -513
- package/test/src/slow_tests.test.ts +2 -1
- package/test/src/util.ts +3 -1
- package/test/src/validation.test.ts +45 -48
- package/test/src/wal_stream.test.ts +224 -249
- package/test/src/wal_stream_utils.ts +61 -22
- package/tsconfig.tsbuildinfo +1 -1
- package/test/src/__snapshots__/pg_test.test.ts.snap +0 -256
```diff
--- package/test/src/schema_changes.test.ts (0.0.3)
+++ package/test/src/schema_changes.test.ts (0.1.0)
@@ -1,15 +1,13 @@
 import { compareIds, putOp, removeOp } from '@core-tests/stream_utils.js';
+import { reduceBucket } from '@powersync/service-core';
+import { setTimeout } from 'node:timers/promises';
 import { describe, expect, test } from 'vitest';
-import { walStreamTest } from './wal_stream_utils.js';
 import { INITIALIZED_MONGO_STORAGE_FACTORY, StorageFactory } from './util.js';
+import { WalStreamTestContext } from './wal_stream_utils.js';
 
-describe(
-  'schema changes',
-  function () {
-    defineTests(INITIALIZED_MONGO_STORAGE_FACTORY);
-  },
-  { timeout: 20_000 }
-);
+describe('schema changes', { timeout: 20_000 }, function () {
+  defineTests(INITIALIZED_MONGO_STORAGE_FACTORY);
+});
 
 const BASIC_SYNC_RULES = `
 bucket_definitions:
```
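The import changes above carry the main refactor of this release's test suite: the `walStreamTest(factory, callback)` wrapper is dropped in favour of a `WalStreamTestContext` opened with a TypeScript 5.2+ `await using` declaration (explicit resource management), so teardown runs through `Symbol.asyncDispose` instead of a wrapper callback. A minimal sketch of that pattern, assuming Node 20+ or a `Symbol.asyncDispose` polyfill; `DemoContext` is a stand-in, not the real `WalStreamTestContext` from `wal_stream_utils.ts`:

```ts
// Sketch only: the real WalStreamTestContext also opens the Postgres pool,
// storage factory, and WAL stream, and disposes of all of them.
class DemoContext implements AsyncDisposable {
  static async open(): Promise<DemoContext> {
    // ... async setup (connect pool, initialize storage) would happen here ...
    return new DemoContext();
  }

  async [Symbol.asyncDispose](): Promise<void> {
    // Runs automatically when the `await using` binding leaves scope,
    // even if the test body throws - replacing the teardown that
    // walStreamTest previously performed around its callback.
  }
}

async function exampleTestBody() {
  await using context = await DemoContext.open();
  // ... use the context; no explicit cleanup call needed ...
}
```

This is why each rewritten test in the second hunk starts with `await using context = await WalStreamTestContext.open(factory);` and no longer nests its body in a callback.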
```diff
@@ -26,518 +24,569 @@ const REMOVE_T1 = removeOp('test_data', 't1');
 const REMOVE_T2 = removeOp('test_data', 't2');
 
 function defineTests(factory: StorageFactory) {
-  test(
-  … (5 deleted lines not preserved in this render)
+  test('re-create table', async () => {
+    await using context = await WalStreamTestContext.open(factory);
+
+    // Drop a table and re-create it.
+    await context.updateSyncRules(BASIC_SYNC_RULES);
+    const { pool } = context;
+
+    await pool.query(`DROP TABLE IF EXISTS test_data`);
+    await pool.query(`CREATE TABLE test_data(id text primary key, description text)`);
+    await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
+
+    await context.replicateSnapshot();
+    context.startStreaming();
+
+    await pool.query(`INSERT INTO test_data(id, description) VALUES('t2', 'test2')`);
+
+    await pool.query(
+      { statement: `DROP TABLE test_data` },
+      { statement: `CREATE TABLE test_data(id text primary key, description text)` },
+      { statement: `INSERT INTO test_data(id, description) VALUES('t3', 'test3')` }
+    );
+
+    const data = await context.getBucketData('global[]');
+
+    // Initial inserts
+    expect(data.slice(0, 2)).toMatchObject([PUT_T1, PUT_T2]);
+
+    // Truncate - order doesn't matter
+    expect(data.slice(2, 4).sort(compareIds)).toMatchObject([REMOVE_T1, REMOVE_T2]);
+
+    expect(data.slice(4)).toMatchObject([
+      // Snapshot insert
+      PUT_T3,
+      // Replicated insert
+      // We may eventually be able to de-duplicate this
+      PUT_T3
+    ]);
+  });
+
+  test('add table', async () => {
+    await using context = await WalStreamTestContext.open(factory);
+    // Add table after initial replication
+    await context.updateSyncRules(BASIC_SYNC_RULES);
+    const { pool } = context;
+
+    await context.replicateSnapshot();
+    context.startStreaming();
+
+    await pool.query(`CREATE TABLE test_data(id text primary key, description text)`);
+    await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
+
+    const data = await context.getBucketData('global[]');
+
+    expect(data).toMatchObject([
+      // Snapshot insert
+      PUT_T1,
+      // Replicated insert
+      // We may eventually be able to de-duplicate this
+      PUT_T1
+    ]);
+  });
+
+  test('rename table (1)', async () => {
+    await using context = await WalStreamTestContext.open(factory);
+    const { pool } = context;
+
+    await context.updateSyncRules(BASIC_SYNC_RULES);
+
+    // Rename table not in sync rules -> in sync rules
+    await pool.query(`CREATE TABLE test_data_old(id text primary key, description text)`);
+    await pool.query(`INSERT INTO test_data_old(id, description) VALUES('t1', 'test1')`);
+
+    await context.replicateSnapshot();
+    context.startStreaming();
+
+    await pool.query(
+      { statement: `ALTER TABLE test_data_old RENAME TO test_data` },
+      // We need an operation to detect the change
+      { statement: `INSERT INTO test_data(id, description) VALUES('t2', 'test2')` }
+    );
+
+    const data = await context.getBucketData('global[]');
+
+    expect(data.slice(0, 2).sort(compareIds)).toMatchObject([
+      // Snapshot insert
+      PUT_T1,
+      PUT_T2
+    ]);
+    expect(data.slice(2)).toMatchObject([
+      // Replicated insert
+      // We may eventually be able to de-duplicate this
+      PUT_T2
+    ]);
+  });
+
+  test('rename table (2)', async () => {
+    await using context = await WalStreamTestContext.open(factory);
+    // Rename table in sync rules -> in sync rules
+    const { pool } = context;
+
+    await context.updateSyncRules(`
+      bucket_definitions:
+        global:
+          data:
+            - SELECT id, * FROM "test_data%"
+    `);
+
+    await pool.query(`CREATE TABLE test_data1(id text primary key, description text)`);
+    await pool.query(`INSERT INTO test_data1(id, description) VALUES('t1', 'test1')`);
+
+    await context.replicateSnapshot();
+    context.startStreaming();
+
+    await pool.query(
+      { statement: `ALTER TABLE test_data1 RENAME TO test_data2` },
+      // We need an operation to detect the change
+      { statement: `INSERT INTO test_data2(id, description) VALUES('t2', 'test2')` }
+    );
+
+    const data = await context.getBucketData('global[]');
+
+    expect(data.slice(0, 2)).toMatchObject([
+      // Initial replication
+      putOp('test_data1', { id: 't1', description: 'test1' }),
+      // Initial truncate
+      removeOp('test_data1', 't1')
+    ]);
+
+    expect(data.slice(2, 4).sort(compareIds)).toMatchObject([
+      // Snapshot insert
+      putOp('test_data2', { id: 't1', description: 'test1' }),
+      putOp('test_data2', { id: 't2', description: 'test2' })
+    ]);
+    expect(data.slice(4)).toMatchObject([
+      // Replicated insert
+      // We may eventually be able to de-duplicate this
+      putOp('test_data2', { id: 't2', description: 'test2' })
+    ]);
+  });
 
-  … (3 deleted lines not preserved in this render)
+  test('rename table (3)', async () => {
+    await using context = await WalStreamTestContext.open(factory);
+    // Rename table in sync rules -> not in sync rules
 
-  … (1 deleted line not preserved in this render)
-      context.startStreaming();
+    const { pool } = context;
 
-  … (1 deleted line not preserved in this render)
+    await context.updateSyncRules(BASIC_SYNC_RULES);
 
-  … (2 deleted lines not preserved in this render)
-        { statement: `CREATE TABLE test_data(id text primary key, description text)` },
-        { statement: `INSERT INTO test_data(id, description) VALUES('t3', 'test3')` }
-      );
+    await pool.query(`CREATE TABLE test_data(id text primary key, description text)`);
+    await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
 
-  … (1 deleted line not preserved in this render)
+    await context.replicateSnapshot();
+    context.startStreaming();
 
+    await pool.query(
+      { statement: `ALTER TABLE test_data RENAME TO test_data_na` },
+      // We need an operation to detect the change
+      { statement: `INSERT INTO test_data_na(id, description) VALUES('t2', 'test2')` }
+    );
+
+    const data = await context.getBucketData('global[]');
+
+    expect(data).toMatchObject([
+      // Initial replication
+      PUT_T1,
+      // Truncate
+      REMOVE_T1
+    ]);
+  });
+
+  test('change replica id', async () => {
+    await using context = await WalStreamTestContext.open(factory);
+    // Change replica id from default to full
+    // Causes a re-import of the table.
+
+    const { pool } = context;
+    await context.updateSyncRules(BASIC_SYNC_RULES);
+
+    await pool.query(`CREATE TABLE test_data(id text primary key, description text)`);
+    await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
+
+    await context.replicateSnapshot();
+    context.startStreaming();
+
+    await pool.query(
+      { statement: `ALTER TABLE test_data REPLICA IDENTITY FULL` },
+      // We need an operation to detect the change
+      { statement: `INSERT INTO test_data(id, description) VALUES('t2', 'test2')` }
+    );
+
+    const data = await context.getBucketData('global[]');
+
+    expect(data.slice(0, 2)).toMatchObject([
       // Initial inserts
-  … (13 deleted lines not preserved in this render)
-  );
-  … (1 deleted line not preserved in this render)
-  test(
-  … (71 deleted lines not preserved in this render)
+      PUT_T1,
+      // Truncate
+      REMOVE_T1
+    ]);
+
+    // Snapshot - order doesn't matter
+    expect(data.slice(2, 4).sort(compareIds)).toMatchObject([PUT_T1, PUT_T2]);
+
+    expect(data.slice(4).sort(compareIds)).toMatchObject([
+      // Replicated insert
+      // We may eventually be able to de-duplicate this
+      PUT_T2
+    ]);
+  });
+
+  test('change full replica id by adding column', async () => {
+    await using context = await WalStreamTestContext.open(factory);
+    // Change replica id from full by adding column
+    // Causes a re-import of the table.
+    // Other changes such as renaming column would have the same effect
+
+    const { pool } = context;
+    await context.updateSyncRules(BASIC_SYNC_RULES);
+
+    await pool.query(`CREATE TABLE test_data(id text primary key, description text)`);
+    await pool.query(`ALTER TABLE test_data REPLICA IDENTITY FULL`);
+    await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
+
+    await context.replicateSnapshot();
+    context.startStreaming();
+
+    await pool.query(
+      { statement: `ALTER TABLE test_data ADD COLUMN other TEXT` },
+      { statement: `INSERT INTO test_data(id, description) VALUES('t2', 'test2')` }
+    );
+
+    const data = await context.getBucketData('global[]');
+
+    expect(data.slice(0, 2)).toMatchObject([
+      // Initial inserts
+      PUT_T1,
+      // Truncate
+      REMOVE_T1
+    ]);
+
+    // Snapshot - order doesn't matter
+    expect(data.slice(2, 4).sort(compareIds)).toMatchObject([
+      putOp('test_data', { id: 't1', description: 'test1', other: null }),
+      putOp('test_data', { id: 't2', description: 'test2', other: null })
+    ]);
+
+    expect(data.slice(4).sort(compareIds)).toMatchObject([
+      // Replicated insert
+      // We may eventually be able to de-duplicate this
+      putOp('test_data', { id: 't2', description: 'test2', other: null })
+    ]);
+  });
+
+  test('change default replica id by changing column type', async () => {
+    await using context = await WalStreamTestContext.open(factory);
+    // Change default replica id by changing column type
+    // Causes a re-import of the table.
+    const { pool } = context;
+    await context.updateSyncRules(BASIC_SYNC_RULES);
+
+    await pool.query(`CREATE TABLE test_data(id text primary key, description text)`);
+    await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
+
+    await context.replicateSnapshot();
+    context.startStreaming();
+
+    await pool.query(
+      { statement: `ALTER TABLE test_data ALTER COLUMN id TYPE varchar` },
+      { statement: `INSERT INTO test_data(id, description) VALUES('t2', 'test2')` }
+    );
+
+    const data = await context.getBucketData('global[]');
+
+    expect(data.slice(0, 2)).toMatchObject([
+      // Initial inserts
+      PUT_T1,
+      // Truncate
+      REMOVE_T1
+    ]);
+
+    // Snapshot - order doesn't matter
+    expect(data.slice(2, 4).sort(compareIds)).toMatchObject([PUT_T1, PUT_T2]);
+
+    expect(data.slice(4).sort(compareIds)).toMatchObject([
+      // Replicated insert
+      // We may eventually be able to de-duplicate this
+      PUT_T2
+    ]);
+  });
+
+  test('change index id by changing column type', async () => {
+    await using context = await WalStreamTestContext.open(factory);
+    // Change index replica id by changing column type
+    // Causes a re-import of the table.
+    // Secondary functionality tested here is that replica id column order stays
+    // the same between initial and incremental replication.
+    const { pool } = context;
+    await context.updateSyncRules(BASIC_SYNC_RULES);
+
+    await pool.query(`CREATE TABLE test_data(id text primary key, description text not null)`);
+    await pool.query(`CREATE UNIQUE INDEX i1 ON test_data(description, id)`);
+    await pool.query(`ALTER TABLE test_data REPLICA IDENTITY USING INDEX i1`);
+
+    await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
+
+    await context.replicateSnapshot();
+    context.startStreaming();
+
+    await pool.query(`INSERT INTO test_data(id, description) VALUES('t2', 'test2')`);
+
+    await pool.query(
+      { statement: `ALTER TABLE test_data ALTER COLUMN description TYPE varchar` },
+      { statement: `INSERT INTO test_data(id, description) VALUES('t3', 'test3')` }
+    );
+
+    const data = await context.getBucketData('global[]');
+
+    expect(data.slice(0, 2)).toMatchObject([
+      // Initial snapshot
+      PUT_T1,
+      // Streamed
+      PUT_T2
+    ]);
+
+    expect(data.slice(2, 4).sort(compareIds)).toMatchObject([
+      // Truncate - any order
+      REMOVE_T1,
+      REMOVE_T2
+    ]);
 
-  … (3 deleted lines not preserved in this render)
-      await context.replicateSnapshot();
-      context.startStreaming();
-
-      await pool.query(
-        { statement: `ALTER TABLE test_data1 RENAME TO test_data2` },
-        // We need an operation to detect the change
-        { statement: `INSERT INTO test_data2(id, description) VALUES('t2', 'test2')` }
-      );
-
-      const data = await context.getBucketData('global[]');
-
-      expect(data.slice(0, 2)).toMatchObject([
-        // Initial replication
-        putOp('test_data1', { id: 't1', description: 'test1' }),
-        // Initial truncate
-        removeOp('test_data1', 't1')
-      ]);
-
-      expect(data.slice(2, 4).sort(compareIds)).toMatchObject([
-        // Snapshot insert
-        putOp('test_data2', { id: 't1', description: 'test1' }),
-        putOp('test_data2', { id: 't2', description: 'test2' })
-      ]);
-      expect(data.slice(4)).toMatchObject([
-        // Replicated insert
-        // We may eventually be able to de-duplicate this
-        putOp('test_data2', { id: 't2', description: 'test2' })
-      ]);
-    })
-  );
-
-  test(
-    'rename table (3)',
-    walStreamTest(factory, async (context) => {
-      // Rename table in sync rules -> not in sync rules
-
-      const { pool } = context;
-
-      await context.updateSyncRules(BASIC_SYNC_RULES);
-
-      await pool.query(`CREATE TABLE test_data(id text primary key, description text)`);
-      await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
-
-      await context.replicateSnapshot();
-      context.startStreaming();
-
-      await pool.query(
-        { statement: `ALTER TABLE test_data RENAME TO test_data_na` },
-        // We need an operation to detect the change
-        { statement: `INSERT INTO test_data_na(id, description) VALUES('t2', 'test2')` }
-      );
-
-      const data = await context.getBucketData('global[]');
-
-      expect(data).toMatchObject([
-        // Initial replication
-        PUT_T1,
-        // Truncate
-        REMOVE_T1
-      ]);
-    })
-  );
-
-  test(
-    'change replica id',
-    walStreamTest(factory, async (context) => {
-      // Change replica id from default to full
-      // Causes a re-import of the table.
-
-      const { pool } = context;
-      await context.updateSyncRules(BASIC_SYNC_RULES);
-
-      await pool.query(`CREATE TABLE test_data(id text primary key, description text)`);
-      await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
-
-      await context.replicateSnapshot();
-      context.startStreaming();
-
-      await pool.query(
-        { statement: `ALTER TABLE test_data REPLICA IDENTITY FULL` },
-        // We need an operation to detect the change
-        { statement: `INSERT INTO test_data(id, description) VALUES('t2', 'test2')` }
-      );
-
-      const data = await context.getBucketData('global[]');
-
-      expect(data.slice(0, 2)).toMatchObject([
-        // Initial inserts
-        PUT_T1,
-        // Truncate
-        REMOVE_T1
-      ]);
-
-      // Snapshot - order doesn't matter
-      expect(data.slice(2, 4).sort(compareIds)).toMatchObject([PUT_T1, PUT_T2]);
-
-      expect(data.slice(4).sort(compareIds)).toMatchObject([
-        // Replicated insert
-        // We may eventually be able to de-duplicate this
-        PUT_T2
-      ]);
-    })
-  );
-
-  test(
-    'change full replica id by adding column',
-    walStreamTest(factory, async (context) => {
-      // Change replica id from full by adding column
-      // Causes a re-import of the table.
-      // Other changes such as renaming column would have the same effect
-
-      const { pool } = context;
-      await context.updateSyncRules(BASIC_SYNC_RULES);
-
-      await pool.query(`CREATE TABLE test_data(id text primary key, description text)`);
-      await pool.query(`ALTER TABLE test_data REPLICA IDENTITY FULL`);
-      await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
-
-      await context.replicateSnapshot();
-      context.startStreaming();
-
-      await pool.query(
-        { statement: `ALTER TABLE test_data ADD COLUMN other TEXT` },
-        { statement: `INSERT INTO test_data(id, description) VALUES('t2', 'test2')` }
-      );
-
-      const data = await context.getBucketData('global[]');
-
-      expect(data.slice(0, 2)).toMatchObject([
-        // Initial inserts
-        PUT_T1,
-        // Truncate
-        REMOVE_T1
-      ]);
-
-      // Snapshot - order doesn't matter
-      expect(data.slice(2, 4).sort(compareIds)).toMatchObject([
-        putOp('test_data', { id: 't1', description: 'test1', other: null }),
-        putOp('test_data', { id: 't2', description: 'test2', other: null })
-      ]);
-
-      expect(data.slice(4).sort(compareIds)).toMatchObject([
-        // Replicated insert
-        // We may eventually be able to de-duplicate this
-        putOp('test_data', { id: 't2', description: 'test2', other: null })
-      ]);
-    })
-  );
-
-  test(
-    'change default replica id by changing column type',
-    walStreamTest(factory, async (context) => {
-      // Change default replica id by changing column type
-      // Causes a re-import of the table.
-      const { pool } = context;
-      await context.updateSyncRules(BASIC_SYNC_RULES);
-
-      await pool.query(`CREATE TABLE test_data(id text primary key, description text)`);
-      await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
-
-      await context.replicateSnapshot();
-      context.startStreaming();
-
-      await pool.query(
-        { statement: `ALTER TABLE test_data ALTER COLUMN id TYPE varchar` },
-        { statement: `INSERT INTO test_data(id, description) VALUES('t2', 'test2')` }
-      );
-
-      const data = await context.getBucketData('global[]');
-
-      expect(data.slice(0, 2)).toMatchObject([
-        // Initial inserts
-        PUT_T1,
-        // Truncate
-        REMOVE_T1
-      ]);
-
-      // Snapshot - order doesn't matter
-      expect(data.slice(2, 4).sort(compareIds)).toMatchObject([PUT_T1, PUT_T2]);
-
-      expect(data.slice(4).sort(compareIds)).toMatchObject([
-        // Replicated insert
-        // We may eventually be able to de-duplicate this
-        PUT_T2
-      ]);
-    })
-  );
-
-  test(
-    'change index id by changing column type',
-    walStreamTest(factory, async (context) => {
-      // Change index replica id by changing column type
-      // Causes a re-import of the table.
-      // Secondary functionality tested here is that replica id column order stays
-      // the same between initial and incremental replication.
-      const { pool } = context;
-      await context.updateSyncRules(BASIC_SYNC_RULES);
-
-      await pool.query(`CREATE TABLE test_data(id text primary key, description text not null)`);
-      await pool.query(`CREATE UNIQUE INDEX i1 ON test_data(description, id)`);
-      await pool.query(`ALTER TABLE test_data REPLICA IDENTITY USING INDEX i1`);
-
-      await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
-
-      await context.replicateSnapshot();
-      context.startStreaming();
-
-      await pool.query(`INSERT INTO test_data(id, description) VALUES('t2', 'test2')`);
-
-      await pool.query(
-        { statement: `ALTER TABLE test_data ALTER COLUMN description TYPE varchar` },
-        { statement: `INSERT INTO test_data(id, description) VALUES('t3', 'test3')` }
-      );
-
-      const data = await context.getBucketData('global[]');
-
-      expect(data.slice(0, 2)).toMatchObject([
-        // Initial snapshot
-        PUT_T1,
-        // Streamed
-        PUT_T2
-      ]);
-
-      expect(data.slice(2, 4).sort(compareIds)).toMatchObject([
-        // Truncate - any order
-        REMOVE_T1,
-        REMOVE_T2
-      ]);
-
-      // Snapshot - order doesn't matter
-      expect(data.slice(4, 7).sort(compareIds)).toMatchObject([PUT_T1, PUT_T2, PUT_T3]);
-
-      expect(data.slice(7).sort(compareIds)).toMatchObject([
-        // Replicated insert
-        // We may eventually be able to de-duplicate this
-        PUT_T3
-      ]);
-    })
-  );
-
-  test(
-    'add to publication',
-    walStreamTest(factory, async (context) => {
-      // Add table to publication after initial replication
-      const { pool } = context;
-
-      await pool.query(`DROP PUBLICATION powersync`);
-      await pool.query(`CREATE TABLE test_foo(id text primary key, description text)`);
-      await pool.query(`CREATE PUBLICATION powersync FOR table test_foo`);
-
-      const storage = await context.updateSyncRules(BASIC_SYNC_RULES);
-
-      await pool.query(`CREATE TABLE test_data(id text primary key, description text)`);
-      await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
-
-      await context.replicateSnapshot();
-      context.startStreaming();
-
-      await pool.query(`INSERT INTO test_data(id, description) VALUES('t2', 'test2')`);
-
-      await pool.query(`ALTER PUBLICATION powersync ADD TABLE test_data`);
-      await pool.query(`INSERT INTO test_data(id, description) VALUES('t3', 'test3')`);
-
-      const data = await context.getBucketData('global[]');
-
-      expect(data.slice(0, 3).sort(compareIds)).toMatchObject([
-        // Snapshot insert - any order
-        PUT_T1,
-        PUT_T2,
-        PUT_T3
-      ]);
-
-      expect(data.slice(3)).toMatchObject([
-        // Replicated insert
-        // We may eventually be able to de-duplicate this
-        PUT_T3
-      ]);
-
-      const metrics = await storage.factory.getStorageMetrics();
-      expect(metrics.replication_size_bytes).toBeGreaterThan(0);
-    })
-  );
-
-  test(
-    'add to publication (not in sync rules)',
-    walStreamTest(factory, async (context) => {
-      // Add table to publication after initial replication
-      // Since the table is not in sync rules, it should not be replicated.
-      const { pool } = context;
-
-      await pool.query(`DROP PUBLICATION powersync`);
-      await pool.query(`CREATE TABLE test_foo(id text primary key, description text)`);
-      await pool.query(`CREATE PUBLICATION powersync FOR table test_foo`);
-
-      const storage = await context.updateSyncRules(BASIC_SYNC_RULES);
-
-      await pool.query(`CREATE TABLE test_other(id text primary key, description text)`);
-      await pool.query(`INSERT INTO test_other(id, description) VALUES('t1', 'test1')`);
-
-      await context.replicateSnapshot();
-      context.startStreaming();
-
-      await pool.query(`INSERT INTO test_other(id, description) VALUES('t2', 'test2')`);
-
-      await pool.query(`ALTER PUBLICATION powersync ADD TABLE test_other`);
-      await pool.query(`INSERT INTO test_other(id, description) VALUES('t3', 'test3')`);
-
-      const data = await context.getBucketData('global[]');
-      expect(data).toMatchObject([]);
-
-      const metrics = await storage.factory.getStorageMetrics();
-      expect(metrics.replication_size_bytes).toEqual(0);
-    })
-  );
-
-  test(
-    'replica identity nothing',
-    walStreamTest(factory, async (context) => {
-      // Technically not a schema change, but fits here.
-      // Replica ID works a little differently here - the table doesn't have
-      // one defined, but we generate a unique one for each replicated row.
-
-      const { pool } = context;
-      await context.updateSyncRules(BASIC_SYNC_RULES);
-
-      await pool.query(`CREATE TABLE test_data(id text primary key, description text)`);
-      await pool.query(`ALTER TABLE test_data REPLICA IDENTITY NOTHING`);
-      await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
-
-      await context.replicateSnapshot();
-      context.startStreaming();
-
-      await pool.query(`INSERT INTO test_data(id, description) VALUES('t2', 'test2')`);
-
-      // Just as an FYI - cannot update or delete here
-      expect(pool.query(`UPDATE test_data SET description = 'test2b' WHERE id = 't2'`)).rejects.toThrow(
-        'does not have a replica identity and publishes updates'
-      );
-
-      // Testing TRUNCATE is important here - this depends on current_data having unique
-      // ids.
-      await pool.query(`TRUNCATE TABLE test_data`);
-
-      const data = await context.getBucketData('global[]');
+    // Snapshot - order doesn't matter
+    expect(data.slice(4, 7).sort(compareIds)).toMatchObject([PUT_T1, PUT_T2, PUT_T3]);
 
-  … (53 deleted lines not preserved in this render)
+    expect(data.slice(7).sort(compareIds)).toMatchObject([
+      // Replicated insert
+      // We may eventually be able to de-duplicate this
+      PUT_T3
+    ]);
+  });
+
+  test('add to publication', async () => {
+    await using context = await WalStreamTestContext.open(factory);
+    // Add table to publication after initial replication
+    const { pool } = context;
+
+    await pool.query(`DROP PUBLICATION powersync`);
+    await pool.query(`CREATE TABLE test_foo(id text primary key, description text)`);
+    await pool.query(`CREATE PUBLICATION powersync FOR table test_foo`);
+
+    const storage = await context.updateSyncRules(BASIC_SYNC_RULES);
+
+    await pool.query(`CREATE TABLE test_data(id text primary key, description text)`);
+    await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
+
+    await context.replicateSnapshot();
+    context.startStreaming();
+
+    await pool.query(`INSERT INTO test_data(id, description) VALUES('t2', 'test2')`);
+
+    await pool.query(`ALTER PUBLICATION powersync ADD TABLE test_data`);
+    await pool.query(`INSERT INTO test_data(id, description) VALUES('t3', 'test3')`);
+
+    const data = await context.getBucketData('global[]');
+
+    expect(data.slice(0, 3).sort(compareIds)).toMatchObject([
+      // Snapshot insert - any order
+      PUT_T1,
+      PUT_T2,
+      PUT_T3
+    ]);
+
+    expect(data.slice(3)).toMatchObject([
+      // Replicated insert
+      // We may eventually be able to de-duplicate this
+      PUT_T3
+    ]);
+
+    const metrics = await storage.factory.getStorageMetrics();
+    expect(metrics.replication_size_bytes).toBeGreaterThan(0);
+  });
+
+  test('add to publication (not in sync rules)', async () => {
+    await using context = await WalStreamTestContext.open(factory);
+    // Add table to publication after initial replication
+    // Since the table is not in sync rules, it should not be replicated.
+    const { pool } = context;
+
+    await pool.query(`DROP PUBLICATION powersync`);
+    await pool.query(`CREATE TABLE test_foo(id text primary key, description text)`);
+    await pool.query(`CREATE PUBLICATION powersync FOR table test_foo`);
+
+    const storage = await context.updateSyncRules(BASIC_SYNC_RULES);
+
+    await pool.query(`CREATE TABLE test_other(id text primary key, description text)`);
+    await pool.query(`INSERT INTO test_other(id, description) VALUES('t1', 'test1')`);
+
+    await context.replicateSnapshot();
+    context.startStreaming();
+
+    await pool.query(`INSERT INTO test_other(id, description) VALUES('t2', 'test2')`);
+
+    await pool.query(`ALTER PUBLICATION powersync ADD TABLE test_other`);
+    await pool.query(`INSERT INTO test_other(id, description) VALUES('t3', 'test3')`);
+
+    const data = await context.getBucketData('global[]');
+    expect(data).toMatchObject([]);
+
+    const metrics = await storage.factory.getStorageMetrics();
+    expect(metrics.replication_size_bytes).toEqual(0);
+  });
+
+  test('replica identity nothing', async () => {
+    await using context = await WalStreamTestContext.open(factory);
+    // Technically not a schema change, but fits here.
+    // Replica ID works a little differently here - the table doesn't have
+    // one defined, but we generate a unique one for each replicated row.
+
+    const { pool } = context;
+    await context.updateSyncRules(BASIC_SYNC_RULES);
+
+    await pool.query(`CREATE TABLE test_data(id text primary key, description text)`);
+    await pool.query(`ALTER TABLE test_data REPLICA IDENTITY NOTHING`);
+    await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
+
+    await context.replicateSnapshot();
+    context.startStreaming();
+
+    await pool.query(`INSERT INTO test_data(id, description) VALUES('t2', 'test2')`);
+
+    // Just as an FYI - cannot update or delete here
+    expect(pool.query(`UPDATE test_data SET description = 'test2b' WHERE id = 't2'`)).rejects.toThrow(
+      'does not have a replica identity and publishes updates'
+    );
+
+    // Testing TRUNCATE is important here - this depends on current_data having unique
+    // ids.
+    await pool.query(`TRUNCATE TABLE test_data`);
+
+    const data = await context.getBucketData('global[]');
+
+    expect(data.slice(0, 2)).toMatchObject([
+      // Initial inserts
+      PUT_T1,
+      PUT_T2
+    ]);
+
+    expect(data.slice(2).sort(compareIds)).toMatchObject([
+      // Truncate
+      REMOVE_T1,
+      REMOVE_T2
+    ]);
+  });
+
+  test('replica identity default without PK', async () => {
+    await using context = await WalStreamTestContext.open(factory);
+    // Same as no replica identity
+    const { pool } = context;
+    await context.updateSyncRules(BASIC_SYNC_RULES);
+
+    await pool.query(`CREATE TABLE test_data(id text, description text)`);
+    await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
+
+    await context.replicateSnapshot();
+    context.startStreaming();
+
+    await pool.query(`INSERT INTO test_data(id, description) VALUES('t2', 'test2')`);
+
+    // Just as an FYI - cannot update or delete here
+    expect(pool.query(`UPDATE test_data SET description = 'test2b' WHERE id = 't2'`)).rejects.toThrow(
+      'does not have a replica identity and publishes updates'
+    );
+
+    // Testing TRUNCATE is important here - this depends on current_data having unique
+    // ids.
+    await pool.query(`TRUNCATE TABLE test_data`);
+
+    const data = await context.getBucketData('global[]');
+
+    expect(data.slice(0, 2)).toMatchObject([
+      // Initial inserts
+      PUT_T1,
+      PUT_T2
+    ]);
+
+    expect(data.slice(2).sort(compareIds)).toMatchObject([
+      // Truncate
+      REMOVE_T1,
+      REMOVE_T2
+    ]);
+  });
+
+  // Test consistency of table snapshots.
+  // Renames a table to trigger a snapshot.
+  // To trigger the failure, modify the snapshot implementation to
+  // introduce an arbitrary delay (in WalStream.ts):
+  //
+  //     const rs = await db.query(`select pg_current_wal_lsn() as lsn`);
+  //     lsn = rs.rows[0][0];
+  //     await new Promise((resolve) => setTimeout(resolve, 100));
+  //     await this.snapshotTable(batch, db, result.table);
+  test('table snapshot consistency', async () => {
+    await using context = await WalStreamTestContext.open(factory);
+    const { pool } = context;
+
+    await context.updateSyncRules(BASIC_SYNC_RULES);
+
+    // Rename table not in sync rules -> in sync rules
+    await pool.query(`CREATE TABLE test_data_old(id text primary key, num integer)`);
+    await pool.query(`INSERT INTO test_data_old(id, num) VALUES('t1', 0)`);
+    await pool.query(`INSERT INTO test_data_old(id, num) VALUES('t2', 0)`);
+
+    await context.replicateSnapshot();
+    context.startStreaming();
+
+    await pool.query(
+      { statement: `ALTER TABLE test_data_old RENAME TO test_data` },
+      // This first update will trigger a snapshot
+      { statement: `UPDATE test_data SET num = 0 WHERE id = 't2'` }
+    );
+
+    // Need some delay for the snapshot to be triggered
+    await setTimeout(5);
+
+    let stop = false;
+
+    let failures: any[] = [];
+
+    // This is a tight loop that checks that t2.num >= t1.num
+    const p = (async () => {
+      let lopid = '';
+      while (!stop) {
+        const data = await context.getCurrentBucketData('global[]');
+        const last = data[data.length - 1];
+        if (last == null) {
+          continue;
+        }
+        if (last.op_id != lopid) {
+          const reduced = reduceBucket(data);
+          reduced.shift();
+          lopid = last.op_id;
+
+          const t1 = reduced.find((op) => op.object_id == 't1');
+          const t2 = reduced.find((op) => op.object_id == 't2');
+          if (t1 && t2) {
+            const d1 = JSON.parse(t1.data as string);
+            const d2 = JSON.parse(t2.data as string);
+            if (d1.num > d2.num) {
+              failures.push({ d1, d2 });
+            }
+          }
+        }
+      }
+    })();
+
+    // We always have t2.num >= t1.num
+    for (let i = 1; i <= 20; i++) {
+      await pool.query({ statement: `UPDATE test_data SET num = ${i} WHERE id = 't2'` });
+    }
+    await pool.query({ statement: `UPDATE test_data SET num = 20 WHERE id = 't1'` });
+
+    await context.getBucketData('global[]');
+    stop = true;
+    await p;
+
+    expect(failures).toEqual([]);
+  });
 }
```
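The new `table snapshot consistency` test at the end of the second hunk exercises the WalStream changes directly: while a background loop re-reads the bucket, it reduces the oplog whenever a new `op_id` appears and records any state where `t1.num > t2.num`, which would indicate an inconsistent table snapshot. A rough sketch of what reducing a bucket means here, on the assumption that `reduceBucket` from `@powersync/service-core` compacts PUT/REMOVE operations down to the latest state per row (the test's `reduced.shift()` suggests the real function also emits a leading bucket-level op, which the loop discards):

```ts
// Illustrative only - not the actual @powersync/service-core implementation.
type OplogEntry = {
  op: 'PUT' | 'REMOVE';
  object_id: string;
  op_id: string;
  data?: string;
};

// Replays an ordered oplog so each row keeps only its final state:
// a later PUT overwrites an earlier one, and a REMOVE drops the row.
function reduceBucketSketch(ops: OplogEntry[]): OplogEntry[] {
  const latest = new Map<string, OplogEntry>();
  for (const entry of ops) {
    if (entry.op === 'PUT') {
      latest.set(entry.object_id, entry);
    } else {
      latest.delete(entry.object_id);
    }
  }
  return [...latest.values()];
}
```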