@powersync/service-module-mysql 0.7.3 → 0.8.0

This diff shows the changes between publicly released versions of this package, as published to a supported public registry. It is provided for informational purposes only.
Files changed (60)
  1. package/CHANGELOG.md +44 -0
  2. package/dev/docker/mysql/init-scripts/my.cnf +1 -3
  3. package/dist/api/MySQLRouteAPIAdapter.js +11 -3
  4. package/dist/api/MySQLRouteAPIAdapter.js.map +1 -1
  5. package/dist/common/ReplicatedGTID.js +4 -0
  6. package/dist/common/ReplicatedGTID.js.map +1 -1
  7. package/dist/common/common-index.d.ts +1 -2
  8. package/dist/common/common-index.js +1 -2
  9. package/dist/common/common-index.js.map +1 -1
  10. package/dist/common/mysql-to-sqlite.js +4 -0
  11. package/dist/common/mysql-to-sqlite.js.map +1 -1
  12. package/dist/common/schema-utils.d.ts +20 -0
  13. package/dist/common/{get-replication-columns.js → schema-utils.js} +73 -30
  14. package/dist/common/schema-utils.js.map +1 -0
  15. package/dist/replication/BinLogReplicationJob.js +10 -2
  16. package/dist/replication/BinLogReplicationJob.js.map +1 -1
  17. package/dist/replication/BinLogStream.d.ts +9 -6
  18. package/dist/replication/BinLogStream.js +99 -70
  19. package/dist/replication/BinLogStream.js.map +1 -1
  20. package/dist/replication/zongji/BinLogListener.d.ts +52 -5
  21. package/dist/replication/zongji/BinLogListener.js +302 -85
  22. package/dist/replication/zongji/BinLogListener.js.map +1 -1
  23. package/dist/replication/zongji/zongji-utils.d.ts +2 -1
  24. package/dist/replication/zongji/zongji-utils.js +3 -0
  25. package/dist/replication/zongji/zongji-utils.js.map +1 -1
  26. package/dist/types/node-sql-parser-extended-types.d.ts +31 -0
  27. package/dist/types/node-sql-parser-extended-types.js +2 -0
  28. package/dist/types/node-sql-parser-extended-types.js.map +1 -0
  29. package/dist/utils/mysql-utils.d.ts +4 -2
  30. package/dist/utils/mysql-utils.js +15 -3
  31. package/dist/utils/mysql-utils.js.map +1 -1
  32. package/dist/utils/parser-utils.d.ts +16 -0
  33. package/dist/utils/parser-utils.js +58 -0
  34. package/dist/utils/parser-utils.js.map +1 -0
  35. package/package.json +10 -9
  36. package/src/api/MySQLRouteAPIAdapter.ts +11 -3
  37. package/src/common/ReplicatedGTID.ts +6 -1
  38. package/src/common/common-index.ts +1 -2
  39. package/src/common/mysql-to-sqlite.ts +3 -0
  40. package/src/common/{get-replication-columns.ts → schema-utils.ts} +96 -37
  41. package/src/replication/BinLogReplicationJob.ts +12 -2
  42. package/src/replication/BinLogStream.ts +119 -91
  43. package/src/replication/zongji/BinLogListener.ts +370 -93
  44. package/src/replication/zongji/zongji-utils.ts +6 -1
  45. package/src/types/node-sql-parser-extended-types.ts +25 -0
  46. package/src/utils/mysql-utils.ts +19 -4
  47. package/src/utils/parser-utils.ts +73 -0
  48. package/test/src/BinLogListener.test.ts +415 -32
  49. package/test/src/BinLogStream.test.ts +128 -52
  50. package/test/src/BinlogStreamUtils.ts +12 -2
  51. package/test/src/parser-utils.test.ts +24 -0
  52. package/test/src/schema-changes.test.ts +663 -0
  53. package/test/src/util.ts +6 -0
  54. package/tsconfig.tsbuildinfo +1 -1
  55. package/dist/common/get-replication-columns.d.ts +0 -12
  56. package/dist/common/get-replication-columns.js.map +0 -1
  57. package/dist/common/get-tables-from-pattern.d.ts +0 -7
  58. package/dist/common/get-tables-from-pattern.js +0 -28
  59. package/dist/common/get-tables-from-pattern.js.map +0 -1
  60. package/src/common/get-tables-from-pattern.ts +0 -44
package/test/src/schema-changes.test.ts ADDED
@@ -0,0 +1,663 @@
+ import { compareIds, putOp, removeOp, test_utils } from '@powersync/service-core-tests';
+ import { beforeAll, describe, expect, test } from 'vitest';
+
+ import { storage } from '@powersync/service-core';
+ import { createTestDb, describeWithStorage, TEST_CONNECTION_OPTIONS } from './util.js';
+ import { BinlogStreamTestContext } from './BinlogStreamUtils.js';
+ import timers from 'timers/promises';
+ import { MySQLConnectionManager } from '@module/replication/MySQLConnectionManager.js';
+ import { getMySQLVersion, qualifiedMySQLTable, satisfiesVersion } from '@module/utils/mysql-utils.js';
+
+ describe('MySQL Schema Changes', () => {
+   describeWithStorage({ timeout: 20_000 }, defineTests);
+ });
+
+ const BASIC_SYNC_RULES = `
+ bucket_definitions:
+   global:
+     data:
+       - SELECT id, * FROM "test_data"
+ `;
+
+ const PUT_T1 = test_utils.putOp('test_data', { id: 't1', description: 'test1' });
+ const PUT_T2 = test_utils.putOp('test_data', { id: 't2', description: 'test2' });
+ const PUT_T3 = test_utils.putOp('test_data', { id: 't3', description: 'test3' });
+
+ const REMOVE_T1 = test_utils.removeOp('test_data', 't1');
+ const REMOVE_T2 = test_utils.removeOp('test_data', 't2');
+
+ function defineTests(factory: storage.TestStorageFactory) {
+   let isMySQL57: boolean = false;
+
+   beforeAll(async () => {
+     const connectionManager = new MySQLConnectionManager(TEST_CONNECTION_OPTIONS, {});
+     const connection = await connectionManager.getConnection();
+     const version = await getMySQLVersion(connection);
+     isMySQL57 = satisfiesVersion(version, '5.7.x');
+     connection.release();
+     await connectionManager.end();
+   });
+
+   test('Re-create table', async () => {
+     await using context = await BinlogStreamTestContext.open(factory);
+     // Drop a table and re-create it.
+     await context.updateSyncRules(BASIC_SYNC_RULES);
+
+     const { connectionManager } = context;
+     await connectionManager.query(`CREATE TABLE test_data (id CHAR(36) PRIMARY KEY, description TEXT)`);
+
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t1','test1')`);
+
+     await context.replicateSnapshot();
+     await context.startStreaming();
+
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t2','test2')`);
+
+     // Dropping the table immediately leads to a rare race condition where Zongji tries to get the table information
+     // for the previous write event, but the table is already gone. Without the table info the tablemap event can't be
+     // correctly populated and replication will fail.
+     await timers.setTimeout(50);
+     await connectionManager.query(`DROP TABLE test_data`);
+     await connectionManager.query(`CREATE TABLE test_data (id CHAR(36) PRIMARY KEY, description TEXT)`);
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t3','test3')`);
+
+     const data = await context.getBucketData('global[]');
+
+     // Initial inserts
+     expect(data.slice(0, 2)).toMatchObject([PUT_T1, PUT_T2]);
+
+     // Truncate - order doesn't matter
+     expect(data.slice(2, 4).sort(compareIds)).toMatchObject([REMOVE_T1, REMOVE_T2]);
+
+     // Due to the async nature of this replication test,
+     // the insert for t3 is picked up both in the snapshot and in the replication stream.
+     expect(data.slice(4)).toMatchObject([
+       PUT_T3, // Snapshot insert
+       PUT_T3 // Insert from binlog replication stream
+     ]);
+   });
+
+   test('Create table: New table is in the sync rules', async () => {
+     await using context = await BinlogStreamTestContext.open(factory);
+     const { connectionManager } = context;
+     await context.updateSyncRules(BASIC_SYNC_RULES);
+
+     await context.replicateSnapshot();
+     await context.startStreaming();
+
+     // Add table after initial replication
+     await connectionManager.query(`CREATE TABLE test_data (id CHAR(36) PRIMARY KEY, description TEXT)`);
+
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t1','test1')`);
+
+     const data = await context.getBucketData('global[]');
+
+     expect(data).toMatchObject([PUT_T1, PUT_T1]);
+   });
+
+   test('Create table: New table is created from existing data', async () => {
+     // CREATE TABLE ... SELECT is not allowed in MySQL 5.7 when enforce_gtid_consistency=ON
+     if (!isMySQL57) {
+       await using context = await BinlogStreamTestContext.open(factory);
+       const { connectionManager } = context;
+       await context.updateSyncRules(BASIC_SYNC_RULES);
+
+       await connectionManager.query(`CREATE TABLE test_data_from
+         (
+           id CHAR(36) PRIMARY KEY,
+           description TEXT
+         )`);
+       await connectionManager.query(`INSERT INTO test_data_from(id, description)
+         VALUES ('t1', 'test1')`);
+       await connectionManager.query(`INSERT INTO test_data_from(id, description)
+         VALUES ('t2', 'test2')`);
+       await connectionManager.query(`INSERT INTO test_data_from(id, description)
+         VALUES ('t3', 'test3')`);
+
+       await context.replicateSnapshot();
+       await context.startStreaming();
+
+       // Add table after initial replication
+       await connectionManager.query(`CREATE TABLE test_data SELECT * FROM test_data_from`);
+
+       const data = await context.getBucketData('global[]');
+
+       // Interestingly, the create with select triggers binlog row write events
+       expect(data).toMatchObject([
+         // From snapshot
+         PUT_T1,
+         PUT_T2,
+         PUT_T3,
+         // From replication stream
+         PUT_T1,
+         PUT_T2,
+         PUT_T3
+       ]);
+     }
+   });
+
+   test('Create table: New table is not in the sync rules', async () => {
+     await using context = await BinlogStreamTestContext.open(factory);
+     const { connectionManager } = context;
+     await context.updateSyncRules(BASIC_SYNC_RULES);
+
+     await context.replicateSnapshot();
+     await context.startStreaming();
+
+     // Add table after initial replication
+     await connectionManager.query(`CREATE TABLE test_data_ignored (id CHAR(36) PRIMARY KEY, description TEXT)`);
+
+     await connectionManager.query(`INSERT INTO test_data_ignored(id, description) VALUES('t1','test ignored')`);
+
+     const data = await context.getBucketData('global[]');
+
+     expect(data).toMatchObject([]);
+   });
+
+   test('Rename table: Table not in the sync rules to one in the sync rules', async () => {
+     await using context = await BinlogStreamTestContext.open(factory);
+     const { connectionManager } = context;
+     await context.updateSyncRules(BASIC_SYNC_RULES);
+
+     // Rename a table that is not in the sync rules to one that is
+     await connectionManager.query(`CREATE TABLE test_data_old (id CHAR(36) PRIMARY KEY, description TEXT)`);
+     await connectionManager.query(`INSERT INTO test_data_old(id, description) VALUES('t1','test1')`);
+
+     await context.replicateSnapshot();
+     await context.startStreaming();
+
+     await connectionManager.query(`RENAME TABLE test_data_old TO test_data`);
+
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t2','test2')`);
+
+     const data = await context.getBucketData('global[]');
+
+     expect(data).toMatchObject([
+       // Snapshot insert
+       PUT_T1,
+       PUT_T2,
+       // Replicated insert
+       PUT_T2
+     ]);
+   });
+
+   test('Rename table: Table in the sync rules to another table in the sync rules', async () => {
+     await using context = await BinlogStreamTestContext.open(factory);
+
+     await context.updateSyncRules(`
+       bucket_definitions:
+         global:
+           data:
+             - SELECT id, * FROM "test_data%"
+     `);
+
+     const { connectionManager } = context;
+     await connectionManager.query(`CREATE TABLE test_data1 (id CHAR(36) PRIMARY KEY, description TEXT)`);
+     await connectionManager.query(`INSERT INTO test_data1(id, description) VALUES('t1','test1')`);
+
+     await context.replicateSnapshot();
+     await context.startStreaming();
+
+     await connectionManager.query(`RENAME TABLE test_data1 TO test_data2`);
+     await connectionManager.query(`INSERT INTO test_data2(id, description) VALUES('t2','test2')`);
+
+     const data = await context.getBucketData('global[]');
+
+     expect(data.slice(0, 2)).toMatchObject([
+       // Initial replication
+       putOp('test_data1', { id: 't1', description: 'test1' }),
+       // Initial truncate
+       removeOp('test_data1', 't1')
+     ]);
+
+     expect(data.slice(2, 4).sort(compareIds)).toMatchObject([
+       // Snapshot insert
+       putOp('test_data2', { id: 't1', description: 'test1' }),
+       putOp('test_data2', { id: 't2', description: 'test2' })
+     ]);
+     expect(data.slice(4)).toMatchObject([
+       // Replicated insert
+       // We may eventually be able to de-duplicate this
+       putOp('test_data2', { id: 't2', description: 'test2' })
+     ]);
+   });
+
+   test('Rename table: Table in the sync rules to not in the sync rules', async () => {
+     await using context = await BinlogStreamTestContext.open(factory);
+     await context.updateSyncRules(BASIC_SYNC_RULES);
+
+     const { connectionManager } = context;
+     await connectionManager.query(`CREATE TABLE test_data (id CHAR(36) PRIMARY KEY, description TEXT)`);
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t1','test1')`);
+
+     await context.replicateSnapshot();
+     await context.startStreaming();
+
+     await connectionManager.query(`RENAME TABLE test_data TO test_data_not_in_sync_rules`);
+     await connectionManager.query(`INSERT INTO test_data_not_in_sync_rules(id, description) VALUES('t2','test2')`);
+
+     const data = await context.getBucketData('global[]');
+
+     expect(data).toMatchObject([
+       // Initial replication
+       PUT_T1,
+       // Truncate
+       REMOVE_T1
+     ]);
+   });
+
+   test('Change Replication Identity default to full by dropping the primary key', async () => {
+     await using context = await BinlogStreamTestContext.open(factory);
+     // Change replica id from default (PK) to full
+     // Requires re-snapshotting the table.
+
+     await context.updateSyncRules(BASIC_SYNC_RULES);
+     const { connectionManager } = context;
+     await connectionManager.query(`CREATE TABLE test_data (id CHAR(36) PRIMARY KEY, description TEXT)`);
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t1','test1')`);
+
+     await context.replicateSnapshot();
+     await context.startStreaming();
+
+     await connectionManager.query(`ALTER TABLE test_data DROP PRIMARY KEY;`);
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t2','test2')`);
+
+     const data = await context.getBucketData('global[]');
+
+     expect(data.slice(0, 2)).toMatchObject([
+       // Initial inserts
+       PUT_T1,
+       // Truncate
+       REMOVE_T1
+     ]);
+
+     expect(data.slice(2)).toMatchObject([
+       // Snapshot inserts
+       PUT_T1,
+       PUT_T2,
+       // Replicated insert
+       PUT_T2
+     ]);
+   });
+
+   test('Change Replication Identity full by adding a column', async () => {
+     await using context = await BinlogStreamTestContext.open(factory);
+     // Change replica id from full by adding a column
+     // Causes a re-import of the table.
+     // Other changes such as renaming a column would have the same effect
+
+     await context.updateSyncRules(BASIC_SYNC_RULES);
+
+     const { connectionManager } = context;
+     // No primary key, no unique column, so full replication identity will be used
+     await connectionManager.query(`CREATE TABLE test_data (id CHAR(36), description TEXT)`);
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t1','test1')`);
+
+     await context.replicateSnapshot();
+     await context.startStreaming();
+
+     await connectionManager.query(`ALTER TABLE test_data ADD COLUMN new_column TEXT`);
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t2','test2')`);
+
+     const data = await context.getBucketData('global[]');
+
+     expect(data.slice(0, 2)).toMatchObject([
+       // Initial inserts
+       PUT_T1,
+       // Truncate
+       REMOVE_T1
+     ]);
+
+     // Snapshot - order doesn't matter
+     expect(data.slice(2)).toMatchObject([
+       // Snapshot inserts
+       putOp('test_data', { id: 't1', description: 'test1', new_column: null }),
+       putOp('test_data', { id: 't2', description: 'test2', new_column: null }),
+       // Replicated insert
+       putOp('test_data', { id: 't2', description: 'test2', new_column: null })
+     ]);
+   });
+
+   test('Change Replication Identity from full to index by adding a unique constraint', async () => {
+     await using context = await BinlogStreamTestContext.open(factory);
+     // Change replica id from full by adding a unique index that can serve as the replication id
+
+     await context.updateSyncRules(BASIC_SYNC_RULES);
+
+     const { connectionManager } = context;
+     // No primary key, no unique column, so full replication identity will be used
+     await connectionManager.query(`CREATE TABLE test_data (id CHAR(36), description TEXT)`);
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t1','test1')`);
+
+     await context.replicateSnapshot();
+     await context.startStreaming();
+
+     await connectionManager.query(`ALTER TABLE test_data ADD UNIQUE (id)`);
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t2','test2')`);
+
+     const data = await context.getBucketData('global[]');
+
+     expect(data.slice(0, 2)).toMatchObject([
+       // Initial inserts
+       PUT_T1,
+       // Truncate
+       REMOVE_T1
+     ]);
+
+     // Snapshot - order doesn't matter
+     expect(data.slice(2)).toMatchObject([
+       // Snapshot inserts
+       PUT_T1,
+       PUT_T2,
+       // Replicated insert
+       PUT_T2
+     ]);
+   });
+
+   test('Change Replication Identity from full to index by adding a unique index', async () => {
+     await using context = await BinlogStreamTestContext.open(factory);
+     // Change replica id from full by adding a unique index that can serve as the replication id
+     await context.updateSyncRules(BASIC_SYNC_RULES);
+
+     const { connectionManager } = context;
+     // No primary key, no unique column, so full replication identity will be used
+     await connectionManager.query(`CREATE TABLE test_data (id CHAR(36), description TEXT)`);
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t1','test1')`);
+
+     await context.replicateSnapshot();
+     await context.startStreaming();
+
+     await connectionManager.query(`CREATE UNIQUE INDEX id_idx ON test_data (id)`);
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t2','test2')`);
+
+     const data = await context.getBucketData('global[]');
+
+     expect(data.slice(0, 2)).toMatchObject([
+       // Initial inserts
+       PUT_T1,
+       // Truncate
+       REMOVE_T1
+     ]);
+
+     // Snapshot - order doesn't matter
+     expect(data.slice(2)).toMatchObject([
+       // Snapshot inserts
+       PUT_T1,
+       PUT_T2,
+       // Replicated insert
+       PUT_T2
+     ]);
+   });
+
+   test('Change Replication Identity from index by dropping the unique constraint', async () => {
+     await using context = await BinlogStreamTestContext.open(factory);
+     // Change replica id from index to full by dropping the unique constraint that served as the replication id
+
+     await context.updateSyncRules(BASIC_SYNC_RULES);
+
+     const { connectionManager } = context;
+     // Unique constraint on id
+     await connectionManager.query(`CREATE TABLE test_data (id CHAR(36), description TEXT, UNIQUE (id))`);
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t1','test1')`);
+
+     await context.replicateSnapshot();
+     await context.startStreaming();
+
+     await connectionManager.query(`ALTER TABLE test_data DROP INDEX id`);
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t2','test2')`);
+
+     const data = await context.getBucketData('global[]');
+
+     expect(data.slice(0, 2)).toMatchObject([
+       // Initial inserts
+       PUT_T1,
+       // Truncate
+       REMOVE_T1
+     ]);
+
+     // Snapshot - order doesn't matter
+     expect(data.slice(2)).toMatchObject([
+       // Snapshot inserts
+       PUT_T1,
+       PUT_T2,
+       // Replicated insert
+       PUT_T2
+     ]);
+   });
+
+   test('Change Replication Identity default by modifying primary key column type', async () => {
+     await using context = await BinlogStreamTestContext.open(factory);
+     await context.updateSyncRules(BASIC_SYNC_RULES);
+
+     const { connectionManager } = context;
+     await connectionManager.query(`CREATE TABLE test_data (id CHAR(36) PRIMARY KEY, description TEXT)`);
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t1','test1')`);
+
+     await context.replicateSnapshot();
+     await context.startStreaming();
+
+     await connectionManager.query(`ALTER TABLE test_data MODIFY COLUMN id VARCHAR(36)`);
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t2','test2')`);
+
+     const data = await context.getBucketData('global[]');
+
+     expect(data.slice(0, 2)).toMatchObject([
+       // Initial inserts
+       PUT_T1,
+       // Truncate
+       REMOVE_T1
+     ]);
+
+     expect(data.slice(2)).toMatchObject([
+       // Snapshot inserts
+       PUT_T1,
+       PUT_T2,
+       // Replicated insert
+       PUT_T2
+     ]);
+   });
+
+   test('Change Replication Identity by changing the type of a column in a compound unique index', async () => {
+     await using context = await BinlogStreamTestContext.open(factory);
+     // Change index replica id by changing column type
+     // Causes a re-import of the table.
+     // Secondary functionality tested here is that replica id column order stays
+     // the same between initial and incremental replication.
+     await context.updateSyncRules(BASIC_SYNC_RULES);
+
+     const { connectionManager } = context;
+     await connectionManager.query(`CREATE TABLE test_data (id CHAR(36), description CHAR(100))`);
+     await connectionManager.query(`ALTER TABLE test_data ADD INDEX (id, description)`);
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t1','test1')`);
+
+     await context.replicateSnapshot();
+     await context.startStreaming();
+
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t2','test2')`);
+
+     await connectionManager.query(`ALTER TABLE test_data MODIFY COLUMN id VARCHAR(36)`);
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t3','test3')`);
+
+     const data = await context.getBucketData('global[]');
+
+     expect(data.slice(0, 2)).toMatchObject([
+       // Initial snapshot
+       PUT_T1,
+       // Streamed
+       PUT_T2
+     ]);
+
+     expect(data.slice(2, 4).sort(compareIds)).toMatchObject([
+       // Truncate - any order
+       REMOVE_T1,
+       REMOVE_T2
+     ]);
+
+     // Snapshot - order doesn't matter
+     expect(data.slice(4, 7).sort(compareIds)).toMatchObject([PUT_T1, PUT_T2, PUT_T3]);
+
+     expect(data.slice(7).sort(compareIds)).toMatchObject([
+       // Replicated insert
+       PUT_T3
+     ]);
+   });
+
+   test('Add column: New non replication identity column does not trigger re-sync', async () => {
+     await using context = await BinlogStreamTestContext.open(factory);
+     // Added column is not part of the replication identity, so it should not cause a re-import
+
+     await context.updateSyncRules(BASIC_SYNC_RULES);
+
+     const { connectionManager } = context;
+     await connectionManager.query(`CREATE TABLE test_data (id CHAR(36) PRIMARY KEY, description TEXT)`);
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t1','test1')`);
+
+     await context.replicateSnapshot();
+     await context.startStreaming();
+
+     await connectionManager.query(`ALTER TABLE test_data ADD COLUMN new_column TEXT`);
+     await connectionManager.query(
+       `INSERT INTO test_data(id, description, new_column) VALUES('t2','test2', 'new_data')`
+     );
+
+     const data = await context.getBucketData('global[]');
+
+     expect(data.slice(0, 1)).toMatchObject([PUT_T1]);
+
+     expect(data.slice(1)).toMatchObject([
+       // Replicated insert
+       putOp('test_data', { id: 't2', description: 'test2', new_column: 'new_data' })
+     ]);
+   });
+
+   test('Modify non replication identity column', async () => {
+     await using context = await BinlogStreamTestContext.open(factory);
+     // Changing the type of a column that is not part of the replication identity does not cause a re-sync of the table.
+     await context.updateSyncRules(BASIC_SYNC_RULES);
+
+     const { connectionManager } = context;
+     await connectionManager.query(`CREATE TABLE test_data (id CHAR(36) PRIMARY KEY, description TEXT)`);
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t1','test1')`);
+
+     await context.replicateSnapshot();
+     await context.startStreaming();
+
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t2','test2')`);
+
+     await connectionManager.query(`ALTER TABLE test_data MODIFY COLUMN description VARCHAR(100)`);
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t3','test3')`);
+
+     const data = await context.getBucketData('global[]');
+
+     expect(data.slice(0, 2)).toMatchObject([
+       // Initial snapshot
+       PUT_T1,
+       // Streamed
+       PUT_T2
+     ]);
+
+     expect(data.slice(2)).toMatchObject([
+       // Replicated insert
+       PUT_T3
+     ]);
+   });
+
+   test('Drop a table in the sync rules', async () => {
+     await using context = await BinlogStreamTestContext.open(factory);
+     // Technically not a schema change, but fits here.
+     await context.updateSyncRules(BASIC_SYNC_RULES);
+
+     const { connectionManager } = context;
+     await connectionManager.query(`CREATE TABLE test_data (id CHAR(36), description CHAR(100))`);
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t1','test1')`);
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t2','test2')`);
+
+     await context.replicateSnapshot();
+     await context.startStreaming();
+
+     await connectionManager.query(`DROP TABLE test_data`);
+
+     const data = await context.getBucketData('global[]');
+
+     expect(data.slice(0, 2)).toMatchObject([
+       // Initial inserts
+       PUT_T1,
+       PUT_T2
+     ]);
+
+     expect(data.slice(2).sort(compareIds)).toMatchObject([
+       // Drop
+       REMOVE_T1,
+       REMOVE_T2
+     ]);
+   });
+
+   test('Schema changes for tables in other schemas in the sync rules', async () => {
+     await using context = await BinlogStreamTestContext.open(factory);
+     // Technically not a schema change, but fits here.
+     await context.updateSyncRules(`
+       bucket_definitions:
+         multi_schema_test_data:
+           data:
+             - SELECT id, description, num FROM "multi_schema"."test_data"
+     `);
+
+     const { connectionManager } = context;
+     await createTestDb(connectionManager, 'multi_schema');
+     const testTable = qualifiedMySQLTable('test_data', 'multi_schema');
+     await connectionManager.query(
+       `CREATE TABLE IF NOT EXISTS ${testTable} (id CHAR(36) PRIMARY KEY, description TEXT);`
+     );
+     await connectionManager.query(`INSERT INTO ${testTable}(id, description) VALUES('t1','test1')`);
+     await connectionManager.query(`INSERT INTO ${testTable}(id, description) VALUES('t2','test2')`);
+
+     await context.replicateSnapshot();
+     await context.startStreaming();
+
+     await connectionManager.query(`DROP TABLE ${testTable}`);
+
+     const data = await context.getBucketData('multi_schema_test_data[]');
+
+     expect(data.slice(0, 2)).toMatchObject([
+       // Initial inserts
+       PUT_T1,
+       PUT_T2
+     ]);
+
+     expect(data.slice(2).sort(compareIds)).toMatchObject([
+       // Drop
+       REMOVE_T1,
+       REMOVE_T2
+     ]);
+   });
+
+   test('Changes for tables in schemas not in the sync rules are ignored', async () => {
+     await using context = await BinlogStreamTestContext.open(factory);
+     await context.updateSyncRules(BASIC_SYNC_RULES);
+
+     const { connectionManager } = context;
+     await connectionManager.query(`CREATE TABLE test_data (id CHAR(36), description CHAR(100))`);
+
+     await createTestDb(connectionManager, 'multi_schema');
+     const testTable = qualifiedMySQLTable('test_data_ignored', 'multi_schema');
+     await connectionManager.query(
+       `CREATE TABLE IF NOT EXISTS ${testTable} (id CHAR(36) PRIMARY KEY, description TEXT);`
+     );
+     await connectionManager.query(`INSERT INTO ${testTable}(id, description) VALUES('t1','test1')`);
+     await connectionManager.query(`INSERT INTO ${testTable}(id, description) VALUES('t2','test2')`);
+
+     await context.replicateSnapshot();
+     await context.startStreaming();
+
+     await connectionManager.query(`INSERT INTO ${testTable}(id, description) VALUES('t3','test3')`);
+     await connectionManager.query(`DROP TABLE ${testTable}`);
+
+     // Force a commit on the watched schema to advance the checkpoint
+     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t1','test1')`);
+
+     const data = await context.getBucketData('global[]');
+
+     // Should only include the entry used to advance the checkpoint
+     expect(data).toMatchObject([PUT_T1]);
+   });
+ }
package/test/src/util.ts CHANGED
@@ -6,6 +6,7 @@ import mysqlPromise from 'mysql2/promise';
  import { env } from './env.js';
  import { describe, TestOptions } from 'vitest';
  import { TestStorageFactory } from '@powersync/service-core';
+ import { MySQLConnectionManager } from '@module/replication/MySQLConnectionManager.js';

  export const TEST_URI = env.MYSQL_TEST_URI;

@@ -52,3 +53,8 @@ export async function clearTestDb(connection: mysqlPromise.Connection) {
      }
    }
  }
+
+ export async function createTestDb(connectionManager: MySQLConnectionManager, dbName: string) {
+   await connectionManager.query(`DROP DATABASE IF EXISTS ${dbName}`);
+   await connectionManager.query(`CREATE DATABASE IF NOT EXISTS ${dbName}`);
+ }
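
For orientation, every test in the new schema-changes.test.ts suite above follows the same shape: open a BinlogStreamTestContext, apply sync rules, take an initial snapshot, start binlog streaming, issue DDL, then assert on the resulting bucket operations. The sketch below condenses that pattern using only helpers visible in this diff; the wrapper name defineExampleTest, the sync rules, and the specific DDL are illustrative assumptions, not package contents.

import { storage } from '@powersync/service-core';
import { expect, test } from 'vitest';
import { BinlogStreamTestContext } from './BinlogStreamUtils.js';

// Illustrative sync rules for the sketch (not part of the package).
const EXAMPLE_SYNC_RULES = `
bucket_definitions:
  global:
    data:
      - SELECT id, * FROM "test_data"
`;

// Hypothetical wrapper mirroring how defineTests is wired up via describeWithStorage.
function defineExampleTest(factory: storage.TestStorageFactory) {
  test('example: schema change is picked up from the binlog', async () => {
    // The context manages the connection pool, storage, and replication job.
    await using context = await BinlogStreamTestContext.open(factory);
    await context.updateSyncRules(EXAMPLE_SYNC_RULES);

    const { connectionManager } = context;
    await connectionManager.query(`CREATE TABLE test_data (id CHAR(36) PRIMARY KEY, description TEXT)`);

    await context.replicateSnapshot(); // initial snapshot
    await context.startStreaming();    // begin binlog replication

    // DDL and DML issued here arrive via the binlog listener.
    await connectionManager.query(`ALTER TABLE test_data ADD COLUMN new_column TEXT`);
    await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t1','test1')`);

    // Bucket data reflects both snapshot and streamed operations;
    // assert on put/remove operations as the tests above do.
    const data = await context.getBucketData('global[]');
    expect(data.length).toBeGreaterThan(0);
  });
}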