@powersync/service-module-mysql 0.7.4 → 0.8.0
This diff compares the contents of package versions as they were publicly released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- package/CHANGELOG.md +30 -0
- package/dev/docker/mysql/init-scripts/my.cnf +1 -3
- package/dist/api/MySQLRouteAPIAdapter.js +11 -3
- package/dist/api/MySQLRouteAPIAdapter.js.map +1 -1
- package/dist/common/ReplicatedGTID.js +4 -0
- package/dist/common/ReplicatedGTID.js.map +1 -1
- package/dist/common/common-index.d.ts +1 -2
- package/dist/common/common-index.js +1 -2
- package/dist/common/common-index.js.map +1 -1
- package/dist/common/mysql-to-sqlite.js +4 -0
- package/dist/common/mysql-to-sqlite.js.map +1 -1
- package/dist/common/schema-utils.d.ts +20 -0
- package/dist/common/{get-replication-columns.js → schema-utils.js} +73 -30
- package/dist/common/schema-utils.js.map +1 -0
- package/dist/replication/BinLogStream.d.ts +9 -6
- package/dist/replication/BinLogStream.js +99 -70
- package/dist/replication/BinLogStream.js.map +1 -1
- package/dist/replication/zongji/BinLogListener.d.ts +52 -5
- package/dist/replication/zongji/BinLogListener.js +302 -85
- package/dist/replication/zongji/BinLogListener.js.map +1 -1
- package/dist/replication/zongji/zongji-utils.d.ts +2 -1
- package/dist/replication/zongji/zongji-utils.js +3 -0
- package/dist/replication/zongji/zongji-utils.js.map +1 -1
- package/dist/types/node-sql-parser-extended-types.d.ts +31 -0
- package/dist/types/node-sql-parser-extended-types.js +2 -0
- package/dist/types/node-sql-parser-extended-types.js.map +1 -0
- package/dist/utils/mysql-utils.d.ts +4 -2
- package/dist/utils/mysql-utils.js +15 -3
- package/dist/utils/mysql-utils.js.map +1 -1
- package/dist/utils/parser-utils.d.ts +16 -0
- package/dist/utils/parser-utils.js +58 -0
- package/dist/utils/parser-utils.js.map +1 -0
- package/package.json +9 -8
- package/src/api/MySQLRouteAPIAdapter.ts +11 -3
- package/src/common/ReplicatedGTID.ts +6 -1
- package/src/common/common-index.ts +1 -2
- package/src/common/mysql-to-sqlite.ts +3 -0
- package/src/common/{get-replication-columns.ts → schema-utils.ts} +96 -37
- package/src/replication/BinLogStream.ts +119 -91
- package/src/replication/zongji/BinLogListener.ts +370 -93
- package/src/replication/zongji/zongji-utils.ts +6 -1
- package/src/types/node-sql-parser-extended-types.ts +25 -0
- package/src/utils/mysql-utils.ts +19 -4
- package/src/utils/parser-utils.ts +73 -0
- package/test/src/BinLogListener.test.ts +415 -32
- package/test/src/BinLogStream.test.ts +128 -52
- package/test/src/BinlogStreamUtils.ts +12 -2
- package/test/src/parser-utils.test.ts +24 -0
- package/test/src/schema-changes.test.ts +663 -0
- package/test/src/util.ts +6 -0
- package/tsconfig.tsbuildinfo +1 -1
- package/dist/common/get-replication-columns.d.ts +0 -12
- package/dist/common/get-replication-columns.js.map +0 -1
- package/dist/common/get-tables-from-pattern.d.ts +0 -7
- package/dist/common/get-tables-from-pattern.js +0 -28
- package/dist/common/get-tables-from-pattern.js.map +0 -1
- package/src/common/get-tables-from-pattern.ts +0 -44
package/src/replication/BinLogStream.ts

@@ -11,6 +11,7 @@ import {
   framework,
   getUuidReplicaIdentityBson,
   MetricsEngine,
+  SourceTable,
   storage
 } from '@powersync/service-core';
 import mysql from 'mysql2';
@@ -18,10 +19,10 @@ import mysqlPromise from 'mysql2/promise';
 
 import { TableMapEntry } from '@powersync/mysql-zongji';
 import * as common from '../common/common-index.js';
-import { createRandomServerId } from '../utils/mysql-utils.js';
+import { createRandomServerId, qualifiedMySQLTable } from '../utils/mysql-utils.js';
 import { MySQLConnectionManager } from './MySQLConnectionManager.js';
 import { ReplicationMetric } from '@powersync/service-types';
-import { BinLogEventHandler, BinLogListener, Row } from './zongji/BinLogListener.js';
+import { BinLogEventHandler, BinLogListener, Row, SchemaChange, SchemaChangeType } from './zongji/BinLogListener.js';
 
 export interface BinLogStreamOptions {
   connections: MySQLConnectionManager;
@@ -31,11 +32,6 @@ export interface BinLogStreamOptions {
   logger?: Logger;
 }
 
-interface MysqlRelId {
-  schema: string;
-  name: string;
-}
-
 interface WriteChangePayload {
   type: storage.SaveOperationTag;
   row: Row;
@@ -53,11 +49,14 @@ export class BinlogConfigurationError extends Error {
 }
 
 /**
- *
- *
+ * Unlike Postgres' relation id, MySQL's tableId is only guaranteed to be unique and stay the same
+ * in the context of a single replication session.
+ * Instead, we create a unique key by combining the source schema and table name
+ * @param schema
+ * @param tableName
  */
-function getMysqlRelId(source: MysqlRelId): string {
-  return `${source.schema}.${source.name}`;
+function createTableId(schema: string, tableName: string): string {
+  return `${schema}.${tableName}`;
 }
 
 export class BinLogStream {
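The new `createTableId` above is the pivot for this release's table tracking: the binlog's numeric `tableId` is only stable within a single replication session, so tables are keyed by schema-qualified name instead. A minimal standalone sketch of that scheme (only `createTableId` is verbatim from the diff; `CachedTable` is a simplified stand-in for `storage.SourceTable`):

```ts
// Schema-qualified names as stable cache keys: the numeric tableId in a
// binlog TableMapEntry can change between replication sessions, so it
// cannot durably identify a table.
function createTableId(schema: string, tableName: string): string {
  return `${schema}.${tableName}`;
}

// Simplified stand-in for the cached storage.SourceTable.
interface CachedTable {
  snapshotComplete: boolean;
}

const tableCache = new Map<string, CachedTable>();
tableCache.set(createTableId('mydb', 'users'), { snapshotComplete: true });

// Any later event for mydb.users resolves to the same entry, regardless of
// which numeric id the current session assigned to the table.
const hit = tableCache.get(createTableId('mydb', 'users'));
console.log(hit?.snapshotComplete); // true
```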
@@ -68,11 +67,11 @@ export class BinLogStream {
 
   private readonly connections: MySQLConnectionManager;
 
-  private abortSignal: AbortSignal;
+  private readonly abortSignal: AbortSignal;
 
-  private logger: Logger;
+  private readonly logger: Logger;
 
-  private …
+  private tableCache = new Map<string | number, storage.SourceTable>();
 
   /**
    * Time of the oldest uncommitted change, according to the source db.
@@ -83,7 +82,7 @@ export class BinLogStream {
    * Keep track of whether we have done a commit or keepalive yet.
    * We can only compute replication lag if isStartingReplication == false, or oldestUncommittedChange is present.
    */
-  …
+  isStartingReplication = true;
 
   constructor(private options: BinLogStreamOptions) {
     this.logger = options.logger ?? defaultLogger;
@@ -134,15 +133,15 @@ export class BinLogStream {
       entity_descriptor: entity,
       sync_rules: this.syncRules
     });
-    // objectId is always defined
+    // Since we create the objectId ourselves, this is always defined
     this.tableCache.set(entity.objectId!, result.table);
 
-    // Drop conflicting tables.
+    // Drop conflicting tables. In the MySQL case with ObjectIds created from the table name, renames cannot be detected by the storage.
     await batch.drop(result.dropTables);
 
     // Snapshot if:
     // 1. Snapshot is requested (false for initial snapshot, since that process handles it elsewhere)
-    // 2. Snapshot is not …
+    // 2. Snapshot is not done yet, AND:
     // 3. The table is used in sync rules.
     const shouldSnapshot = snapshot && !result.table.snapshotComplete && result.table.syncAny;
 
@@ -158,10 +157,10 @@ export class BinLogStream {
     const promiseConnection = (connection as mysql.Connection).promise();
     try {
       await promiseConnection.query(`SET time_zone = '+00:00'`);
-      await promiseConnection.query('…
+      await promiseConnection.query('START TRANSACTION');
      try {
         gtid = await common.readExecutedGtid(promiseConnection);
-        await this.snapshotTable(connection.…
+        await this.snapshotTable(connection as mysql.Connection, batch, result.table);
         await promiseConnection.query('COMMIT');
       } catch (e) {
         await this.tryRollback(promiseConnection);
@@ -185,62 +184,21 @@ export class BinLogStream {
       return [];
     }
 
-    …
-    const …
-    …
-      const result = await this.connections.query(
-        `SELECT TABLE_NAME
-FROM information_schema.tables
-WHERE TABLE_SCHEMA = ? AND TABLE_NAME LIKE ?;
-`,
-        [tablePattern.schema, tablePattern.tablePattern]
-      );
-      tableRows = result[0];
-    } else {
-      const result = await this.connections.query(
-        `SELECT TABLE_NAME
-FROM information_schema.tables
-WHERE TABLE_SCHEMA = ? AND TABLE_NAME = ?;
-`,
-        [tablePattern.schema, tablePattern.tablePattern]
-      );
-      tableRows = result[0];
-    }
-    let tables: storage.SourceTable[] = [];
-
-    for (let row of tableRows) {
-      const name = row['TABLE_NAME'] as string;
-      if (prefix && !name.startsWith(prefix)) {
-        continue;
-      }
-
-      const result = await this.connections.query(
-        `SELECT 1
-FROM information_schema.tables
-WHERE table_schema = ? AND table_name = ?
-AND table_type = 'BASE TABLE';`,
-        [tablePattern.schema, tablePattern.name]
-      );
-      if (result[0].length == 0) {
-        this.logger.info(`Skipping ${tablePattern.schema}.${name} - no table exists/is not a base table`);
-        continue;
-      }
+    const connection = await this.connections.getConnection();
+    const matchedTables: string[] = await common.getTablesFromPattern(connection, tablePattern);
+    connection.release();
 
-      …
-      …
-      …
-        schema: tablePattern.schema,
-        table_name: tablePattern.name
-      });
-      connection.release();
+    let tables: storage.SourceTable[] = [];
+    for (const matchedTable of matchedTables) {
+      const replicaIdColumns = await this.getReplicaIdColumns(matchedTable, tablePattern.schema);
 
       const table = await this.handleRelation(
         batch,
         {
-          name,
+          name: matchedTable,
           schema: tablePattern.schema,
-          objectId: …
-          …
+          objectId: createTableId(tablePattern.schema, matchedTable),
+          replicaIdColumns: replicaIdColumns
         },
         false
       );
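The last hunk above replaces the inline `information_schema` lookups with `common.getTablesFromPattern`, which the file list shows landing in `src/common/schema-utils.ts`. The sketch below only restates the removed inline logic (wildcard patterns via `LIKE`, exact names via `=`, base tables only); it is not the helper's actual implementation:

```ts
import { type Connection, type RowDataPacket } from 'mysql2/promise';

// Condensed restatement of the removed queries: a single information_schema
// lookup, with the comparison operator chosen by whether the pattern is a
// wildcard.
async function tablesFromPattern(
  connection: Connection,
  schema: string,
  pattern: string,
  isWildCard: boolean
): Promise<string[]> {
  const [rows] = await connection.query<RowDataPacket[]>(
    `SELECT TABLE_NAME
     FROM information_schema.tables
     WHERE TABLE_SCHEMA = ? AND TABLE_NAME ${isWildCard ? 'LIKE' : '='} ?
       AND TABLE_TYPE = 'BASE TABLE'`,
    [schema, pattern]
  );
  return rows.map((row) => row['TABLE_NAME'] as string);
}
```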
@@ -251,7 +209,7 @@ AND table_type = 'BASE TABLE';`,
   }
 
   /**
-   * Checks if the initial sync has been completed
+   * Checks if the initial sync has already been completed
    */
   protected async checkInitialReplicated(): Promise<boolean> {
     const status = await this.storage.getStatus();
@@ -260,7 +218,7 @@ AND table_type = 'BASE TABLE';`,
     this.logger.info(`Initial replication already done.`);
 
     if (lastKnowGTID) {
-      // Check if the binlog is still available. If it isn't we need to snapshot again.
+      // Check if the specific binlog file is still available. If it isn't, we need to snapshot again.
       const connection = await this.connections.getConnection();
       try {
         const isAvailable = await common.isBinlogStillAvailable(connection, lastKnowGTID.position.filename);
@@ -337,11 +295,11 @@ AND table_type = 'BASE TABLE';`,
     batch: storage.BucketStorageBatch,
     table: storage.SourceTable
   ) {
-    this.logger.info(`Replicating ${table…
+    this.logger.info(`Replicating ${qualifiedMySQLTable(table)}`);
     // TODO count rows and log progress at certain batch sizes
 
     // MAX_EXECUTION_TIME(0) hint disables execution timeout for this query
-    const query = connection.query(`SELECT /*+ MAX_EXECUTION_TIME(0) */ * FROM ${…
+    const query = connection.query(`SELECT /*+ MAX_EXECUTION_TIME(0) */ * FROM ${qualifiedMySQLTable(table)}`);
     const stream = query.stream();
 
     let columns: Map<string, ColumnDescriptor> | undefined = undefined;
@@ -430,8 +388,6 @@ AND table_type = 'BASE TABLE';`,
   }
 
   async streamChanges() {
-    // Auto-activate as soon as initial replication is done
-    await this.storage.autoActivate();
     const serverId = createRandomServerId(this.storage.group_id);
 
     const connection = await this.connections.getConnection();
@@ -450,11 +406,9 @@ AND table_type = 'BASE TABLE';`,
       { zeroLSN: common.ReplicatedGTID.ZERO.comparable, defaultSchema: this.defaultSchema, storeCurrentData: true },
       async (batch) => {
         const binlogEventHandler = this.createBinlogEventHandler(batch);
-        // Only listen for changes to tables in the sync rules
-        const includedTables = [...this.tableCache.values()].map((table) => table.table);
         const binlogListener = new BinLogListener({
           logger: this.logger,
-          …
+          sourceTables: this.syncRules.getSourceTables(),
           startPosition: binLogPositionState,
           connectionManager: this.connections,
           serverId: serverId,
@@ -463,15 +417,14 @@ AND table_type = 'BASE TABLE';`,
 
         this.abortSignal.addEventListener(
           'abort',
-          () => {
-            …
-            binlogListener.stop();
+          async () => {
+            await binlogListener.stop();
           },
           { once: true }
         );
 
-        // Only returns when the replication is stopped or interrupted by an error
         await binlogListener.start();
+        await binlogListener.replicateUntilStopped();
       }
     );
   }
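The two hunks above rework the listener lifecycle: `stop()` is now asynchronous and awaited from the abort handler, and the long-running phase moves out of `start()` into the new `replicateUntilStopped()`. A sketch of that control flow; `ListenerLike` is a stand-in interface, not the package's actual `BinLogListener` type:

```ts
// Stand-in for the three lifecycle methods the diff relies on.
interface ListenerLike {
  start(): Promise<void>; // sets up the binlog connection; resolves once setup is done
  replicateUntilStopped(): Promise<void>; // resolves only when replication ends
  stop(): Promise<void>; // asynchronous shutdown that callers can await
}

async function runUntilAborted(listener: ListenerLike, abortSignal: AbortSignal) {
  abortSignal.addEventListener(
    'abort',
    async () => {
      await listener.stop();
    },
    { once: true }
  );

  // With setup separated from streaming, initialization errors surface from
  // start() before the long-running phase begins.
  await listener.start();
  await listener.replicateUntilStopped();
}
```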
@@ -517,10 +470,82 @@ AND table_type = 'BASE TABLE';`,
       },
       onRotate: async () => {
         this.isStartingReplication = false;
+      },
+      onSchemaChange: async (change: SchemaChange) => {
+        await this.handleSchemaChange(batch, change);
       }
     };
   }
 
+  private async handleSchemaChange(batch: storage.BucketStorageBatch, change: SchemaChange): Promise<void> {
+    if (change.type === SchemaChangeType.RENAME_TABLE) {
+      const fromTableId = createTableId(change.schema, change.table);
+
+      const fromTable = this.tableCache.get(fromTableId);
+      // Old table needs to be cleaned up
+      if (fromTable) {
+        await batch.drop([fromTable]);
+        this.tableCache.delete(fromTableId);
+      }
+      // The new table matched a table in the sync rules
+      if (change.newTable) {
+        await this.handleCreateOrUpdateTable(batch, change.newTable!, change.schema);
+      }
+    } else {
+      const tableId = createTableId(change.schema, change.table);
+
+      const table = this.getTable(tableId);
+
+      switch (change.type) {
+        case SchemaChangeType.ALTER_TABLE_COLUMN:
+        case SchemaChangeType.REPLICATION_IDENTITY:
+          // For these changes, we need to update the table if the replication identity columns have changed.
+          await this.handleCreateOrUpdateTable(batch, change.table, change.schema);
+          break;
+        case SchemaChangeType.TRUNCATE_TABLE:
+          await batch.truncate([table]);
+          break;
+        case SchemaChangeType.DROP_TABLE:
+          await batch.drop([table]);
+          this.tableCache.delete(tableId);
+          break;
+        default:
+          // No action needed for other schema changes
+          break;
+      }
+    }
+  }
+
+  private async getReplicaIdColumns(tableName: string, schema: string) {
+    const connection = await this.connections.getConnection();
+    const replicaIdColumns = await common.getReplicationIdentityColumns({
+      connection,
+      schema,
+      tableName
+    });
+    connection.release();
+
+    return replicaIdColumns.columns;
+  }
+
+  private async handleCreateOrUpdateTable(
+    batch: storage.BucketStorageBatch,
+    tableName: string,
+    schema: string
+  ): Promise<SourceTable> {
+    const replicaIdColumns = await this.getReplicaIdColumns(tableName, schema);
+    return await this.handleRelation(
+      batch,
+      {
+        name: tableName,
+        schema: schema,
+        objectId: createTableId(schema, tableName),
+        replicaIdColumns: replicaIdColumns
+      },
+      true
+    );
+  }
+
   private async writeChanges(
     batch: storage.BucketStorageBatch,
     msg: {
@@ -531,17 +556,20 @@ AND table_type = 'BASE TABLE';`,
     }
   ): Promise<storage.FlushedResult | null> {
     const columns = common.toColumnDescriptors(msg.tableEntry);
+    const tableId = createTableId(msg.tableEntry.parentSchema, msg.tableEntry.tableName);
+
+    let table = this.tableCache.get(tableId);
+    if (table == null) {
+      // This is an insert for a new table that matches a table in the sync rules
+      // We need to create the table in the storage and cache it.
+      table = await this.handleCreateOrUpdateTable(batch, msg.tableEntry.tableName, msg.tableEntry.parentSchema);
+    }
 
     for (const [index, row] of msg.rows.entries()) {
       await this.writeChange(batch, {
         type: msg.type,
         database: msg.tableEntry.parentSchema,
-        sourceTable: this.getTable(
-          getMysqlRelId({
-            schema: msg.tableEntry.parentSchema,
-            name: msg.tableEntry.tableName
-          })
-        ),
+        sourceTable: table!,
         table: msg.tableEntry.tableName,
         columns: columns,
         row: row,
@@ -569,7 +597,7 @@ AND table_type = 'BASE TABLE';`,
         });
       case storage.SaveOperationTag.UPDATE:
         this.metrics.getCounter(ReplicationMetric.ROWS_REPLICATED).add(1);
-        // …
+        // The previous row may be null if the replica id columns are unchanged.
         // It's fine to treat that the same as an insert.
         const beforeUpdated = payload.previous_row
           ? common.toSQLiteRow(payload.previous_row, payload.columns)
@@ -610,7 +638,7 @@ AND table_type = 'BASE TABLE';`,
       // We don't have anything to compute replication lag with yet.
       return undefined;
     } else {
-      // We don't have any uncommitted changes, so replication is up…
+      // We don't have any uncommitted changes, so replication is up to date.
       return 0;
     }
   }
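The `writeChanges` hunk makes the write path lazily register tables: a row event for a table missing from the cache (for example, a table created or renamed into the sync rules after replication started) now triggers `handleCreateOrUpdateTable` instead of failing the lookup. A condensed sketch of that get-or-create pattern, using a simplified table type; in the real code the create step resolves replica id columns and registers the table with storage:

```ts
// Simplified stand-in for storage.SourceTable.
interface TrackedTable {
  name: string;
}

// Get-or-create against a cache keyed by createTableId(schema, table).
async function resolveTable(
  cache: Map<string, TrackedTable>,
  tableId: string,
  create: () => Promise<TrackedTable>
): Promise<TrackedTable> {
  let table = cache.get(tableId);
  if (table == null) {
    table = await create();
    cache.set(tableId, table);
  }
  return table;
}

// Usage shape mirroring the diff: the creator only runs on a cache miss.
async function demo() {
  const cache = new Map<string, TrackedTable>();
  const table = await resolveTable(cache, 'mydb.users', async () => ({ name: 'users' }));
  console.log(table.name); // 'users'
}
```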