@powersync/service-module-mysql 0.7.4 → 0.8.0
- package/CHANGELOG.md +30 -0
- package/dev/docker/mysql/init-scripts/my.cnf +1 -3
- package/dist/api/MySQLRouteAPIAdapter.js +11 -3
- package/dist/api/MySQLRouteAPIAdapter.js.map +1 -1
- package/dist/common/ReplicatedGTID.js +4 -0
- package/dist/common/ReplicatedGTID.js.map +1 -1
- package/dist/common/common-index.d.ts +1 -2
- package/dist/common/common-index.js +1 -2
- package/dist/common/common-index.js.map +1 -1
- package/dist/common/mysql-to-sqlite.js +4 -0
- package/dist/common/mysql-to-sqlite.js.map +1 -1
- package/dist/common/schema-utils.d.ts +20 -0
- package/dist/common/{get-replication-columns.js → schema-utils.js} +73 -30
- package/dist/common/schema-utils.js.map +1 -0
- package/dist/replication/BinLogStream.d.ts +9 -6
- package/dist/replication/BinLogStream.js +99 -70
- package/dist/replication/BinLogStream.js.map +1 -1
- package/dist/replication/zongji/BinLogListener.d.ts +52 -5
- package/dist/replication/zongji/BinLogListener.js +302 -85
- package/dist/replication/zongji/BinLogListener.js.map +1 -1
- package/dist/replication/zongji/zongji-utils.d.ts +2 -1
- package/dist/replication/zongji/zongji-utils.js +3 -0
- package/dist/replication/zongji/zongji-utils.js.map +1 -1
- package/dist/types/node-sql-parser-extended-types.d.ts +31 -0
- package/dist/types/node-sql-parser-extended-types.js +2 -0
- package/dist/types/node-sql-parser-extended-types.js.map +1 -0
- package/dist/utils/mysql-utils.d.ts +4 -2
- package/dist/utils/mysql-utils.js +15 -3
- package/dist/utils/mysql-utils.js.map +1 -1
- package/dist/utils/parser-utils.d.ts +16 -0
- package/dist/utils/parser-utils.js +58 -0
- package/dist/utils/parser-utils.js.map +1 -0
- package/package.json +9 -8
- package/src/api/MySQLRouteAPIAdapter.ts +11 -3
- package/src/common/ReplicatedGTID.ts +6 -1
- package/src/common/common-index.ts +1 -2
- package/src/common/mysql-to-sqlite.ts +3 -0
- package/src/common/{get-replication-columns.ts → schema-utils.ts} +96 -37
- package/src/replication/BinLogStream.ts +119 -91
- package/src/replication/zongji/BinLogListener.ts +370 -93
- package/src/replication/zongji/zongji-utils.ts +6 -1
- package/src/types/node-sql-parser-extended-types.ts +25 -0
- package/src/utils/mysql-utils.ts +19 -4
- package/src/utils/parser-utils.ts +73 -0
- package/test/src/BinLogListener.test.ts +415 -32
- package/test/src/BinLogStream.test.ts +128 -52
- package/test/src/BinlogStreamUtils.ts +12 -2
- package/test/src/parser-utils.test.ts +24 -0
- package/test/src/schema-changes.test.ts +663 -0
- package/test/src/util.ts +6 -0
- package/tsconfig.tsbuildinfo +1 -1
- package/dist/common/get-replication-columns.d.ts +0 -12
- package/dist/common/get-replication-columns.js.map +0 -1
- package/dist/common/get-tables-from-pattern.d.ts +0 -7
- package/dist/common/get-tables-from-pattern.js +0 -28
- package/dist/common/get-tables-from-pattern.js.map +0 -1
- package/src/common/get-tables-from-pattern.ts +0 -44
package/src/replication/zongji/BinLogListener.ts
CHANGED
@@ -1,16 +1,62 @@
 import * as common from '../../common/common-index.js';
 import async from 'async';
-import { BinLogEvent, StartOptions, TableMapEntry, ZongJi } from '@powersync/mysql-zongji';
+import { BinLogEvent, BinLogQueryEvent, StartOptions, TableMapEntry, ZongJi } from '@powersync/mysql-zongji';
 import * as zongji_utils from './zongji-utils.js';
 import { Logger, logger as defaultLogger } from '@powersync/lib-services-framework';
 import { MySQLConnectionManager } from '../MySQLConnectionManager.js';
+import timers from 'timers/promises';
+import pkg, {
+  AST,
+  BaseFrom,
+  DropIndexStatement,
+  Parser as ParserType,
+  RenameStatement,
+  TruncateStatement
+} from 'node-sql-parser';
+import {
+  isAlterTable,
+  isColumnExpression,
+  isConstraintExpression,
+  isCreateUniqueIndex,
+  isDropIndex,
+  isDropTable,
+  isRenameExpression,
+  isRenameTable,
+  isTruncate,
+  matchedSchemaChangeQuery
+} from '../../utils/parser-utils.js';
+import { TablePattern } from '@powersync/service-sync-rules';
 
-
-// MySQL server will automatically terminate replication connections after 60 seconds of inactivity, so this guards against that.
-const MAX_QUEUE_PAUSE_TIME_MS = 45_000;
+const { Parser } = pkg;
 
 export type Row = Record<string, any>;
 
+/**
+ * Schema changes that are detectable by inspecting query events.
+ * Create table statements are not included here, since new tables are automatically detected when row events
+ * are received for them.
+ */
+export enum SchemaChangeType {
+  RENAME_TABLE = 'Rename Table',
+  DROP_TABLE = 'Drop Table',
+  TRUNCATE_TABLE = 'Truncate Table',
+  ALTER_TABLE_COLUMN = 'Alter Table Column',
+  REPLICATION_IDENTITY = 'Alter Replication Identity'
+}
+
+export interface SchemaChange {
+  type: SchemaChangeType;
+  /**
+   * The table that the schema change applies to.
+   */
+  table: string;
+  schema: string;
+  /**
+   * Populated for table renames if the newTable was matched by the DatabaseFilter
+   */
+  newTable?: string;
+}
+
 export interface BinLogEventHandler {
   onTransactionStart: (options: { timestamp: Date }) => Promise<void>;
   onRotate: () => Promise<void>;
@@ -18,12 +64,13 @@ export interface BinLogEventHandler {
   onUpdate: (rowsAfter: Row[], rowsBefore: Row[], tableMap: TableMapEntry) => Promise<void>;
   onDelete: (rows: Row[], tableMap: TableMapEntry) => Promise<void>;
   onCommit: (lsn: string) => Promise<void>;
+  onSchemaChange: (change: SchemaChange) => Promise<void>;
 }
 
 export interface BinLogListenerOptions {
   connectionManager: MySQLConnectionManager;
   eventHandler: BinLogEventHandler;
-
+  sourceTables: TablePattern[];
   serverId: number;
   startPosition: common.BinLogPosition;
   logger?: Logger;
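The `SchemaChange` shape above is what the new `onSchemaChange` callback delivers. As a minimal sketch of a consumer (this handler is hypothetical and not part of the package), an implementation might branch on `SchemaChangeType` like this:

```ts
import { SchemaChange, SchemaChangeType } from './BinLogListener.js';

// Hypothetical consumer of the new onSchemaChange callback.
async function onSchemaChange(change: SchemaChange): Promise<void> {
  const qualified = `${change.schema}.${change.table}`;
  switch (change.type) {
    case SchemaChangeType.RENAME_TABLE:
      // newTable is only populated when the rename target matched the database filter.
      console.log(`${qualified} renamed to ${change.newTable ?? '<table outside the filter>'}`);
      break;
    case SchemaChangeType.REPLICATION_IDENTITY:
      console.log(`${qualified} may have a new replication identity; re-evaluate its key columns`);
      break;
    default:
      console.log(`${qualified}: ${change.type}`);
  }
}
```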
@@ -34,18 +81,24 @@ export interface BinLogListenerOptions {
  * events on the provided BinLogEventHandler.
  */
 export class BinLogListener {
+  private sqlParser: ParserType;
   private connectionManager: MySQLConnectionManager;
   private eventHandler: BinLogEventHandler;
   private binLogPosition: common.BinLogPosition;
   private currentGTID: common.ReplicatedGTID | null;
   private logger: Logger;
+  private listenerError: Error | null;
+  private databaseFilter: { [schema: string]: (table: string) => boolean };
 
   zongji: ZongJi;
   processingQueue: async.QueueObject<BinLogEvent>;
+
+  isStopped: boolean = false;
+  isStopping: boolean = false;
   /**
    * The combined size in bytes of all the binlog events currently in the processing queue.
    */
-  queueMemoryUsage: number;
+  queueMemoryUsage: number = 0;
 
   constructor(public options: BinLogListenerOptions) {
     this.logger = options.logger ?? defaultLogger;
@@ -53,10 +106,11 @@ export class BinLogListener {
     this.eventHandler = options.eventHandler;
     this.binLogPosition = options.startPosition;
     this.currentGTID = null;
-
-    this.processingQueue =
-    this.queueMemoryUsage = 0;
+    this.sqlParser = new Parser();
+    this.processingQueue = this.createProcessingQueue();
     this.zongji = this.createZongjiListener();
+    this.listenerError = null;
+    this.databaseFilter = this.createDatabaseFilter(options.sourceTables);
   }
 
   /**
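The constructor now wires the queue through `createProcessingQueue()` (shown in a later hunk): an `async.queue` with concurrency 1 so binlog events are processed strictly in order, plus an `error` callback that records the first failure for `replicateUntilStopped()` to rethrow. A standalone sketch of that pattern, with illustrative names:

```ts
import async from 'async';

let firstError: Error | null = null;

// Concurrency of 1 keeps event processing strictly ordered.
const queue = async.queue<string>(async (event) => {
  if (event === 'bad') throw new Error(`failed to process ${event}`);
}, 1);

// Record the first failure; the real listener also triggers a shutdown here.
queue.error((error) => {
  firstError ??= error;
});

queue.push(['a', 'bad', 'c']);
await queue.drain();
if (firstError) throw firstError; // surface the captured error to the caller
```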
@@ -67,126 +121,155 @@
     return this.connectionManager.options.binlog_queue_memory_limit * 1024 * 1024;
   }
 
-  public async start(): Promise<void> {
+  public async start(isRestart: boolean = false): Promise<void> {
     if (this.isStopped) {
       return;
     }
-
+
+    this.logger.info(
+      `${isRestart ? 'Restarting' : 'Starting'} BinLog Listener with replica client id:${this.options.serverId}...`
+    );
+
+    // Set a heartbeat interval for the Zongji replication connection
+    // Zongji does not explicitly handle the heartbeat events - they are categorized as event:unknown
+    // The heartbeat events are enough to keep the connection alive for setTimeout to work on the socket.
+    // The heartbeat needs to be set before starting the listener, since the replication connection is locked once replicating
+    await new Promise((resolve, reject) => {
+      this.zongji.connection.query(
+        // In nanoseconds, 10^9 = 1s
+        'set @master_heartbeat_period=28*1000000000',
+        (error: any, results: any, _fields: any) => {
+          if (error) {
+            reject(error);
+          } else {
+            this.logger.info('Successfully set up replication connection heartbeat.');
+            resolve(results);
+          }
+        }
+      );
+    });
+
+    // The _socket member is only set after a query is run on the connection, so we set the timeout after setting the heartbeat.
+    // The timeout here must be greater than the master_heartbeat_period.
+    const socket = this.zongji.connection._socket!;
+    socket.setTimeout(60_000, () => {
+      this.logger.info('Destroying socket due to replication connection timeout.');
+      socket.destroy(new Error('Replication connection timeout.'));
+    });
 
     this.zongji.start({
       // We ignore the unknown/heartbeat event since it currently serves no purpose other than to keep the connection alive
       // tablemap events always need to be included for the other row events to work
-      includeEvents: ['tablemap', 'writerows', 'updaterows', 'deleterows', 'xid', 'rotate', 'gtidlog'],
-      includeSchema:
+      includeEvents: ['tablemap', 'writerows', 'updaterows', 'deleterows', 'xid', 'rotate', 'gtidlog', 'query'],
+      includeSchema: this.databaseFilter,
       filename: this.binLogPosition.filename,
       position: this.binLogPosition.offset,
      serverId: this.options.serverId
     } satisfies StartOptions);
 
-    return new Promise
-
-
-
+    return new Promise((resolve) => {
+      this.zongji.once('ready', () => {
+        this.logger.info(
+          `BinLog Listener ${isRestart ? 'restarted' : 'started'}. Listening for events from position: ${this.binLogPosition.filename}:${this.binLogPosition.offset}`
+        );
         resolve();
-      }
-
-    this.zongji.on('error', (error) => {
-      if (!this.isStopped) {
-        this.logger.error('Binlog listener error:', error);
-        this.stop();
-        reject(error);
-      } else {
-        this.logger.warn('Binlog listener error during shutdown:', error);
-      }
       });
+    });
+  }
 
-
-
-
-
-
-
-        this.logger.warn('BinlogEvent processing error during shutdown:', error);
-      }
-    });
+  private async restartZongji(): Promise<void> {
+    if (this.zongji.stopped) {
+      this.zongji = this.createZongjiListener();
+      await this.start(true);
+    }
+  }
 
-
-
-
+  private async stopZongji(): Promise<void> {
+    if (!this.zongji.stopped) {
+      this.logger.info('Stopping BinLog Listener...');
+      await new Promise<void>((resolve) => {
+        this.zongji.once('stopped', () => {
+          resolve();
+        });
+        this.zongji.stop();
       });
-
+      this.logger.info('BinLog Listener stopped.');
+    }
   }
 
-  public stop(): void {
-    if (!this.isStopped) {
-      this.
+  public async stop(): Promise<void> {
+    if (!(this.isStopped || this.isStopping)) {
+      this.isStopping = true;
+      await this.stopZongji();
       this.processingQueue.kill();
+
+      this.isStopped = true;
     }
   }
 
-
-
+  public async replicateUntilStopped(): Promise<void> {
+    while (!this.isStopped) {
+      await timers.setTimeout(1_000);
+    }
+
+    if (this.listenerError) {
+      this.logger.error('BinLog Listener stopped due to an error:', this.listenerError);
+      throw this.listenerError;
+    }
+  }
+
+  private createProcessingQueue(): async.QueueObject<BinLogEvent> {
+    const queue = async.queue(this.createQueueWorker(), 1);
+
+    queue.error((error) => {
+      if (!(this.isStopped || this.isStopping)) {
+        this.listenerError = error;
+        this.stop();
+      } else {
+        this.logger.warn('Error processing BinLog event during shutdown:', error);
+      }
+    });
+
+    return queue;
   }
 
   private createZongjiListener(): ZongJi {
     const zongji = this.connectionManager.createBinlogListener();
 
     zongji.on('binlog', async (evt) => {
-      this.logger.
+      this.logger.debug(`Received BinLog event:${evt.getEventName()}`);
+
       this.processingQueue.push(evt);
       this.queueMemoryUsage += evt.size;
 
       // When the processing queue grows past the threshold, we pause the binlog listener
       if (this.isQueueOverCapacity()) {
         this.logger.info(
-          `
+          `BinLog processing queue has reached its memory limit of [${this.connectionManager.options.binlog_queue_memory_limit}MB]. Pausing BinLog Listener.`
         );
-
-
-
-
-
-        await Promise.race([this.processingQueue.empty(), resumeTimeoutPromise]);
-
-        this.logger.info(`Binlog processing queue backlog cleared. Resuming Binlog listener.`);
-        zongji.resume();
+        await this.stopZongji();
+        await this.processingQueue.drain();
+        this.logger.info(`BinLog processing queue backlog cleared. Resuming BinLog Listener.`);
+        await this.restartZongji();
       }
     });
 
-    zongji.on('
-
-
-
-
-      this.
-
-        'set @master_heartbeat_period=28*1000000000',
-        (error: any, results: any, fields: any) => {
-          if (error) {
-            reject(error);
-          } else {
-            this.logger.info('Successfully set up replication connection heartbeat...');
-            resolve(results);
-          }
-        }
-      );
-    });
-
-    // The _socket member is only set after a query is run on the connection, so we set the timeout after setting the heartbeat.
-    // The timeout here must be greater than the master_heartbeat_period.
-    const socket = this.zongji.connection._socket!;
-    socket.setTimeout(60_000, () => {
-      this.logger.info('Destroying socket due to replication connection timeout.');
-      socket.destroy(new Error('Replication connection timeout.'));
-    });
-    this.logger.info(
-      `BinLog listener setup complete. Reading binlog from: ${this.binLogPosition.filename}:${this.binLogPosition.offset}`
-    );
+    zongji.on('error', (error) => {
+      if (!(this.isStopped || this.isStopping)) {
+        this.listenerError = error;
+        this.stop();
+      } else {
+        this.logger.warn('Ignored BinLog Listener error during shutdown:', error);
+      }
     });
 
     return zongji;
   }
 
+  isQueueOverCapacity(): boolean {
+    return this.queueMemoryUsage >= this.queueMemoryLimit;
+  }
+
   private createQueueWorker() {
     return async (evt: BinLogEvent) => {
       switch (true) {
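The timing constants in the new `start()` only work together because of their relative sizes: the heartbeat period is expressed in nanoseconds (28 × 10⁹ ns = 28 s), and it must be shorter than both the 60 s socket timeout set immediately afterwards and MySQL's ~60 s idle cutoff for replication connections mentioned in the removed comment. A quick check of that arithmetic (constant names here are illustrative, not from the package):

```ts
// 28 * 10^9 ns, the value sent via `set @master_heartbeat_period=...`
const HEARTBEAT_NS = 28 * 1_000_000_000;
const HEARTBEAT_MS = HEARTBEAT_NS / 1_000_000; // 28_000 ms = 28 s

const SOCKET_TIMEOUT_MS = 60_000; // socket.setTimeout(60_000, ...)
const MYSQL_IDLE_CUTOFF_MS = 60_000; // server-side termination of idle replication connections

// A heartbeat lands well inside every timeout window, keeping the connection alive.
console.assert(HEARTBEAT_MS < SOCKET_TIMEOUT_MS);
console.assert(HEARTBEAT_MS < MYSQL_IDLE_CUTOFF_MS);
```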
@@ -201,15 +284,27 @@ export class BinLogListener {
               offset: evt.nextPosition
             }
           });
+          this.binLogPosition.offset = evt.nextPosition;
           await this.eventHandler.onTransactionStart({ timestamp: new Date(evt.timestamp) });
+          this.logger.info(`Processed GTID event: ${this.currentGTID.comparable}`);
           break;
         case zongji_utils.eventIsRotation(evt):
+          const newFile = this.binLogPosition.filename !== evt.binlogName;
           this.binLogPosition.filename = evt.binlogName;
-          this.binLogPosition.offset = evt.position;
           await this.eventHandler.onRotate();
+
+          if (newFile) {
+            this.logger.info(
+              `Processed Rotate event. New BinLog file is: ${this.binLogPosition.filename}:${this.binLogPosition.offset}`
+            );
+          }
           break;
         case zongji_utils.eventIsWriteMutation(evt):
-
+          const tableMap = evt.tableMap[evt.tableId];
+          await this.eventHandler.onWrite(evt.rows, tableMap);
+          this.logger.info(
+            `Processed Write event for table [${tableMap.parentSchema}.${tableMap.tableName}]. ${evt.rows.length} row(s) inserted.`
+          );
           break;
         case zongji_utils.eventIsUpdateMutation(evt):
           await this.eventHandler.onUpdate(
@@ -217,27 +312,209 @@
             evt.rows.map((row) => row.before),
             evt.tableMap[evt.tableId]
           );
+          this.logger.info(
+            `Processed Update event for table [${evt.tableMap[evt.tableId].tableName}]. ${evt.rows.length} row(s) updated.`
+          );
           break;
         case zongji_utils.eventIsDeleteMutation(evt):
           await this.eventHandler.onDelete(evt.rows, evt.tableMap[evt.tableId]);
+          this.logger.info(
+            `Processed Delete event for table [${evt.tableMap[evt.tableId].tableName}]. ${evt.rows.length} row(s) deleted.`
+          );
           break;
         case zongji_utils.eventIsXid(evt):
+          this.binLogPosition.offset = evt.nextPosition;
           const LSN = new common.ReplicatedGTID({
             raw_gtid: this.currentGTID!.raw,
-            position:
-              filename: this.binLogPosition.filename,
-              offset: evt.nextPosition
-            }
+            position: this.binLogPosition
           }).comparable;
           await this.eventHandler.onCommit(LSN);
+          this.logger.info(`Processed Xid event - transaction complete. LSN: ${LSN}.`);
+          break;
+        case zongji_utils.eventIsQuery(evt):
+          await this.processQueryEvent(evt);
           break;
       }
 
+      // Update the binlog position after processing the event
+      this.binLogPosition.offset = evt.nextPosition;
       this.queueMemoryUsage -= evt.size;
     };
   }
 
-
-
+  private async processQueryEvent(event: BinLogQueryEvent): Promise<void> {
+    const { query, nextPosition } = event;
+
+    // BEGIN query events mark the start of a transaction before any row events. They are not relevant for schema changes
+    if (query === 'BEGIN') {
+      return;
+    }
+
+    const schemaChanges = this.toSchemaChanges(query, event.schema);
+    if (schemaChanges.length > 0) {
+      // Since handling the schema changes can take a long time, we need to stop the Zongji listener instead of pausing it.
+      await this.stopZongji();
+
+      for (const change of schemaChanges) {
+        this.logger.info(`Processing schema change ${change.type} for table [${change.schema}.${change.table}]`);
+        await this.eventHandler.onSchemaChange(change);
+      }
+
+      // DDL queries are auto commited, but do not come with a corresponding Xid event.
+      // This is problematic for DDL queries which result in row events because the checkpoint is not moved on,
+      // so we manually commit here.
+      this.binLogPosition.offset = nextPosition;
+      const LSN = new common.ReplicatedGTID({
+        raw_gtid: this.currentGTID!.raw,
+        position: this.binLogPosition
+      }).comparable;
+      await this.eventHandler.onCommit(LSN);
+
+      this.logger.info(`Successfully processed ${schemaChanges.length} schema change(s).`);
+
+      // If there are still events in the processing queue, we need to process those before restarting Zongji
+      if (!this.processingQueue.idle()) {
+        this.logger.info(`Processing [${this.processingQueue.length()}] events(s) before resuming...`);
+        this.processingQueue.drain(async () => {
+          await this.restartZongji();
+        });
+      } else {
+        await this.restartZongji();
+      }
+    }
+  }
+
+  /**
+   * Function that interprets a DDL query for any applicable schema changes.
+   * If the query does not contain any relevant schema changes, an empty array is returned.
+   * The defaultSchema is derived from the database set on the MySQL Node.js connection client.
+   * It is used as a fallback when the schema/database cannot be determined from the query DDL.
+   *
+   * @param query
+   * @param defaultSchema
+   */
+  private toSchemaChanges(query: string, defaultSchema: string): SchemaChange[] {
+    let statements: AST[] = [];
+    try {
+      const ast = this.sqlParser.astify(query, { database: 'MySQL' });
+      statements = Array.isArray(ast) ? ast : [ast];
+    } catch (error) {
+      if (matchedSchemaChangeQuery(query, Object.values(this.databaseFilter))) {
+        this.logger.warn(
+          `Failed to parse query: [${query}].
+          Please review for the schema changes and manually redeploy the sync rules if required.`
+        );
+      }
+      return [];
+    }
+
+    const changes: SchemaChange[] = [];
+    for (const statement of statements) {
+      if (isTruncate(statement)) {
+        const truncateStatement = statement as TruncateStatement;
+        // Truncate statements can apply to multiple tables
+        for (const entity of truncateStatement.name) {
+          changes.push({
+            type: SchemaChangeType.TRUNCATE_TABLE,
+            table: entity.table,
+            schema: entity.db ?? defaultSchema
+          });
+        }
+      } else if (isDropTable(statement)) {
+        for (const entity of statement.name) {
+          changes.push({ type: SchemaChangeType.DROP_TABLE, table: entity.table, schema: entity.db ?? defaultSchema });
+        }
+      } else if (isDropIndex(statement)) {
+        const dropStatement = statement as DropIndexStatement;
+        changes.push({
+          type: SchemaChangeType.REPLICATION_IDENTITY,
+          table: dropStatement.table.table,
+          schema: dropStatement.table.db ?? defaultSchema
+        });
+      } else if (isCreateUniqueIndex(statement)) {
+        // Potential change to the replication identity if the table has no prior unique constraint
+        changes.push({
+          type: SchemaChangeType.REPLICATION_IDENTITY,
+          // @ts-ignore - The type definitions for node-sql-parser do not reflect the correct structure here
+          table: statement.table!.table,
+          // @ts-ignore
+          schema: statement.table!.db ?? defaultSchema
+        });
+      } else if (isRenameTable(statement)) {
+        const renameStatement = statement as RenameStatement;
+        // Rename statements can apply to multiple tables
+        for (const table of renameStatement.table) {
+          const schema = table[0].db ?? defaultSchema;
+          const isNewTableIncluded = this.databaseFilter[schema](table[1].table);
+          changes.push({
+            type: SchemaChangeType.RENAME_TABLE,
+            table: table[0].table,
+            newTable: isNewTableIncluded ? table[1].table : undefined,
+            schema
+          });
+        }
+      } else if (isAlterTable(statement)) {
+        const fromTable = statement.table[0] as BaseFrom;
+        for (const expression of statement.expr) {
+          if (isRenameExpression(expression)) {
+            changes.push({
+              type: SchemaChangeType.RENAME_TABLE,
+              table: fromTable.table,
+              newTable: expression.table,
+              schema: fromTable.db ?? defaultSchema
+            });
+          } else if (isColumnExpression(expression)) {
+            changes.push({
+              type: SchemaChangeType.ALTER_TABLE_COLUMN,
+              table: fromTable.table,
+              schema: fromTable.db ?? defaultSchema
+            });
+          } else if (isConstraintExpression(expression)) {
+            // Potential changes to the replication identity
+            changes.push({
+              type: SchemaChangeType.REPLICATION_IDENTITY,
+              table: fromTable.table,
+              schema: fromTable.db ?? defaultSchema
+            });
+          }
+        }
+      }
+    }
+    // Filter out schema changes that are not relevant to the included tables
+    return changes.filter(
+      (change) =>
+        this.isTableIncluded(change.table, change.schema) ||
+        (change.newTable && this.isTableIncluded(change.newTable, change.schema))
+    );
+  }
+
+  private isTableIncluded(tableName: string, schema: string): boolean {
+    return this.databaseFilter[schema] && this.databaseFilter[schema](tableName);
+  }
+
+  private createDatabaseFilter(sourceTables: TablePattern[]): { [schema: string]: (table: string) => boolean } {
+    // Group sync rule tables by schema
+    const schemaMap = new Map<string, TablePattern[]>();
+    for (const table of sourceTables) {
+      if (!schemaMap.has(table.schema)) {
+        const tables = [table];
+        schemaMap.set(table.schema, tables);
+      } else {
+        schemaMap.get(table.schema)!.push(table);
+      }
+    }
+
+    const databaseFilter: { [schema: string]: (table: string) => boolean } = {};
+    for (const entry of schemaMap.entries()) {
+      const [schema, sourceTables] = entry;
+      databaseFilter[schema] = (table: string) =>
+        sourceTables.findIndex((sourceTable) =>
+          sourceTable.isWildcard
+            ? table.startsWith(sourceTable.tablePattern.substring(0, sourceTable.tablePattern.length - 1))
+            : table === sourceTable.name
+        ) !== -1;
+    }
+
+    return databaseFilter;
   }
 }
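`createDatabaseFilter` compiles the sync-rule `TablePattern`s into one predicate per schema: wildcard patterns are matched by stripping the trailing wildcard character and prefix-matching, exact patterns by name equality. A standalone re-implementation of that predicate for illustration (the `Pattern` shape is simplified, and a single trailing wildcard character such as `%` is assumed, which is what the `startsWith` logic implies):

```ts
type Pattern = { isWildcard: boolean; tablePattern: string; name: string };

function tableMatcher(patterns: Pattern[]): (table: string) => boolean {
  return (table) =>
    patterns.some((p) =>
      p.isWildcard ? table.startsWith(p.tablePattern.slice(0, -1)) : table === p.name
    );
}

const included = tableMatcher([
  { isWildcard: true, tablePattern: 'orders_%', name: 'orders_%' },
  { isWildcard: false, tablePattern: 'users', name: 'users' }
]);

console.log(included('orders_2024')); // true: prefix match on 'orders_'
console.log(included('users')); // true: exact match
console.log(included('invoices')); // false: no pattern matches
```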
package/src/replication/zongji/zongji-utils.ts
CHANGED
@@ -5,7 +5,8 @@ import {
   BinLogRotationEvent,
   BinLogTableMapEvent,
   BinLogRowUpdateEvent,
-  BinLogXidEvent
+  BinLogXidEvent,
+  BinLogQueryEvent
 } from '@powersync/mysql-zongji';
 
 export function eventIsGTIDLog(event: BinLogEvent): event is BinLogGTIDLogEvent {
@@ -35,3 +36,7 @@ export function eventIsDeleteMutation(event: BinLogEvent): event is BinLogRowEve
 export function eventIsUpdateMutation(event: BinLogEvent): event is BinLogRowUpdateEvent {
   return event.getEventName() == 'updaterows';
 }
+
+export function eventIsQuery(event: BinLogEvent): event is BinLogQueryEvent {
+  return event.getEventName() == 'query';
+}
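`eventIsQuery` follows the same user-defined type guard pattern as the existing helpers: the `event is BinLogQueryEvent` return type lets TypeScript narrow the event inside a branch, which is what makes the `switch (true)` dispatch in the queue worker type-safe. For example:

```ts
import { BinLogEvent } from '@powersync/mysql-zongji';
import * as zongji_utils from './zongji-utils.js';

function describeEvent(evt: BinLogEvent): string {
  if (zongji_utils.eventIsQuery(evt)) {
    // Narrowed to BinLogQueryEvent, so query and schema are typed.
    return `query against schema [${evt.schema}]: ${evt.query}`;
  }
  return `unhandled event: ${evt.getEventName()}`;
}
```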
package/src/types/node-sql-parser-extended-types.ts
ADDED
@@ -0,0 +1,25 @@
+import 'node-sql-parser';
+
+/**
+ * Missing Type definitions for the node-sql-parser
+ */
+declare module 'node-sql-parser' {
+  interface RenameStatement {
+    type: 'rename';
+    table: { db: string | null; table: string }[][];
+  }
+
+  interface TruncateStatement {
+    type: 'truncate';
+    keyword: 'table'; // There are more keywords possible, but we only care about 'table'
+    name: { db: string | null; table: string; as: string | null }[];
+  }
+
+  // This custom type more accurately describes what the structure of a Drop statement looks like for indexes.
+  interface DropIndexStatement {
+    type: 'drop';
+    keyword: 'index';
+    table: { db: string | null; table: string };
+    name: any[];
+  }
+}
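These declarations use TypeScript module augmentation: `declare module 'node-sql-parser'` merges the missing statement shapes into the library's own type definitions instead of forking them. A hedged usage sketch, assuming (as `toSchemaChanges` does) that the MySQL dialect parses TRUNCATE into this shape:

```ts
import pkg, { TruncateStatement } from 'node-sql-parser';
import './node-sql-parser-extended-types.js'; // load the augmentation

const { Parser } = pkg;
const parser = new Parser();

const ast = parser.astify('TRUNCATE TABLE app.users', { database: 'MySQL' });
const statement = (Array.isArray(ast) ? ast[0] : ast) as unknown as TruncateStatement;

if (statement.type === 'truncate') {
  for (const entity of statement.name) {
    // entity.db is null when the query does not schema-qualify the table.
    console.log(`truncated: ${entity.db ?? '<default schema>'}.${entity.table}`);
  }
}
```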
package/src/utils/mysql-utils.ts
CHANGED
@@ -2,8 +2,8 @@ import { logger } from '@powersync/lib-services-framework';
 import mysql from 'mysql2';
 import mysqlPromise from 'mysql2/promise';
 import * as types from '../types/types.js';
-import { coerce, gte } from 'semver';
-import {
+import { coerce, gte, satisfies } from 'semver';
+import { SourceEntityDescriptor } from '@powersync/service-core';
 
 export type RetriedQueryOptions = {
   connection: mysqlPromise.Connection;
@@ -86,6 +86,21 @@ export function isVersionAtLeast(version: string, minimumVersion: string): boole
   return gte(coercedVersion!, coercedMinimumVersion!, { loose: true });
 }
 
-export function
-
+export function satisfiesVersion(version: string, targetVersion: string): boolean {
+  const coercedVersion = coerce(version);
+
+  return satisfies(coercedVersion!, targetVersion!, { loose: true });
+}
+
+export function qualifiedMySQLTable(table: SourceEntityDescriptor): string;
+export function qualifiedMySQLTable(table: string, schema: string): string;
+
+export function qualifiedMySQLTable(table: SourceEntityDescriptor | string, schema?: string): string {
+  if (typeof table === 'object') {
+    return `\`${table.schema.replaceAll('`', '``')}\`.\`${table.name.replaceAll('`', '``')}\``;
+  } else if (schema) {
+    return `\`${schema.replaceAll('`', '``')}\`.\`${table.replaceAll('`', '``')}\``;
+  } else {
+    return `\`${table.replaceAll('`', '``')}\``;
+  }
 }