@powersync/service-module-mysql 0.7.4 → 0.9.0
This diff reflects the content of publicly available package versions as they appear in their respective public registries, and is provided for informational purposes only.
- package/CHANGELOG.md +61 -0
- package/LICENSE +3 -3
- package/dev/docker/mysql/init-scripts/my.cnf +1 -3
- package/dist/api/MySQLRouteAPIAdapter.js +12 -4
- package/dist/api/MySQLRouteAPIAdapter.js.map +1 -1
- package/dist/common/ReplicatedGTID.js +4 -0
- package/dist/common/ReplicatedGTID.js.map +1 -1
- package/dist/common/common-index.d.ts +1 -2
- package/dist/common/common-index.js +1 -2
- package/dist/common/common-index.js.map +1 -1
- package/dist/common/mysql-to-sqlite.d.ts +1 -1
- package/dist/common/mysql-to-sqlite.js +4 -0
- package/dist/common/mysql-to-sqlite.js.map +1 -1
- package/dist/common/schema-utils.d.ts +20 -0
- package/dist/common/{get-replication-columns.js → schema-utils.js} +73 -30
- package/dist/common/schema-utils.js.map +1 -0
- package/dist/replication/BinLogReplicationJob.js +4 -1
- package/dist/replication/BinLogReplicationJob.js.map +1 -1
- package/dist/replication/BinLogStream.d.ts +9 -6
- package/dist/replication/BinLogStream.js +117 -73
- package/dist/replication/BinLogStream.js.map +1 -1
- package/dist/replication/zongji/BinLogListener.d.ts +60 -6
- package/dist/replication/zongji/BinLogListener.js +347 -89
- package/dist/replication/zongji/BinLogListener.js.map +1 -1
- package/dist/replication/zongji/zongji-utils.d.ts +4 -1
- package/dist/replication/zongji/zongji-utils.js +9 -0
- package/dist/replication/zongji/zongji-utils.js.map +1 -1
- package/dist/types/node-sql-parser-extended-types.d.ts +31 -0
- package/dist/types/node-sql-parser-extended-types.js +2 -0
- package/dist/types/node-sql-parser-extended-types.js.map +1 -0
- package/dist/utils/mysql-utils.d.ts +4 -2
- package/dist/utils/mysql-utils.js +15 -3
- package/dist/utils/mysql-utils.js.map +1 -1
- package/dist/utils/parser-utils.d.ts +16 -0
- package/dist/utils/parser-utils.js +58 -0
- package/dist/utils/parser-utils.js.map +1 -0
- package/package.json +12 -11
- package/src/api/MySQLRouteAPIAdapter.ts +15 -4
- package/src/common/ReplicatedGTID.ts +6 -1
- package/src/common/common-index.ts +1 -2
- package/src/common/mysql-to-sqlite.ts +7 -1
- package/src/common/{get-replication-columns.ts → schema-utils.ts} +96 -37
- package/src/replication/BinLogReplicationJob.ts +4 -1
- package/src/replication/BinLogStream.ts +139 -94
- package/src/replication/zongji/BinLogListener.ts +421 -100
- package/src/replication/zongji/zongji-utils.ts +16 -1
- package/src/types/node-sql-parser-extended-types.ts +25 -0
- package/src/utils/mysql-utils.ts +19 -4
- package/src/utils/parser-utils.ts +73 -0
- package/test/src/BinLogListener.test.ts +421 -77
- package/test/src/BinLogStream.test.ts +128 -52
- package/test/src/BinlogStreamUtils.ts +12 -2
- package/test/src/mysql-to-sqlite.test.ts +5 -5
- package/test/src/parser-utils.test.ts +24 -0
- package/test/src/schema-changes.test.ts +659 -0
- package/test/src/util.ts +87 -1
- package/tsconfig.tsbuildinfo +1 -1
- package/dist/common/get-replication-columns.d.ts +0 -12
- package/dist/common/get-replication-columns.js.map +0 -1
- package/dist/common/get-tables-from-pattern.d.ts +0 -7
- package/dist/common/get-tables-from-pattern.js +0 -28
- package/dist/common/get-tables-from-pattern.js.map +0 -1
- package/src/common/get-tables-from-pattern.ts +0 -44
```diff
--- package/src/replication/zongji/BinLogListener.ts (0.7.4)
+++ package/src/replication/zongji/BinLogListener.ts (0.9.0)
@@ -1,16 +1,66 @@
 import * as common from '../../common/common-index.js';
 import async from 'async';
-import { BinLogEvent, StartOptions, TableMapEntry, ZongJi } from '@powersync/mysql-zongji';
+import { BinLogEvent, BinLogQueryEvent, StartOptions, TableMapEntry, ZongJi } from '@powersync/mysql-zongji';
 import * as zongji_utils from './zongji-utils.js';
 import { Logger, logger as defaultLogger } from '@powersync/lib-services-framework';
 import { MySQLConnectionManager } from '../MySQLConnectionManager.js';
+import timers from 'timers/promises';
+import pkg, {
+  AST,
+  BaseFrom,
+  DropIndexStatement,
+  Parser as ParserType,
+  RenameStatement,
+  TruncateStatement
+} from 'node-sql-parser';
+import {
+  isAlterTable,
+  isColumnExpression,
+  isConstraintExpression,
+  isCreateUniqueIndex,
+  isDropIndex,
+  isDropTable,
+  isRenameExpression,
+  isRenameTable,
+  isTruncate,
+  matchedSchemaChangeQuery
+} from '../../utils/parser-utils.js';
+import { TablePattern } from '@powersync/service-sync-rules';
 
-
-// MySQL server will automatically terminate replication connections after 60 seconds of inactivity, so this guards against that.
-const MAX_QUEUE_PAUSE_TIME_MS = 45_000;
+const { Parser } = pkg;
 
+/**
+ * Seconds of inactivity after which a keepalive event is sent by the MySQL server.
+ */
+export const KEEPALIVE_INACTIVITY_THRESHOLD = 30;
 export type Row = Record<string, any>;
 
+/**
+ * Schema changes that are detectable by inspecting query events.
+ * Create table statements are not included here, since new tables are automatically detected when row events
+ * are received for them.
+ */
+export enum SchemaChangeType {
+  RENAME_TABLE = 'Rename Table',
+  DROP_TABLE = 'Drop Table',
+  TRUNCATE_TABLE = 'Truncate Table',
+  ALTER_TABLE_COLUMN = 'Alter Table Column',
+  REPLICATION_IDENTITY = 'Alter Replication Identity'
+}
+
+export interface SchemaChange {
+  type: SchemaChangeType;
+  /**
+   * The table that the schema change applies to.
+   */
+  table: string;
+  schema: string;
+  /**
+   * Populated for table renames if the newTable was matched by the DatabaseFilter
+   */
+  newTable?: string;
+}
+
 export interface BinLogEventHandler {
   onTransactionStart: (options: { timestamp: Date }) => Promise<void>;
   onRotate: () => Promise<void>;
@@ -18,15 +68,18 @@ export interface BinLogEventHandler {
   onUpdate: (rowsAfter: Row[], rowsBefore: Row[], tableMap: TableMapEntry) => Promise<void>;
   onDelete: (rows: Row[], tableMap: TableMapEntry) => Promise<void>;
   onCommit: (lsn: string) => Promise<void>;
+  onSchemaChange: (change: SchemaChange) => Promise<void>;
+  onKeepAlive: (lsn: string) => Promise<void>;
 }
 
 export interface BinLogListenerOptions {
   connectionManager: MySQLConnectionManager;
   eventHandler: BinLogEventHandler;
-
+  sourceTables: TablePattern[];
   serverId: number;
-
+  startGTID: common.ReplicatedGTID;
   logger?: Logger;
+  keepAliveInactivitySeconds?: number;
 }
 
 /**
@@ -34,29 +87,39 @@ export interface BinLogListenerOptions {
  * events on the provided BinLogEventHandler.
  */
 export class BinLogListener {
+  private sqlParser: ParserType;
   private connectionManager: MySQLConnectionManager;
   private eventHandler: BinLogEventHandler;
   private binLogPosition: common.BinLogPosition;
-  private currentGTID: common.ReplicatedGTID
+  private currentGTID: common.ReplicatedGTID;
   private logger: Logger;
+  private listenerError: Error | null;
+  private databaseFilter: { [schema: string]: (table: string) => boolean };
 
+  private isStopped: boolean = false;
+  private isStopping: boolean = false;
+
+  // Flag to indicate if we are currently in a transaction that involves multiple row mutation events.
+  private isTransactionOpen = false;
   zongji: ZongJi;
   processingQueue: async.QueueObject<BinLogEvent>;
+
   /**
    * The combined size in bytes of all the binlog events currently in the processing queue.
    */
-  queueMemoryUsage: number;
+  queueMemoryUsage: number = 0;
 
   constructor(public options: BinLogListenerOptions) {
    this.logger = options.logger ?? defaultLogger;
     this.connectionManager = options.connectionManager;
     this.eventHandler = options.eventHandler;
-    this.binLogPosition = options.
-    this.currentGTID =
-
-    this.processingQueue =
-    this.queueMemoryUsage = 0;
+    this.binLogPosition = options.startGTID.position;
+    this.currentGTID = options.startGTID;
+    this.sqlParser = new Parser();
+    this.processingQueue = this.createProcessingQueue();
     this.zongji = this.createZongjiListener();
+    this.listenerError = null;
+    this.databaseFilter = this.createDatabaseFilter(options.sourceTables);
   }
 
   /**
@@ -67,126 +130,164 @@ export class BinLogListener {
     return this.connectionManager.options.binlog_queue_memory_limit * 1024 * 1024;
   }
 
-  public async start(): Promise<void> {
+  public async start(isRestart: boolean = false): Promise<void> {
     if (this.isStopped) {
       return;
     }
-
+
+    this.logger.info(
+      `${isRestart ? 'Restarting' : 'Starting'} BinLog Listener with replica client id:${this.options.serverId}...`
+    );
+
+    // Set a heartbeat interval for the Zongji replication connection, these events are enough to keep the connection
+    // alive for setTimeout to work on the socket.
+    // The heartbeat needs to be set before starting the listener, since the replication connection is locked once replicating
+    await new Promise((resolve, reject) => {
+      this.zongji.connection.query(
+        // In nanoseconds, 10^9 = 1s
+        `set @master_heartbeat_period=${this.options.keepAliveInactivitySeconds ?? KEEPALIVE_INACTIVITY_THRESHOLD}*1000000000`,
+        (error: any, results: any, _fields: any) => {
+          if (error) {
+            reject(error);
+          } else {
+            this.logger.info('Successfully set up replication connection heartbeat.');
+            resolve(results);
+          }
+        }
+      );
+    });
+
+    // The _socket member is only set after a query is run on the connection, so we set the timeout after setting the heartbeat.
+    // The timeout here must be greater than the master_heartbeat_period.
+    const socket = this.zongji.connection._socket!;
+    socket.setTimeout(60_000, () => {
+      this.logger.info('Destroying socket due to replication connection timeout.');
+      socket.destroy(new Error('Replication connection timeout.'));
+    });
 
     this.zongji.start({
-      //
-
-
-
+      // Tablemap events always need to be included for the other row events to work
+      includeEvents: [
+        'tablemap',
+        'writerows',
+        'updaterows',
+        'deleterows',
+        'xid',
+        'rotate',
+        'gtidlog',
+        'query',
+        'heartbeat',
+        'heartbeat_v2'
+      ],
+      includeSchema: this.databaseFilter,
       filename: this.binLogPosition.filename,
       position: this.binLogPosition.offset,
       serverId: this.options.serverId
     } satisfies StartOptions);
 
-    return new Promise
-
-
-
+    return new Promise((resolve) => {
+      this.zongji.once('ready', () => {
+        this.logger.info(
+          `BinLog Listener ${isRestart ? 'restarted' : 'started'}. Listening for events from position: ${this.binLogPosition.filename}:${this.binLogPosition.offset}`
+        );
         resolve();
-      }
-
-    this.zongji.on('error', (error) => {
-      if (!this.isStopped) {
-        this.logger.error('Binlog listener error:', error);
-        this.stop();
-        reject(error);
-      } else {
-        this.logger.warn('Binlog listener error during shutdown:', error);
-      }
       });
+    });
+  }
 
-
-
-
-
-
-
-        this.logger.warn('BinlogEvent processing error during shutdown:', error);
-      }
-    });
+  private async restartZongji(): Promise<void> {
+    if (this.zongji.stopped) {
+      this.zongji = this.createZongjiListener();
+      await this.start(true);
+    }
+  }
 
-
-
-
+  private async stopZongji(): Promise<void> {
+    if (!this.zongji.stopped) {
+      this.logger.info('Stopping BinLog Listener...');
+      await new Promise<void>((resolve) => {
+        this.zongji.once('stopped', () => {
+          resolve();
+        });
+        this.zongji.stop();
       });
-
+      this.logger.info('BinLog Listener stopped.');
+    }
   }
 
-  public stop(): void {
-    if (!this.isStopped) {
-      this.
+  public async stop(): Promise<void> {
+    if (!(this.isStopped || this.isStopping)) {
+      this.isStopping = true;
+      await this.stopZongji();
       this.processingQueue.kill();
+
+      this.isStopped = true;
     }
   }
 
-
-
+  public async replicateUntilStopped(): Promise<void> {
+    while (!this.isStopped) {
+      await timers.setTimeout(1_000);
+    }
+
+    if (this.listenerError) {
+      this.logger.error('BinLog Listener stopped due to an error:', this.listenerError);
+      throw this.listenerError;
+    }
+  }
+
+  private createProcessingQueue(): async.QueueObject<BinLogEvent> {
+    const queue = async.queue(this.createQueueWorker(), 1);
+
+    queue.error((error) => {
+      if (!(this.isStopped || this.isStopping)) {
+        this.listenerError = error;
+        this.stop();
+      } else {
+        this.logger.warn('Error processing BinLog event during shutdown:', error);
+      }
+    });
+
+    return queue;
   }
 
   private createZongjiListener(): ZongJi {
     const zongji = this.connectionManager.createBinlogListener();
 
     zongji.on('binlog', async (evt) => {
-      this.logger.
+      this.logger.debug(`Received BinLog event:${evt.getEventName()}`);
+
       this.processingQueue.push(evt);
       this.queueMemoryUsage += evt.size;
 
       // When the processing queue grows past the threshold, we pause the binlog listener
       if (this.isQueueOverCapacity()) {
         this.logger.info(
-          `
+          `BinLog processing queue has reached its memory limit of [${this.connectionManager.options.binlog_queue_memory_limit}MB]. Pausing BinLog Listener.`
         );
-
-
-
-
-
-        await Promise.race([this.processingQueue.empty(), resumeTimeoutPromise]);
-
-        this.logger.info(`Binlog processing queue backlog cleared. Resuming Binlog listener.`);
-        zongji.resume();
+        await this.stopZongji();
+        await this.processingQueue.drain();
+        this.logger.info(`BinLog processing queue backlog cleared. Resuming BinLog Listener.`);
+        await this.restartZongji();
       }
     });
 
-    zongji.on('
-
-
-
-
-      this.
-
-        'set @master_heartbeat_period=28*1000000000',
-        (error: any, results: any, fields: any) => {
-          if (error) {
-            reject(error);
-          } else {
-            this.logger.info('Successfully set up replication connection heartbeat...');
-            resolve(results);
-          }
-        }
-      );
-    });
-
-    // The _socket member is only set after a query is run on the connection, so we set the timeout after setting the heartbeat.
-    // The timeout here must be greater than the master_heartbeat_period.
-    const socket = this.zongji.connection._socket!;
-    socket.setTimeout(60_000, () => {
-      this.logger.info('Destroying socket due to replication connection timeout.');
-      socket.destroy(new Error('Replication connection timeout.'));
-    });
-    this.logger.info(
-      `BinLog listener setup complete. Reading binlog from: ${this.binLogPosition.filename}:${this.binLogPosition.offset}`
-    );
+    zongji.on('error', (error) => {
+      if (!(this.isStopped || this.isStopping)) {
+        this.listenerError = error;
+        this.stop();
+      } else {
+        this.logger.warn('Ignored BinLog Listener error during shutdown:', error);
+      }
     });
 
     return zongji;
   }
 
+  isQueueOverCapacity(): boolean {
+    return this.queueMemoryUsage >= this.queueMemoryLimit;
+  }
+
   private createQueueWorker() {
     return async (evt: BinLogEvent) => {
       switch (true) {
@@ -201,15 +302,32 @@ export class BinLogListener {
               offset: evt.nextPosition
             }
           });
+          this.binLogPosition.offset = evt.nextPosition;
           await this.eventHandler.onTransactionStart({ timestamp: new Date(evt.timestamp) });
+          this.logger.info(`Processed GTID event: ${this.currentGTID.comparable}`);
           break;
         case zongji_utils.eventIsRotation(evt):
+          // The first event when starting replication is a synthetic Rotate event
+          // It describes the last binlog file and position that the replica client processed
           this.binLogPosition.filename = evt.binlogName;
-          this.binLogPosition.offset = evt.position;
+          this.binLogPosition.offset = evt.nextPosition !== 0 ? evt.nextPosition : evt.position;
           await this.eventHandler.onRotate();
+
+          const newFile = this.binLogPosition.filename !== evt.binlogName;
+          if (newFile) {
+            this.logger.info(
+              `Processed Rotate event. New BinLog file is: ${this.binLogPosition.filename}:${this.binLogPosition.offset}`
+            );
+          }
+
           break;
         case zongji_utils.eventIsWriteMutation(evt):
-
+          const tableMap = evt.tableMap[evt.tableId];
+          await this.eventHandler.onWrite(evt.rows, tableMap);
+          this.binLogPosition.offset = evt.nextPosition;
+          this.logger.info(
+            `Processed Write event for table [${tableMap.parentSchema}.${tableMap.tableName}]. ${evt.rows.length} row(s) inserted.`
+          );
           break;
         case zongji_utils.eventIsUpdateMutation(evt):
           await this.eventHandler.onUpdate(
@@ -217,19 +335,40 @@ export class BinLogListener {
             evt.rows.map((row) => row.before),
             evt.tableMap[evt.tableId]
           );
+          this.binLogPosition.offset = evt.nextPosition;
+          this.logger.info(
+            `Processed Update event for table [${evt.tableMap[evt.tableId].tableName}]. ${evt.rows.length} row(s) updated.`
+          );
           break;
         case zongji_utils.eventIsDeleteMutation(evt):
          await this.eventHandler.onDelete(evt.rows, evt.tableMap[evt.tableId]);
+          this.binLogPosition.offset = evt.nextPosition;
+          this.logger.info(
+            `Processed Delete event for table [${evt.tableMap[evt.tableId].tableName}]. ${evt.rows.length} row(s) deleted.`
+          );
+          break;
+        case zongji_utils.eventIsHeartbeat(evt):
+        case zongji_utils.eventIsHeartbeat_v2(evt):
+          // Heartbeats are sent by the master to keep the connection alive after a period of inactivity. They are synthetic
+          // so are not written to the binlog. Consequently, they have no effect on the binlog position.
+          // We forward these along with the current GTID to the event handler, but don't want to do this if a transaction is in progress.
+          if (!this.isTransactionOpen) {
+            await this.eventHandler.onKeepAlive(this.currentGTID.comparable);
+          }
+          this.logger.debug(`Processed Heartbeat event. Current GTID is: ${this.currentGTID.comparable}`);
          break;
         case zongji_utils.eventIsXid(evt):
+          this.isTransactionOpen = false;
+          this.binLogPosition.offset = evt.nextPosition;
           const LSN = new common.ReplicatedGTID({
-            raw_gtid: this.currentGTID
-            position:
-            filename: this.binLogPosition.filename,
-            offset: evt.nextPosition
-            }
+            raw_gtid: this.currentGTID.raw,
+            position: this.binLogPosition
           }).comparable;
           await this.eventHandler.onCommit(LSN);
+          this.logger.info(`Processed Xid event - transaction complete. LSN: ${LSN}.`);
+          break;
+        case zongji_utils.eventIsQuery(evt):
+          await this.processQueryEvent(evt);
           break;
       }
 
@@ -237,7 +376,189 @@ export class BinLogListener {
     };
   }
 
-
-
+  private async processQueryEvent(event: BinLogQueryEvent): Promise<void> {
+    const { query, nextPosition } = event;
+
+    // BEGIN query events mark the start of a transaction before any row events. They are not schema changes so no further parsing is necessary.
+    if (query === 'BEGIN') {
+      this.isTransactionOpen = true;
+      return;
+    }
+
+    const schemaChanges = this.toSchemaChanges(query, event.schema);
+    if (schemaChanges.length > 0) {
+      // Handling schema changes can take a long time, so we stop the Zongji listener whilst handling them to prevent the listener from timing out.
+      await this.stopZongji();
+
+      for (const change of schemaChanges) {
+        this.logger.info(`Processing schema change ${change.type} for table [${change.schema}.${change.table}]`);
+        await this.eventHandler.onSchemaChange(change);
+      }
+
+      // DDL queries are auto committed, but do not come with a corresponding Xid event; in those cases we trigger a manual commit if we are not already in a transaction.
+      // Some DDL queries include row events, and in those cases will include an Xid event.
+      if (!this.isTransactionOpen) {
+        this.binLogPosition.offset = nextPosition;
+        const LSN = new common.ReplicatedGTID({
+          raw_gtid: this.currentGTID.raw,
+          position: this.binLogPosition
+        }).comparable;
+        await this.eventHandler.onCommit(LSN);
+      }
+
+      this.logger.info(`Successfully processed ${schemaChanges.length} schema change(s).`);
+
+      // If there are still events in the processing queue, we need to process those before restarting Zongji.
+      // This avoids potentially processing the same events again after a restart.
+      if (!this.processingQueue.idle()) {
+        this.logger.info(`Processing [${this.processingQueue.length()}] event(s) before resuming...`);
+        this.processingQueue.drain(async () => {
+          await this.restartZongji();
+        });
+      } else {
+        await this.restartZongji();
+      }
+    } else if (!this.isTransactionOpen) {
+      this.binLogPosition.offset = nextPosition;
+      const LSN = new common.ReplicatedGTID({
+        raw_gtid: this.currentGTID.raw,
+        position: this.binLogPosition
+      }).comparable;
+      await this.eventHandler.onCommit(LSN);
+    }
+  }
+
+  /**
+   * Function that interprets a DDL query for any applicable schema changes.
+   * If the query does not contain any relevant schema changes, an empty array is returned.
+   * The defaultSchema is derived from the database set on the MySQL Node.js connection client.
+   * It is used as a fallback when the schema/database cannot be determined from the query DDL.
+   *
+   * @param query
+   * @param defaultSchema
+   */
+  private toSchemaChanges(query: string, defaultSchema: string): SchemaChange[] {
+    let statements: AST[] = [];
+    try {
+      const ast = this.sqlParser.astify(query, { database: 'MySQL' });
+      statements = Array.isArray(ast) ? ast : [ast];
+    } catch (error) {
+      if (matchedSchemaChangeQuery(query, Object.values(this.databaseFilter))) {
+        this.logger.warn(
+          `Failed to parse query: [${query}].
+          Please review it for schema changes and manually redeploy the sync rules if required.`
+        );
+      }
+      return [];
+    }
+
+    const changes: SchemaChange[] = [];
+    for (const statement of statements) {
+      if (isTruncate(statement)) {
+        const truncateStatement = statement as TruncateStatement;
+        // Truncate statements can apply to multiple tables
+        for (const entity of truncateStatement.name) {
+          changes.push({
+            type: SchemaChangeType.TRUNCATE_TABLE,
+            table: entity.table,
+            schema: entity.db ?? defaultSchema
+          });
+        }
+      } else if (isDropTable(statement)) {
+        for (const entity of statement.name) {
+          changes.push({ type: SchemaChangeType.DROP_TABLE, table: entity.table, schema: entity.db ?? defaultSchema });
+        }
+      } else if (isDropIndex(statement)) {
+        const dropStatement = statement as DropIndexStatement;
+        changes.push({
+          type: SchemaChangeType.REPLICATION_IDENTITY,
+          table: dropStatement.table.table,
+          schema: dropStatement.table.db ?? defaultSchema
+        });
+      } else if (isCreateUniqueIndex(statement)) {
+        // Potential change to the replication identity if the table has no prior unique constraint
+        changes.push({
+          type: SchemaChangeType.REPLICATION_IDENTITY,
+          // @ts-ignore - The type definitions for node-sql-parser do not reflect the correct structure here
+          table: statement.table!.table,
+          // @ts-ignore
+          schema: statement.table!.db ?? defaultSchema
+        });
+      } else if (isRenameTable(statement)) {
+        const renameStatement = statement as RenameStatement;
+        // Rename statements can apply to multiple tables
+        for (const table of renameStatement.table) {
+          const schema = table[0].db ?? defaultSchema;
+          const isNewTableIncluded = this.databaseFilter[schema](table[1].table);
+          changes.push({
+            type: SchemaChangeType.RENAME_TABLE,
+            table: table[0].table,
+            newTable: isNewTableIncluded ? table[1].table : undefined,
+            schema
+          });
+        }
+      } else if (isAlterTable(statement)) {
+        const fromTable = statement.table[0] as BaseFrom;
+        for (const expression of statement.expr) {
+          if (isRenameExpression(expression)) {
+            changes.push({
+              type: SchemaChangeType.RENAME_TABLE,
+              table: fromTable.table,
+              newTable: expression.table,
+              schema: fromTable.db ?? defaultSchema
+            });
+          } else if (isColumnExpression(expression)) {
+            changes.push({
+              type: SchemaChangeType.ALTER_TABLE_COLUMN,
+              table: fromTable.table,
+              schema: fromTable.db ?? defaultSchema
+            });
+          } else if (isConstraintExpression(expression)) {
+            // Potential changes to the replication identity
+            changes.push({
+              type: SchemaChangeType.REPLICATION_IDENTITY,
+              table: fromTable.table,
+              schema: fromTable.db ?? defaultSchema
+            });
+          }
+        }
+      }
+    }
+    // Filter out schema changes that are not relevant to the included tables
+    return changes.filter(
+      (change) =>
+        this.isTableIncluded(change.table, change.schema) ||
+        (change.newTable && this.isTableIncluded(change.newTable, change.schema))
+    );
+  }
+
+  private isTableIncluded(tableName: string, schema: string): boolean {
+    return this.databaseFilter[schema] && this.databaseFilter[schema](tableName);
+  }
+
+  private createDatabaseFilter(sourceTables: TablePattern[]): { [schema: string]: (table: string) => boolean } {
+    // Group sync rule tables by schema
+    const schemaMap = new Map<string, TablePattern[]>();
+    for (const table of sourceTables) {
+      if (!schemaMap.has(table.schema)) {
+        const tables = [table];
+        schemaMap.set(table.schema, tables);
+      } else {
+        schemaMap.get(table.schema)!.push(table);
+      }
+    }
+
+    const databaseFilter: { [schema: string]: (table: string) => boolean } = {};
+    for (const entry of schemaMap.entries()) {
+      const [schema, sourceTables] = entry;
+      databaseFilter[schema] = (table: string) =>
+        sourceTables.findIndex((sourceTable) =>
+          sourceTable.isWildcard
+            ? table.startsWith(sourceTable.tablePattern.substring(0, sourceTable.tablePattern.length - 1))
+            : table === sourceTable.name
+        ) !== -1;
+    }
+
+    return databaseFilter;
   }
 }
```
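
For orientation, here is a minimal usage sketch (not part of the published diff) of the reworked `BinLogListener` API above: `sourceTables` and `startGTID` are now constructor options, and the event handler gains the `onSchemaChange` and `onKeepAlive` hooks added in 0.9.0. The `declare` statements and the `serverId` value are placeholder assumptions; in the package itself this wiring lives in `BinLogStream.ts`.

```ts
// Sketch only: assumes this file sits alongside BinLogListener.ts in src/replication/zongji/.
import { TablePattern } from '@powersync/service-sync-rules';
import * as common from '../../common/common-index.js';
import { MySQLConnectionManager } from '../MySQLConnectionManager.js';
import { BinLogListener, SchemaChange, SchemaChangeType } from './BinLogListener.js';

// Placeholders for inputs the real BinLogStream resolves itself:
declare const connectionManager: MySQLConnectionManager;
declare const startGTID: common.ReplicatedGTID;
declare const sourceTables: TablePattern[];

const listener = new BinLogListener({
  connectionManager,
  serverId: 12345, // hypothetical replica client id
  sourceTables, // tables from the sync rules; also drives the schema-change DatabaseFilter
  startGTID, // replication resumes from this GTID's binlog position
  keepAliveInactivitySeconds: 30, // optional; defaults to KEEPALIVE_INACTIVITY_THRESHOLD
  eventHandler: {
    onTransactionStart: async () => {},
    onRotate: async () => {},
    onWrite: async (rows, tableMap) => {},
    onUpdate: async (rowsAfter, rowsBefore, tableMap) => {},
    onDelete: async (rows, tableMap) => {},
    onCommit: async (lsn) => {},
    // New in 0.9.0: DDL detected by parsing binlog query events.
    onSchemaChange: async (change: SchemaChange) => {
      if (change.type === SchemaChangeType.RENAME_TABLE) {
        // change.newTable is only set when the new name matches the source table patterns.
      }
    },
    // New in 0.9.0: heartbeats outside a transaction surface the current GTID during idle periods.
    onKeepAlive: async (lsn) => {}
  }
});

await listener.start();
await listener.replicateUntilStopped(); // returns after stop(); rethrows any stored listener error
```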
```diff
--- package/src/replication/zongji/zongji-utils.ts (0.7.4)
+++ package/src/replication/zongji/zongji-utils.ts (0.9.0)
@@ -5,7 +5,10 @@ import {
   BinLogRotationEvent,
   BinLogTableMapEvent,
   BinLogRowUpdateEvent,
-  BinLogXidEvent
+  BinLogXidEvent,
+  BinLogQueryEvent,
+  BinLogHeartbeatEvent,
+  BinLogHeartbeatEvent_V2
 } from '@powersync/mysql-zongji';
 
 export function eventIsGTIDLog(event: BinLogEvent): event is BinLogGTIDLogEvent {
@@ -20,6 +23,14 @@ export function eventIsXid(event: BinLogEvent): event is BinLogXidEvent {
   return event.getEventName() == 'xid';
 }
 
+export function eventIsHeartbeat(event: BinLogEvent): event is BinLogHeartbeatEvent {
+  return event.getEventName() == 'heartbeat';
+}
+
+export function eventIsHeartbeat_v2(event: BinLogEvent): event is BinLogHeartbeatEvent_V2 {
+  return event.getEventName() == 'heartbeat_v2';
+}
+
 export function eventIsRotation(event: BinLogEvent): event is BinLogRotationEvent {
   return event.getEventName() == 'rotate';
 }
@@ -35,3 +46,7 @@ export function eventIsDeleteMutation(event: BinLogEvent): event is BinLogRowEve
 export function eventIsUpdateMutation(event: BinLogEvent): event is BinLogRowUpdateEvent {
   return event.getEventName() == 'updaterows';
 }
+
+export function eventIsQuery(event: BinLogEvent): event is BinLogQueryEvent {
+  return event.getEventName() == 'query';
+}
```