@powersync/service-module-mysql 0.6.5 → 0.7.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +30 -0
- package/dist/api/MySQLRouteAPIAdapter.d.ts +1 -1
- package/dist/api/MySQLRouteAPIAdapter.js +1 -1
- package/dist/api/MySQLRouteAPIAdapter.js.map +1 -1
- package/dist/replication/BinLogReplicationJob.d.ts +2 -0
- package/dist/replication/BinLogReplicationJob.js +10 -3
- package/dist/replication/BinLogReplicationJob.js.map +1 -1
- package/dist/replication/BinLogReplicator.d.ts +1 -0
- package/dist/replication/BinLogReplicator.js +22 -0
- package/dist/replication/BinLogReplicator.js.map +1 -1
- package/dist/replication/BinLogStream.d.ts +17 -1
- package/dist/replication/BinLogStream.js +126 -174
- package/dist/replication/BinLogStream.js.map +1 -1
- package/dist/replication/MySQLConnectionManager.d.ts +1 -1
- package/dist/replication/MySQLConnectionManager.js +2 -1
- package/dist/replication/MySQLConnectionManager.js.map +1 -1
- package/dist/replication/zongji/BinLogListener.d.ts +54 -0
- package/dist/replication/zongji/BinLogListener.js +192 -0
- package/dist/replication/zongji/BinLogListener.js.map +1 -0
- package/dist/replication/zongji/zongji-utils.d.ts +5 -4
- package/dist/replication/zongji/zongji-utils.js +3 -0
- package/dist/replication/zongji/zongji-utils.js.map +1 -1
- package/dist/types/types.d.ts +2 -0
- package/dist/types/types.js +5 -1
- package/dist/types/types.js.map +1 -1
- package/dist/utils/mysql-utils.js +1 -0
- package/dist/utils/mysql-utils.js.map +1 -1
- package/package.json +9 -9
- package/src/api/MySQLRouteAPIAdapter.ts +1 -1
- package/src/replication/BinLogReplicationJob.ts +11 -3
- package/src/replication/BinLogReplicator.ts +25 -0
- package/src/replication/BinLogStream.ts +151 -201
- package/src/replication/MySQLConnectionManager.ts +2 -1
- package/src/replication/zongji/BinLogListener.ts +243 -0
- package/src/replication/zongji/zongji-utils.ts +10 -5
- package/src/types/types.ts +8 -1
- package/src/utils/mysql-utils.ts +1 -0
- package/test/src/BinLogListener.test.ts +161 -0
- package/test/src/BinLogStream.test.ts +4 -9
- package/test/src/mysql-to-sqlite.test.ts +1 -1
- package/test/src/util.ts +12 -0
- package/test/tsconfig.json +1 -1
- package/tsconfig.tsbuildinfo +1 -1
- package/src/replication/zongji/zongji.d.ts +0 -129
package/src/replication/zongji/BinLogListener.ts
ADDED
@@ -0,0 +1,243 @@
+import * as common from '../../common/common-index.js';
+import async from 'async';
+import { BinLogEvent, StartOptions, TableMapEntry, ZongJi } from '@powersync/mysql-zongji';
+import * as zongji_utils from './zongji-utils.js';
+import { Logger, logger as defaultLogger } from '@powersync/lib-services-framework';
+import { MySQLConnectionManager } from '../MySQLConnectionManager.js';
+
+// Maximum time the processing queue can be paused before resuming automatically
+// MySQL server will automatically terminate replication connections after 60 seconds of inactivity, so this guards against that.
+const MAX_QUEUE_PAUSE_TIME_MS = 45_000;
+
+export type Row = Record<string, any>;
+
+export interface BinLogEventHandler {
+  onTransactionStart: (options: { timestamp: Date }) => Promise<void>;
+  onRotate: () => Promise<void>;
+  onWrite: (rows: Row[], tableMap: TableMapEntry) => Promise<void>;
+  onUpdate: (rowsAfter: Row[], rowsBefore: Row[], tableMap: TableMapEntry) => Promise<void>;
+  onDelete: (rows: Row[], tableMap: TableMapEntry) => Promise<void>;
+  onCommit: (lsn: string) => Promise<void>;
+}
+
+export interface BinLogListenerOptions {
+  connectionManager: MySQLConnectionManager;
+  eventHandler: BinLogEventHandler;
+  includedTables: string[];
+  serverId: number;
+  startPosition: common.BinLogPosition;
+  logger?: Logger;
+}
+
+/**
+ * Wrapper class for the Zongji BinLog listener. Internally handles the creation and management of the listener and posts
+ * events on the provided BinLogEventHandler.
+ */
+export class BinLogListener {
+  private connectionManager: MySQLConnectionManager;
+  private eventHandler: BinLogEventHandler;
+  private binLogPosition: common.BinLogPosition;
+  private currentGTID: common.ReplicatedGTID | null;
+  private logger: Logger;
+
+  zongji: ZongJi;
+  processingQueue: async.QueueObject<BinLogEvent>;
+  /**
+   * The combined size in bytes of all the binlog events currently in the processing queue.
+   */
+  queueMemoryUsage: number;
+
+  constructor(public options: BinLogListenerOptions) {
+    this.logger = options.logger ?? defaultLogger;
+    this.connectionManager = options.connectionManager;
+    this.eventHandler = options.eventHandler;
+    this.binLogPosition = options.startPosition;
+    this.currentGTID = null;
+
+    this.processingQueue = async.queue(this.createQueueWorker(), 1);
+    this.queueMemoryUsage = 0;
+    this.zongji = this.createZongjiListener();
+  }
+
+  /**
+   * The queue memory limit in bytes as defined in the connection options.
+   * @private
+   */
+  private get queueMemoryLimit(): number {
+    return this.connectionManager.options.binlog_queue_memory_limit * 1024 * 1024;
+  }
+
+  public async start(): Promise<void> {
+    if (this.isStopped) {
+      return;
+    }
+    this.logger.info(`Starting replication. Created replica client with serverId:${this.options.serverId}`);
+
+    this.zongji.start({
+      // We ignore the unknown/heartbeat event since it currently serves no purpose other than to keep the connection alive
+      // tablemap events always need to be included for the other row events to work
+      includeEvents: ['tablemap', 'writerows', 'updaterows', 'deleterows', 'xid', 'rotate', 'gtidlog'],
+      includeSchema: { [this.connectionManager.databaseName]: this.options.includedTables },
+      filename: this.binLogPosition.filename,
+      position: this.binLogPosition.offset,
+      serverId: this.options.serverId
+    } satisfies StartOptions);
+
+    return new Promise<void>((resolve, reject) => {
+      // Handle an edge case where the listener has already been stopped before completing startup
+      if (this.isStopped) {
+        this.logger.info('BinLog listener was stopped before startup completed.');
+        resolve();
+      }
+
+      this.zongji.on('error', (error) => {
+        if (!this.isStopped) {
+          this.logger.error('Binlog listener error:', error);
+          this.stop();
+          reject(error);
+        } else {
+          this.logger.warn('Binlog listener error during shutdown:', error);
+        }
+      });
+
+      this.processingQueue.error((error) => {
+        if (!this.isStopped) {
+          this.logger.error('BinlogEvent processing error:', error);
+          this.stop();
+          reject(error);
+        } else {
+          this.logger.warn('BinlogEvent processing error during shutdown:', error);
+        }
+      });
+
+      this.zongji.on('stopped', () => {
+        resolve();
+        this.logger.info('BinLog listener stopped. Replication ended.');
+      });
+    });
+  }
+
+  public stop(): void {
+    if (!this.isStopped) {
+      this.zongji.stop();
+      this.processingQueue.kill();
+    }
+  }
+
+  private get isStopped(): boolean {
+    return this.zongji.stopped;
+  }
+
+  private createZongjiListener(): ZongJi {
+    const zongji = this.connectionManager.createBinlogListener();
+
+    zongji.on('binlog', async (evt) => {
+      this.logger.info(`Received Binlog event:${evt.getEventName()}`);
+      this.processingQueue.push(evt);
+      this.queueMemoryUsage += evt.size;
+
+      // When the processing queue grows past the threshold, we pause the binlog listener
+      if (this.isQueueOverCapacity()) {
+        this.logger.info(
+          `Binlog processing queue has reached its memory limit of [${this.connectionManager.options.binlog_queue_memory_limit}MB]. Pausing Binlog listener.`
+        );
+        zongji.pause();
+        const resumeTimeoutPromise = new Promise((resolve) => {
+          setTimeout(() => resolve('timeout'), MAX_QUEUE_PAUSE_TIME_MS);
+        });
+
+        await Promise.race([this.processingQueue.empty(), resumeTimeoutPromise]);
+
+        this.logger.info(`Binlog processing queue backlog cleared. Resuming Binlog listener.`);
+        zongji.resume();
+      }
+    });
+
+    zongji.on('ready', async () => {
+      // Set a heartbeat interval for the Zongji replication connection
+      // Zongji does not explicitly handle the heartbeat events - they are categorized as event:unknown
+      // The heartbeat events are enough to keep the connection alive for setTimeout to work on the socket.
+      await new Promise((resolve, reject) => {
+        this.zongji.connection.query(
+          // In nanoseconds, 10^9 = 1s
+          'set @master_heartbeat_period=28*1000000000',
+          (error: any, results: any, fields: any) => {
+            if (error) {
+              reject(error);
+            } else {
+              this.logger.info('Successfully set up replication connection heartbeat...');
+              resolve(results);
+            }
+          }
+        );
+      });
+
+      // The _socket member is only set after a query is run on the connection, so we set the timeout after setting the heartbeat.
+      // The timeout here must be greater than the master_heartbeat_period.
+      const socket = this.zongji.connection._socket!;
+      socket.setTimeout(60_000, () => {
+        this.logger.info('Destroying socket due to replication connection timeout.');
+        socket.destroy(new Error('Replication connection timeout.'));
+      });
+      this.logger.info(
+        `BinLog listener setup complete. Reading binlog from: ${this.binLogPosition.filename}:${this.binLogPosition.offset}`
+      );
+    });
+
+    return zongji;
+  }
+
+  private createQueueWorker() {
+    return async (evt: BinLogEvent) => {
+      switch (true) {
+        case zongji_utils.eventIsGTIDLog(evt):
+          this.currentGTID = common.ReplicatedGTID.fromBinLogEvent({
+            raw_gtid: {
+              server_id: evt.serverId,
+              transaction_range: evt.transactionRange
+            },
+            position: {
+              filename: this.binLogPosition.filename,
+              offset: evt.nextPosition
+            }
+          });
+          await this.eventHandler.onTransactionStart({ timestamp: new Date(evt.timestamp) });
+          break;
+        case zongji_utils.eventIsRotation(evt):
+          this.binLogPosition.filename = evt.binlogName;
+          this.binLogPosition.offset = evt.position;
+          await this.eventHandler.onRotate();
+          break;
+        case zongji_utils.eventIsWriteMutation(evt):
+          await this.eventHandler.onWrite(evt.rows, evt.tableMap[evt.tableId]);
+          break;
+        case zongji_utils.eventIsUpdateMutation(evt):
+          await this.eventHandler.onUpdate(
+            evt.rows.map((row) => row.after),
+            evt.rows.map((row) => row.before),
+            evt.tableMap[evt.tableId]
+          );
+          break;
+        case zongji_utils.eventIsDeleteMutation(evt):
+          await this.eventHandler.onDelete(evt.rows, evt.tableMap[evt.tableId]);
+          break;
+        case zongji_utils.eventIsXid(evt):
+          const LSN = new common.ReplicatedGTID({
+            raw_gtid: this.currentGTID!.raw,
+            position: {
+              filename: this.binLogPosition.filename,
+              offset: evt.nextPosition
+            }
+          }).comparable;
+          await this.eventHandler.onCommit(LSN);
+          break;
+      }
+
+      this.queueMemoryUsage -= evt.size;
+    };
+  }
+
+  isQueueOverCapacity(): boolean {
+    return this.queueMemoryUsage >= this.queueMemoryLimit;
+  }
+}
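For orientation, the following is an illustrative TypeScript sketch (not part of this diff) showing how the new listener is typically wired up, mirroring the BinLogListener.test.ts added further down. The connection config and table names are placeholders, and the `@module/*` import alias is the one used by the package's own tests.

import { BinLogEventHandler, BinLogListener, Row } from '@module/replication/zongji/BinLogListener.js';
import { MySQLConnectionManager } from '@module/replication/MySQLConnectionManager.js';
import { createRandomServerId } from '@module/utils/mysql-utils.js';
import * as common from '@module/common/common-index.js';
import { TableMapEntry } from '@powersync/mysql-zongji';

// Placeholder: a normalized MySQL connection config for the source database.
declare const connectionConfig: any;

async function replicateOnce() {
  const connectionManager = new MySQLConnectionManager(connectionConfig, {});

  // Minimal handler that only logs; a real handler would apply the rows to storage.
  const eventHandler: BinLogEventHandler = {
    async onTransactionStart({ timestamp }) {},
    async onRotate() {},
    async onWrite(rows: Row[], tableMap: TableMapEntry) {
      console.log(`write of ${rows.length} row(s)`);
    },
    async onUpdate(rowsAfter: Row[], rowsBefore: Row[], tableMap: TableMapEntry) {},
    async onDelete(rows: Row[], tableMap: TableMapEntry) {},
    async onCommit(lsn: string) {
      console.log(`commit at LSN ${lsn}`);
    }
  };

  // Start streaming from the currently executed GTID position.
  const connection = await connectionManager.getConnection();
  const fromGTID = await common.readExecutedGtid(connection);
  connection.release();

  const binLogListener = new BinLogListener({
    connectionManager,
    eventHandler,
    includedTables: ['lists', 'todos'], // placeholder table names
    serverId: createRandomServerId(1),
    startPosition: fromGTID.position
  });

  // start() resolves once stop() is called (or rejects if the listener errors out).
  await binLogListener.start();
}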
package/src/replication/zongji/zongji-utils.ts
CHANGED
@@ -1,9 +1,10 @@
 import {
   BinLogEvent,
   BinLogGTIDLogEvent,
-
+  BinLogRowEvent,
   BinLogRotationEvent,
-
+  BinLogTableMapEvent,
+  BinLogRowUpdateEvent,
   BinLogXidEvent
 } from '@powersync/mysql-zongji';
 
@@ -11,6 +12,10 @@ export function eventIsGTIDLog(event: BinLogEvent): event is BinLogGTIDLogEvent
   return event.getEventName() == 'gtidlog';
 }
 
+export function eventIsTableMap(event: BinLogEvent): event is BinLogTableMapEvent {
+  return event.getEventName() == 'tablemap';
+}
+
 export function eventIsXid(event: BinLogEvent): event is BinLogXidEvent {
   return event.getEventName() == 'xid';
 }
@@ -19,14 +24,14 @@ export function eventIsRotation(event: BinLogEvent): event is BinLogRotationEvent
   return event.getEventName() == 'rotate';
 }
 
-export function eventIsWriteMutation(event: BinLogEvent): event is
+export function eventIsWriteMutation(event: BinLogEvent): event is BinLogRowEvent {
   return event.getEventName() == 'writerows';
 }
 
-export function eventIsDeleteMutation(event: BinLogEvent): event is
+export function eventIsDeleteMutation(event: BinLogEvent): event is BinLogRowEvent {
   return event.getEventName() == 'deleterows';
 }
 
-export function eventIsUpdateMutation(event: BinLogEvent): event is
+export function eventIsUpdateMutation(event: BinLogEvent): event is BinLogRowUpdateEvent {
   return event.getEventName() == 'updaterows';
 }
package/src/types/types.ts
CHANGED
@@ -23,6 +23,8 @@ export interface NormalizedMySQLConnectionConfig {
   client_private_key?: string;
 
   lookup?: LookupFunction;
+
+  binlog_queue_memory_limit: number;
 }
 
 export const MySQLConnectionConfig = service_types.configFile.DataSourceConfig.and(
@@ -40,7 +42,9 @@ export const MySQLConnectionConfig = service_types.configFile.DataSourceConfig.and(
     client_certificate: t.string.optional(),
     client_private_key: t.string.optional(),
 
-    reject_ip_ranges: t.array(t.string).optional()
+    reject_ip_ranges: t.array(t.string).optional(),
+    // The combined size of binlog events that can be queued in memory before throttling is applied.
+    binlog_queue_memory_limit: t.number.optional()
   })
 );
 
@@ -114,6 +118,9 @@ export function normalizeConnectionConfig(options: MySQLConnectionConfig): NormalizedMySQLConnectionConfig
 
     server_id: options.server_id ?? 1,
 
+    // Binlog processing queue memory limit before throttling is applied.
+    binlog_queue_memory_limit: options.binlog_queue_memory_limit ?? 50,
+
     lookup
   };
 }
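For reference, `binlog_queue_memory_limit` is expressed in megabytes and defaults to 50; the listener's `queueMemoryLimit` getter (see BinLogListener.ts above) converts it to the byte threshold at which the binlog stream is paused. A quick sketch of the arithmetic:

// binlog_queue_memory_limit defaults to 50 (MB) in normalizeConnectionConfig above.
const binlogQueueMemoryLimitMB = 50;
// BinLogListener pauses the binlog stream once queued events exceed this many bytes,
// resuming when the queue drains or after MAX_QUEUE_PAUSE_TIME_MS (45s) at the latest.
const queueMemoryLimitBytes = binlogQueueMemoryLimitMB * 1024 * 1024; // 52_428_800 bytes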
package/src/utils/mysql-utils.ts
CHANGED
@@ -41,6 +41,7 @@ export function createPool(config: types.NormalizedMySQLConnectionConfig, option
   return mysql.createPool({
     host: config.hostname,
     user: config.username,
+    port: config.port,
     password: config.password,
     database: config.database,
     ssl: hasSSLOptions ? sslOptions : undefined,
package/test/src/BinLogListener.test.ts
ADDED
@@ -0,0 +1,161 @@
+import { describe, test, beforeEach, vi, expect, afterEach } from 'vitest';
+import { BinLogEventHandler, BinLogListener, Row } from '@module/replication/zongji/BinLogListener.js';
+import { MySQLConnectionManager } from '@module/replication/MySQLConnectionManager.js';
+import { clearTestDb, TEST_CONNECTION_OPTIONS } from './util.js';
+import { v4 as uuid } from 'uuid';
+import * as common from '@module/common/common-index.js';
+import { createRandomServerId } from '@module/utils/mysql-utils.js';
+import { TableMapEntry } from '@powersync/mysql-zongji';
+import crypto from 'crypto';
+
+describe('BinlogListener tests', () => {
+  const MAX_QUEUE_CAPACITY_MB = 1;
+  const BINLOG_LISTENER_CONNECTION_OPTIONS = {
+    ...TEST_CONNECTION_OPTIONS,
+    binlog_queue_memory_limit: MAX_QUEUE_CAPACITY_MB
+  };
+
+  let connectionManager: MySQLConnectionManager;
+  let eventHandler: TestBinLogEventHandler;
+  let binLogListener: BinLogListener;
+
+  beforeEach(async () => {
+    connectionManager = new MySQLConnectionManager(BINLOG_LISTENER_CONNECTION_OPTIONS, {});
+    const connection = await connectionManager.getConnection();
+    await clearTestDb(connection);
+    await connection.query(`CREATE TABLE test_DATA (id CHAR(36) PRIMARY KEY, description MEDIUMTEXT)`);
+    connection.release();
+    const fromGTID = await getFromGTID(connectionManager);
+
+    eventHandler = new TestBinLogEventHandler();
+    binLogListener = new BinLogListener({
+      connectionManager: connectionManager,
+      eventHandler: eventHandler,
+      startPosition: fromGTID.position,
+      includedTables: ['test_DATA'],
+      serverId: createRandomServerId(1)
+    });
+  });
+
+  afterEach(async () => {
+    await connectionManager.end();
+  });
+
+  test('Stop binlog listener', async () => {
+    const stopSpy = vi.spyOn(binLogListener.zongji, 'stop');
+    const queueStopSpy = vi.spyOn(binLogListener.processingQueue, 'kill');
+
+    const startPromise = binLogListener.start();
+    setTimeout(async () => binLogListener.stop(), 50);
+
+    await expect(startPromise).resolves.toBeUndefined();
+    expect(stopSpy).toHaveBeenCalled();
+    expect(queueStopSpy).toHaveBeenCalled();
+  });
+
+  test('Pause Zongji binlog listener when processing queue reaches maximum memory size', async () => {
+    const pauseSpy = vi.spyOn(binLogListener.zongji, 'pause');
+    const resumeSpy = vi.spyOn(binLogListener.zongji, 'resume');
+
+    // Pause the event handler to force a backlog on the processing queue
+    eventHandler.pause();
+
+    const ROW_COUNT = 10;
+    await insertRows(connectionManager, ROW_COUNT);
+
+    const startPromise = binLogListener.start();
+
+    // Wait for listener to pause due to queue reaching capacity
+    await vi.waitFor(() => expect(pauseSpy).toHaveBeenCalled(), { timeout: 5000 });
+
+    expect(binLogListener.isQueueOverCapacity()).toBeTruthy();
+    // Resume event processing
+    eventHandler.unpause!();
+
+    await vi.waitFor(() => expect(eventHandler.rowsWritten).equals(ROW_COUNT), { timeout: 5000 });
+    binLogListener.stop();
+    await expect(startPromise).resolves.toBeUndefined();
+    // Confirm resume was called after unpausing
+    expect(resumeSpy).toHaveBeenCalled();
+  });
+
+  test('Binlog events are correctly forwarded to provided binlog events handler', async () => {
+    const startPromise = binLogListener.start();
+
+    const ROW_COUNT = 10;
+    await insertRows(connectionManager, ROW_COUNT);
+    await vi.waitFor(() => expect(eventHandler.rowsWritten).equals(ROW_COUNT), { timeout: 5000 });
+    expect(eventHandler.commitCount).equals(ROW_COUNT);
+
+    await updateRows(connectionManager);
+    await vi.waitFor(() => expect(eventHandler.rowsUpdated).equals(ROW_COUNT), { timeout: 5000 });
+
+    await deleteRows(connectionManager);
+    await vi.waitFor(() => expect(eventHandler.rowsDeleted).equals(ROW_COUNT), { timeout: 5000 });
+
+    binLogListener.stop();
+    await expect(startPromise).resolves.toBeUndefined();
+  });
+});
+
+async function getFromGTID(connectionManager: MySQLConnectionManager) {
+  const connection = await connectionManager.getConnection();
+  const fromGTID = await common.readExecutedGtid(connection);
+  connection.release();
+
+  return fromGTID;
+}
+
+async function insertRows(connectionManager: MySQLConnectionManager, count: number) {
+  for (let i = 0; i < count; i++) {
+    await connectionManager.query(
+      `INSERT INTO test_DATA(id, description) VALUES('${uuid()}','test${i} ${crypto.randomBytes(100_000).toString('hex')}')`
+    );
+  }
+}
+
+async function updateRows(connectionManager: MySQLConnectionManager) {
+  await connectionManager.query(`UPDATE test_DATA SET description='updated'`);
+}
+
+async function deleteRows(connectionManager: MySQLConnectionManager) {
+  await connectionManager.query(`DELETE FROM test_DATA`);
+}
+
+class TestBinLogEventHandler implements BinLogEventHandler {
+  rowsWritten = 0;
+  rowsUpdated = 0;
+  rowsDeleted = 0;
+  commitCount = 0;
+
+  unpause: ((value: void | PromiseLike<void>) => void) | undefined;
+  private pausedPromise: Promise<void> | undefined;
+
+  pause() {
+    this.pausedPromise = new Promise((resolve) => {
+      this.unpause = resolve;
+    });
+  }
+
+  async onWrite(rows: Row[], tableMap: TableMapEntry) {
+    if (this.pausedPromise) {
+      await this.pausedPromise;
+    }
+    this.rowsWritten = this.rowsWritten + rows.length;
+  }
+
+  async onUpdate(afterRows: Row[], beforeRows: Row[], tableMap: TableMapEntry) {
+    this.rowsUpdated = this.rowsUpdated + afterRows.length;
+  }
+
+  async onDelete(rows: Row[], tableMap: TableMapEntry) {
+    this.rowsDeleted = this.rowsDeleted + rows.length;
+  }
+
+  async onCommit(lsn: string) {
+    this.commitCount++;
+  }
+
+  async onTransactionStart(options: { timestamp: Date }) {}
+  async onRotate() {}
+}
package/test/src/BinLogStream.test.ts
CHANGED
@@ -1,11 +1,10 @@
 import { storage } from '@powersync/service-core';
 import { METRICS_HELPER, putOp, removeOp } from '@powersync/service-core-tests';
+import { ReplicationMetric } from '@powersync/service-types';
 import { v4 as uuid } from 'uuid';
 import { describe, expect, test } from 'vitest';
 import { BinlogStreamTestContext } from './BinlogStreamUtils.js';
-import {
-import { INITIALIZED_MONGO_STORAGE_FACTORY, INITIALIZED_POSTGRES_STORAGE_FACTORY } from './util.js';
-import { ReplicationMetric } from '@powersync/service-types';
+import { describeWithStorage } from './util.js';
 
 const BASIC_SYNC_RULES = `
 bucket_definitions:
@@ -14,12 +13,8 @@ bucket_definitions:
       - SELECT id, description FROM "test_data"
 `;
 
-describe
-defineBinlogStreamTests
-});
-
-describe.skipIf(!env.TEST_POSTGRES_STORAGE)(' Binlog stream - postgres', { timeout: 20_000 }, function () {
-  defineBinlogStreamTests(INITIALIZED_POSTGRES_STORAGE_FACTORY);
+describe('BigLog stream', () => {
+  describeWithStorage({ timeout: 20_000 }, defineBinlogStreamTests);
 });
 
 function defineBinlogStreamTests(factory: storage.TestStorageFactory) {
package/test/src/mysql-to-sqlite.test.ts
CHANGED
@@ -3,7 +3,7 @@ import { afterAll, describe, expect, test } from 'vitest';
 import { clearTestDb, TEST_CONNECTION_OPTIONS } from './util.js';
 import { eventIsWriteMutation, eventIsXid } from '@module/replication/zongji/zongji-utils.js';
 import * as common from '@module/common/common-index.js';
-import
+import { BinLogEvent, ZongJi } from '@powersync/mysql-zongji';
 import { MySQLConnectionManager } from '@module/replication/MySQLConnectionManager.js';
 import { toColumnDescriptors } from '@module/common/common-index.js';
 
package/test/src/util.ts
CHANGED
@@ -4,6 +4,8 @@ import * as mongo_storage from '@powersync/service-module-mongodb-storage';
 import * as postgres_storage from '@powersync/service-module-postgres-storage';
 import mysqlPromise from 'mysql2/promise';
 import { env } from './env.js';
+import { describe, TestOptions } from 'vitest';
+import { TestStorageFactory } from '@powersync/service-core';
 
 export const TEST_URI = env.MYSQL_TEST_URI;
 
@@ -21,6 +23,16 @@ export const INITIALIZED_POSTGRES_STORAGE_FACTORY = postgres_storage.PostgresTes
   url: env.PG_STORAGE_TEST_URL
 });
 
+export function describeWithStorage(options: TestOptions, fn: (factory: TestStorageFactory) => void) {
+  describe.skipIf(!env.TEST_MONGO_STORAGE)(`mongodb storage`, options, function () {
+    fn(INITIALIZED_MONGO_STORAGE_FACTORY);
+  });
+
+  describe.skipIf(!env.TEST_POSTGRES_STORAGE)(`postgres storage`, options, function () {
+    fn(INITIALIZED_POSTGRES_STORAGE_FACTORY);
+  });
+}
+
 export async function clearTestDb(connection: mysqlPromise.Connection) {
   const version = await getMySQLVersion(connection);
   if (isVersionAtLeast(version, '8.4.0')) {
|