@powersync/service-module-mysql 0.0.0-dev-20241015210820

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96)
  1. package/CHANGELOG.md +20 -0
  2. package/LICENSE +67 -0
  3. package/README.md +3 -0
  4. package/dev/.env.template +2 -0
  5. package/dev/README.md +9 -0
  6. package/dev/config/sync_rules.yaml +12 -0
  7. package/dev/docker/mysql/docker-compose.yaml +17 -0
  8. package/dev/docker/mysql/init-scripts/my.cnf +9 -0
  9. package/dev/docker/mysql/init-scripts/mysql.sql +38 -0
  10. package/dist/api/MySQLRouteAPIAdapter.d.ts +24 -0
  11. package/dist/api/MySQLRouteAPIAdapter.js +311 -0
  12. package/dist/api/MySQLRouteAPIAdapter.js.map +1 -0
  13. package/dist/common/ReplicatedGTID.d.ts +59 -0
  14. package/dist/common/ReplicatedGTID.js +110 -0
  15. package/dist/common/ReplicatedGTID.js.map +1 -0
  16. package/dist/common/check-source-configuration.d.ts +3 -0
  17. package/dist/common/check-source-configuration.js +46 -0
  18. package/dist/common/check-source-configuration.js.map +1 -0
  19. package/dist/common/common-index.d.ts +6 -0
  20. package/dist/common/common-index.js +7 -0
  21. package/dist/common/common-index.js.map +1 -0
  22. package/dist/common/get-replication-columns.d.ts +12 -0
  23. package/dist/common/get-replication-columns.js +103 -0
  24. package/dist/common/get-replication-columns.js.map +1 -0
  25. package/dist/common/get-tables-from-pattern.d.ts +7 -0
  26. package/dist/common/get-tables-from-pattern.js +28 -0
  27. package/dist/common/get-tables-from-pattern.js.map +1 -0
  28. package/dist/common/mysql-to-sqlite.d.ts +4 -0
  29. package/dist/common/mysql-to-sqlite.js +56 -0
  30. package/dist/common/mysql-to-sqlite.js.map +1 -0
  31. package/dist/common/read-executed-gtid.d.ts +6 -0
  32. package/dist/common/read-executed-gtid.js +40 -0
  33. package/dist/common/read-executed-gtid.js.map +1 -0
  34. package/dist/index.d.ts +3 -0
  35. package/dist/index.js +4 -0
  36. package/dist/index.js.map +1 -0
  37. package/dist/module/MySQLModule.d.ts +13 -0
  38. package/dist/module/MySQLModule.js +46 -0
  39. package/dist/module/MySQLModule.js.map +1 -0
  40. package/dist/replication/BinLogReplicationJob.d.ts +14 -0
  41. package/dist/replication/BinLogReplicationJob.js +88 -0
  42. package/dist/replication/BinLogReplicationJob.js.map +1 -0
  43. package/dist/replication/BinLogReplicator.d.ts +13 -0
  44. package/dist/replication/BinLogReplicator.js +25 -0
  45. package/dist/replication/BinLogReplicator.js.map +1 -0
  46. package/dist/replication/BinLogStream.d.ts +43 -0
  47. package/dist/replication/BinLogStream.js +421 -0
  48. package/dist/replication/BinLogStream.js.map +1 -0
  49. package/dist/replication/MySQLConnectionManager.d.ts +43 -0
  50. package/dist/replication/MySQLConnectionManager.js +81 -0
  51. package/dist/replication/MySQLConnectionManager.js.map +1 -0
  52. package/dist/replication/MySQLConnectionManagerFactory.d.ts +10 -0
  53. package/dist/replication/MySQLConnectionManagerFactory.js +21 -0
  54. package/dist/replication/MySQLConnectionManagerFactory.js.map +1 -0
  55. package/dist/replication/MySQLErrorRateLimiter.d.ts +10 -0
  56. package/dist/replication/MySQLErrorRateLimiter.js +43 -0
  57. package/dist/replication/MySQLErrorRateLimiter.js.map +1 -0
  58. package/dist/replication/zongji/zongji-utils.d.ts +7 -0
  59. package/dist/replication/zongji/zongji-utils.js +19 -0
  60. package/dist/replication/zongji/zongji-utils.js.map +1 -0
  61. package/dist/types/types.d.ts +50 -0
  62. package/dist/types/types.js +61 -0
  63. package/dist/types/types.js.map +1 -0
  64. package/dist/utils/mysql_utils.d.ts +14 -0
  65. package/dist/utils/mysql_utils.js +38 -0
  66. package/dist/utils/mysql_utils.js.map +1 -0
  67. package/package.json +51 -0
  68. package/src/api/MySQLRouteAPIAdapter.ts +357 -0
  69. package/src/common/ReplicatedGTID.ts +158 -0
  70. package/src/common/check-source-configuration.ts +59 -0
  71. package/src/common/common-index.ts +6 -0
  72. package/src/common/get-replication-columns.ts +124 -0
  73. package/src/common/get-tables-from-pattern.ts +44 -0
  74. package/src/common/mysql-to-sqlite.ts +59 -0
  75. package/src/common/read-executed-gtid.ts +43 -0
  76. package/src/index.ts +5 -0
  77. package/src/module/MySQLModule.ts +53 -0
  78. package/src/replication/BinLogReplicationJob.ts +97 -0
  79. package/src/replication/BinLogReplicator.ts +35 -0
  80. package/src/replication/BinLogStream.ts +547 -0
  81. package/src/replication/MySQLConnectionManager.ts +104 -0
  82. package/src/replication/MySQLConnectionManagerFactory.ts +28 -0
  83. package/src/replication/MySQLErrorRateLimiter.ts +44 -0
  84. package/src/replication/zongji/zongji-utils.ts +32 -0
  85. package/src/replication/zongji/zongji.d.ts +98 -0
  86. package/src/types/types.ts +102 -0
  87. package/src/utils/mysql_utils.ts +47 -0
  88. package/test/src/binlog_stream.test.ts +288 -0
  89. package/test/src/binlog_stream_utils.ts +152 -0
  90. package/test/src/env.ts +7 -0
  91. package/test/src/setup.ts +7 -0
  92. package/test/src/util.ts +62 -0
  93. package/test/tsconfig.json +28 -0
  94. package/tsconfig.json +26 -0
  95. package/tsconfig.tsbuildinfo +1 -0
  96. package/vitest.config.ts +15 -0
package/src/replication/BinLogStream.ts
@@ -0,0 +1,547 @@
+ import { logger } from '@powersync/lib-services-framework';
+ import * as sync_rules from '@powersync/service-sync-rules';
+ import async from 'async';
+
+ import { framework, getUuidReplicaIdentityBson, Metrics, storage } from '@powersync/service-core';
+ import mysql from 'mysql2';
+
+ import { BinLogEvent } from '@powersync/mysql-zongji';
+ import * as common from '../common/common-index.js';
+ import * as zongji_utils from './zongji/zongji-utils.js';
+ import { MySQLConnectionManager } from './MySQLConnectionManager.js';
+ import { ReplicatedGTID } from '../common/common-index.js';
+ import mysqlPromise from 'mysql2/promise';
+
+ export interface BinLogStreamOptions {
+   connections: MySQLConnectionManager;
+   storage: storage.SyncRulesBucketStorage;
+   abortSignal: AbortSignal;
+ }
+
+ interface MysqlRelId {
+   schema: string;
+   name: string;
+ }
+
+ interface WriteChangePayload {
+   type: storage.SaveOperationTag;
+   data: Data;
+   previous_data?: Data;
+   database: string;
+   table: string;
+   sourceTable: storage.SourceTable;
+ }
+
+ export type Data = Record<string, any>;
+
+ /**
+  * MySQL does not have same relation structure. Just returning unique key as string.
+  * @param source
+  */
+ function getMysqlRelId(source: MysqlRelId): string {
+   return `${source.schema}.${source.name}`;
+ }
+
+ export class BinLogStream {
+   private readonly syncRules: sync_rules.SqlSyncRules;
+   private readonly groupId: number;
+
+   private readonly storage: storage.SyncRulesBucketStorage;
+
+   private readonly connections: MySQLConnectionManager;
+
+   private abortSignal: AbortSignal;
+
+   private tableCache = new Map<string | number, storage.SourceTable>();
+
+   constructor(protected options: BinLogStreamOptions) {
+     this.storage = options.storage;
+     this.connections = options.connections;
+     this.syncRules = options.storage.getParsedSyncRules({ defaultSchema: this.defaultSchema });
+     this.groupId = options.storage.group_id;
+     this.abortSignal = options.abortSignal;
+   }
+
+   get connectionTag() {
+     return this.connections.connectionTag;
+   }
+
+   get connectionId() {
+     // Default to 1 if not set
+     return this.connections.connectionId ? Number.parseInt(this.connections.connectionId) : 1;
+   }
+
+   get stopped() {
+     return this.abortSignal.aborted;
+   }
+
+   get defaultSchema() {
+     return this.connections.databaseName;
+   }
+
+   async handleRelation(batch: storage.BucketStorageBatch, entity: storage.SourceEntityDescriptor, snapshot: boolean) {
+     const result = await this.storage.resolveTable({
+       group_id: this.groupId,
+       connection_id: this.connectionId,
+       connection_tag: this.connectionTag,
+       entity_descriptor: entity,
+       sync_rules: this.syncRules
+     });
+     this.tableCache.set(entity.objectId, result.table);
+
+     // Drop conflicting tables. This includes for example renamed tables.
+     await batch.drop(result.dropTables);
+
+     // Snapshot if:
+     // 1. Snapshot is requested (false for initial snapshot, since that process handles it elsewhere)
+     // 2. Snapshot is not already done, AND:
+     // 3. The table is used in sync rules.
+     const shouldSnapshot = snapshot && !result.table.snapshotComplete && result.table.syncAny;
+
+     if (shouldSnapshot) {
+       // Truncate this table, in case a previous snapshot was interrupted.
+       await batch.truncate([result.table]);
+
+       let gtid: common.ReplicatedGTID;
+       // Start the snapshot inside a transaction.
+       // We use a dedicated connection for this.
+       const connection = await this.connections.getStreamingConnection();
+       const promiseConnection = (connection as mysql.Connection).promise();
+       try {
+         await promiseConnection.query('BEGIN');
+         try {
+           gtid = await common.readExecutedGtid(promiseConnection);
+           await this.snapshotTable(connection.connection, batch, result.table);
+           await promiseConnection.query('COMMIT');
+         } catch (e) {
+           await promiseConnection.query('ROLLBACK');
+           throw e;
+         }
+       } finally {
+         connection.release();
+       }
+       const [table] = await batch.markSnapshotDone([result.table], gtid.comparable);
+       return table;
+     }
+
+     return result.table;
+   }
+
+   async getQualifiedTableNames(
+     batch: storage.BucketStorageBatch,
+     tablePattern: sync_rules.TablePattern
+   ): Promise<storage.SourceTable[]> {
+     if (tablePattern.connectionTag != this.connectionTag) {
+       return [];
+     }
+
+     let tableRows: any[];
+     const prefix = tablePattern.isWildcard ? tablePattern.tablePrefix : undefined;
+     if (tablePattern.isWildcard) {
+       const result = await this.connections.query(
+         `SELECT TABLE_NAME
+          FROM information_schema.tables
+          WHERE TABLE_SCHEMA = ? AND TABLE_NAME LIKE ?;
+         `,
+         [tablePattern.schema, tablePattern.tablePattern]
+       );
+       tableRows = result[0];
+     } else {
+       const result = await this.connections.query(
+         `SELECT TABLE_NAME
+          FROM information_schema.tables
+          WHERE TABLE_SCHEMA = ? AND TABLE_NAME = ?;
+         `,
+         [tablePattern.schema, tablePattern.tablePattern]
+       );
+       tableRows = result[0];
+     }
+     let tables: storage.SourceTable[] = [];
+
+     for (let row of tableRows) {
+       const name = row['TABLE_NAME'] as string;
+       if (prefix && !name.startsWith(prefix)) {
+         continue;
+       }
+
+       const result = await this.connections.query(
+         `SELECT 1
+          FROM information_schema.tables
+          WHERE table_schema = ? AND table_name = ?
+          AND table_type = 'BASE TABLE';`,
+         [tablePattern.schema, tablePattern.name]
+       );
+       if (result[0].length == 0) {
+         logger.info(`Skipping ${tablePattern.schema}.${name} - no table exists/is not a base table`);
+         continue;
+       }
+
+       const connection = await this.connections.getConnection();
+       const replicationColumns = await common.getReplicationIdentityColumns({
+         connection: connection,
+         schema: tablePattern.schema,
+         table_name: tablePattern.name
+       });
+       connection.release();
+
+       const table = await this.handleRelation(
+         batch,
+         {
+           name,
+           schema: tablePattern.schema,
+           objectId: getMysqlRelId(tablePattern),
+           replicationColumns: replicationColumns.columns
+         },
+         false
+       );
+
+       tables.push(table);
+     }
+     return tables;
+   }
+
+   /**
+    * Checks if the initial sync has been completed yet.
+    */
+   protected async checkInitialReplicated(): Promise<boolean> {
+     const status = await this.storage.getStatus();
+     if (status.snapshot_done && status.checkpoint_lsn) {
+       logger.info(`Initial replication already done. MySQL appears healthy`);
+       return true;
+     }
+     return false;
+   }
+
+   /**
+    * Does the initial replication of the database tables.
+    *
+    * If (partial) replication was done before on this slot, this clears the state
+    * and starts again from scratch.
+    */
+   async startInitialReplication() {
+     await this.storage.clear();
+     // Replication will be performed in a single transaction on this connection
+     const connection = await this.connections.getStreamingConnection();
+     const promiseConnection = (connection as mysql.Connection).promise();
+     const headGTID = await common.readExecutedGtid(promiseConnection);
+     logger.info(`Using snapshot checkpoint GTID:: '${headGTID}'`);
+     try {
+       logger.info(`Starting initial replication`);
+       await promiseConnection.query<mysqlPromise.RowDataPacket[]>(
+         'SET TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ ONLY'
+       );
+       await promiseConnection.query<mysqlPromise.RowDataPacket[]>('START TRANSACTION');
+       const sourceTables = this.syncRules.getSourceTables();
+       await this.storage.startBatch(
+         { zeroLSN: ReplicatedGTID.ZERO.comparable, defaultSchema: this.defaultSchema },
+         async (batch) => {
+           for (let tablePattern of sourceTables) {
+             const tables = await this.getQualifiedTableNames(batch, tablePattern);
+             for (let table of tables) {
+               await this.snapshotTable(connection as mysql.Connection, batch, table);
+               await batch.markSnapshotDone([table], headGTID.comparable);
+               await framework.container.probes.touch();
+             }
+           }
+           await batch.commit(headGTID.comparable);
+         }
+       );
+       logger.info(`Initial replication done`);
+       await promiseConnection.query('COMMIT');
+     } catch (e) {
+       await promiseConnection.query('ROLLBACK');
+       throw e;
+     } finally {
+       connection.release();
+     }
+   }
+
+   private async snapshotTable(
+     connection: mysql.Connection,
+     batch: storage.BucketStorageBatch,
+     table: storage.SourceTable
+   ) {
+     logger.info(`Replicating ${table.qualifiedName}`);
+     // TODO count rows and log progress at certain batch sizes
+
+     return new Promise<void>((resolve, reject) => {
+       // MAX_EXECUTION_TIME(0) hint disables execution timeout for this query
+       connection
+         .query(`SELECT /*+ MAX_EXECUTION_TIME(0) */ * FROM ${table.schema}.${table.table}`)
+         .stream()
+         .on('error', (err) => {
+           reject(err);
+         })
+         .on('data', async (row) => {
+           connection.pause();
+           const record = common.toSQLiteRow(row);
+
+           await batch.save({
+             tag: storage.SaveOperationTag.INSERT,
+             sourceTable: table,
+             before: undefined,
+             beforeReplicaId: undefined,
+             after: record,
+             afterReplicaId: getUuidReplicaIdentityBson(record, table.replicaIdColumns)
+           });
+           connection.resume();
+           // TODO: These metrics can probably be reported in batches
+           Metrics.getInstance().rows_replicated_total.add(1);
+         })
+         .on('end', async function () {
+           await batch.flush();
+           resolve();
+         });
+     });
+   }
+
+   async replicate() {
+     try {
+       // If anything errors here, the entire replication process is halted, and
+       // all connections automatically closed, including this one.
+       await this.initReplication();
+       await this.streamChanges();
+     } catch (e) {
+       await this.storage.reportError(e);
+       throw e;
+     }
+   }
+
+   async initReplication() {
+     const connection = await this.connections.getConnection();
+     await common.checkSourceConfiguration(connection);
+     connection.release();
+
+     const initialReplicationCompleted = await this.checkInitialReplicated();
+     if (!initialReplicationCompleted) {
+       await this.startInitialReplication();
+     }
+   }
+
+   private getTable(tableId: string): storage.SourceTable {
+     const table = this.tableCache.get(tableId);
+     if (table == null) {
+       // We should always receive a replication message before the relation is used.
+       // If we can't find it, it's a bug.
+       throw new Error(`Missing relation cache for ${tableId}`);
+     }
+     return table;
+   }
+
+   async streamChanges() {
+     // Auto-activate as soon as initial replication is done
+     await this.storage.autoActivate();
+
+     const connection = await this.connections.getConnection();
+     const { checkpoint_lsn } = await this.storage.getStatus();
+     logger.info(`Last known LSN from storage: ${checkpoint_lsn}`);
+
+     const fromGTID = checkpoint_lsn
+       ? common.ReplicatedGTID.fromSerialized(checkpoint_lsn)
+       : await common.readExecutedGtid(connection);
+     const binLogPositionState = fromGTID.position;
+     connection.release();
+
+     await this.storage.startBatch(
+       { zeroLSN: ReplicatedGTID.ZERO.comparable, defaultSchema: this.defaultSchema },
+       async (batch) => {
+         const zongji = this.connections.createBinlogListener();
+
+         let currentGTID: common.ReplicatedGTID | null = null;
+
+         const queue = async.queue(async (evt: BinLogEvent) => {
+           // State machine
+           switch (true) {
+             case zongji_utils.eventIsGTIDLog(evt):
+               currentGTID = common.ReplicatedGTID.fromBinLogEvent({
+                 raw_gtid: {
+                   server_id: evt.serverId,
+                   transaction_range: evt.transactionRange
+                 },
+                 position: {
+                   filename: binLogPositionState.filename,
+                   offset: evt.nextPosition
+                 }
+               });
+               break;
+             case zongji_utils.eventIsRotation(evt):
+               // Update the position
+               binLogPositionState.filename = evt.binlogName;
+               binLogPositionState.offset = evt.position;
+               break;
+             case zongji_utils.eventIsWriteMutation(evt):
+               // TODO, can multiple tables be present?
+               const writeTableInfo = evt.tableMap[evt.tableId];
+               await this.writeChanges(batch, {
+                 type: storage.SaveOperationTag.INSERT,
+                 data: evt.rows,
+                 database: writeTableInfo.parentSchema,
+                 table: writeTableInfo.tableName,
+                 sourceTable: this.getTable(
+                   getMysqlRelId({
+                     schema: writeTableInfo.parentSchema,
+                     name: writeTableInfo.tableName
+                   })
+                 )
+               });
+               break;
+             case zongji_utils.eventIsUpdateMutation(evt):
+               const updateTableInfo = evt.tableMap[evt.tableId];
+               await this.writeChanges(batch, {
+                 type: storage.SaveOperationTag.UPDATE,
+                 data: evt.rows.map((row) => row.after),
+                 previous_data: evt.rows.map((row) => row.before),
+                 database: updateTableInfo.parentSchema,
+                 table: updateTableInfo.tableName,
+                 sourceTable: this.getTable(
+                   getMysqlRelId({
+                     schema: updateTableInfo.parentSchema,
+                     name: updateTableInfo.tableName
+                   })
+                 )
+               });
+               break;
+             case zongji_utils.eventIsDeleteMutation(evt):
+               // TODO, can multiple tables be present?
+               const deleteTableInfo = evt.tableMap[evt.tableId];
+               await this.writeChanges(batch, {
+                 type: storage.SaveOperationTag.DELETE,
+                 data: evt.rows,
+                 database: deleteTableInfo.parentSchema,
+                 table: deleteTableInfo.tableName,
+                 // TODO cleanup
+                 sourceTable: this.getTable(
+                   getMysqlRelId({
+                     schema: deleteTableInfo.parentSchema,
+                     name: deleteTableInfo.tableName
+                   })
+                 )
+               });
+               break;
+             case zongji_utils.eventIsXid(evt):
+               Metrics.getInstance().transactions_replicated_total.add(1);
+               // Need to commit with a replicated GTID with updated next position
+               await batch.commit(
+                 new common.ReplicatedGTID({
+                   raw_gtid: currentGTID!.raw,
+                   position: {
+                     filename: binLogPositionState.filename,
+                     offset: evt.nextPosition
+                   }
+                 }).comparable
+               );
+               currentGTID = null;
+               // chunks_replicated_total.add(1);
+               break;
+           }
+         }, 1);
+
+         zongji.on('binlog', (evt: BinLogEvent) => {
+           logger.info(`Pushing Binlog event ${evt.getEventName()}`);
+           queue.push(evt);
+         });
+
+         logger.info(`Starting replication from ${binLogPositionState.filename}:${binLogPositionState.offset}`);
+         zongji.start({
+           includeEvents: ['tablemap', 'writerows', 'updaterows', 'deleterows', 'xid', 'rotate', 'gtidlog'],
+           excludeEvents: [],
+           filename: binLogPositionState.filename,
+           position: binLogPositionState.offset
+         });
+
+         // Forever young
+         await new Promise<void>((resolve, reject) => {
+           queue.error((error) => {
+             zongji.stop();
+             queue.kill();
+             reject(error);
+           });
+           this.abortSignal.addEventListener(
+             'abort',
+             async () => {
+               zongji.stop();
+               queue.kill();
+               if (!queue.length) {
+                 await queue.drain();
+               }
+               resolve();
+             },
+             { once: true }
+           );
+         });
+       }
+     );
+   }
+
+   private async writeChanges(
+     batch: storage.BucketStorageBatch,
+     msg: {
+       type: storage.SaveOperationTag;
+       data: Data[];
+       previous_data?: Data[];
+       database: string;
+       table: string;
+       sourceTable: storage.SourceTable;
+     }
+   ): Promise<storage.FlushedResult | null> {
+     for (const [index, row] of msg.data.entries()) {
+       await this.writeChange(batch, {
+         ...msg,
+         data: row,
+         previous_data: msg.previous_data?.[index]
+       });
+     }
+     return null;
+   }
+
+   private async writeChange(
+     batch: storage.BucketStorageBatch,
+     payload: WriteChangePayload
+   ): Promise<storage.FlushedResult | null> {
+     switch (payload.type) {
+       case storage.SaveOperationTag.INSERT:
+         Metrics.getInstance().rows_replicated_total.add(1);
+         const record = common.toSQLiteRow(payload.data);
+         return await batch.save({
+           tag: storage.SaveOperationTag.INSERT,
+           sourceTable: payload.sourceTable,
+           before: undefined,
+           beforeReplicaId: undefined,
+           after: record,
+           afterReplicaId: getUuidReplicaIdentityBson(record, payload.sourceTable.replicaIdColumns)
+         });
+       case storage.SaveOperationTag.UPDATE:
+         Metrics.getInstance().rows_replicated_total.add(1);
+         // "before" may be null if the replica id columns are unchanged
+         // It's fine to treat that the same as an insert.
+         const beforeUpdated = payload.previous_data ? common.toSQLiteRow(payload.previous_data) : undefined;
+         const after = common.toSQLiteRow(payload.data);
+
+         return await batch.save({
+           tag: storage.SaveOperationTag.UPDATE,
+           sourceTable: payload.sourceTable,
+           before: beforeUpdated,
+           beforeReplicaId: beforeUpdated
+             ? getUuidReplicaIdentityBson(beforeUpdated, payload.sourceTable.replicaIdColumns)
+             : undefined,
+           after: common.toSQLiteRow(payload.data),
+           afterReplicaId: getUuidReplicaIdentityBson(after, payload.sourceTable.replicaIdColumns)
+         });
+
+       case storage.SaveOperationTag.DELETE:
+         Metrics.getInstance().rows_replicated_total.add(1);
+         const beforeDeleted = common.toSQLiteRow(payload.data);
+
+         return await batch.save({
+           tag: storage.SaveOperationTag.DELETE,
+           sourceTable: payload.sourceTable,
+           before: beforeDeleted,
+           beforeReplicaId: getUuidReplicaIdentityBson(beforeDeleted, payload.sourceTable.replicaIdColumns),
+           after: undefined,
+           afterReplicaId: undefined
+         });
+       default:
+         return null;
+     }
+   }
+ }
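
For orientation, the sketch below shows one way the BinLogStream added above could be wired up and run. It is not code from this package: the `runReplication` function, the pool options, and the loosely typed `syncRuleStorage` argument are assumptions for illustration; in the real service these come from @powersync/service-core and the module's configuration.

import { MySQLConnectionManagerFactory } from './MySQLConnectionManagerFactory.js';
import { BinLogStream } from './BinLogStream.js';

// Hypothetical wiring: the caller supplies a factory and a
// storage.SyncRulesBucketStorage instance (typed as `any` here).
async function runReplication(factory: MySQLConnectionManagerFactory, syncRuleStorage: any) {
  const abortController = new AbortController();
  const connections = factory.create({ connectionLimit: 5 }); // pool options are an assumption

  const stream = new BinLogStream({
    connections,
    storage: syncRuleStorage,
    abortSignal: abortController.signal
  });

  try {
    // replicate() runs the initial snapshot (if needed) and then tails the
    // binlog until the abort signal fires or an error is thrown.
    await stream.replicate();
  } finally {
    abortController.abort();
    await factory.shutdown();
  }
}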
package/src/replication/MySQLConnectionManager.ts
@@ -0,0 +1,104 @@
+ import { NormalizedMySQLConnectionConfig } from '../types/types.js';
+ import mysqlPromise from 'mysql2/promise';
+ import mysql, { RowDataPacket } from 'mysql2';
+ import * as mysql_utils from '../utils/mysql_utils.js';
+ import ZongJi from '@powersync/mysql-zongji';
+
+ export class MySQLConnectionManager {
+   /**
+    * Pool that can create streamable connections
+    */
+   private readonly pool: mysql.Pool;
+   /**
+    * Pool that can create promise-based connections
+    */
+   private readonly promisePool: mysqlPromise.Pool;
+
+   private binlogListeners: ZongJi[] = [];
+
+   constructor(
+     public options: NormalizedMySQLConnectionConfig,
+     public poolOptions: mysqlPromise.PoolOptions
+   ) {
+     // The pool is lazy - no connections are opened until a query is performed.
+     this.pool = mysql_utils.createPool(options, poolOptions);
+     this.promisePool = this.pool.promise();
+   }
+
+   public get connectionTag() {
+     return this.options.tag;
+   }
+
+   public get connectionId() {
+     return this.options.id;
+   }
+
+   public get databaseName() {
+     return this.options.database;
+   }
+
+   /**
+    * Create a new replication listener
+    */
+   createBinlogListener(): ZongJi {
+     const listener = new ZongJi({
+       host: this.options.hostname,
+       user: this.options.username,
+       password: this.options.password
+     });
+
+     this.binlogListeners.push(listener);
+
+     return listener;
+   }
+
+   /**
+    * Run a query using a connection from the pool
+    * A promise with the result is returned
+    * @param query
+    * @param params
+    */
+   async query(query: string, params?: any[]) {
+     return this.promisePool.query<RowDataPacket[]>(query, params);
+   }
+
+   /**
+    * Get a streamable connection from this manager's pool
+    * The connection should be released when it is no longer needed
+    */
+   async getStreamingConnection(): Promise<mysql.PoolConnection> {
+     return new Promise((resolve, reject) => {
+       this.pool.getConnection((err, connection) => {
+         if (err) {
+           reject(err);
+         } else {
+           resolve(connection);
+         }
+       });
+     });
+   }
+
+   /**
+    * Get a promise connection from this manager's pool
+    * The connection should be released when it is no longer needed
+    */
+   async getConnection(): Promise<mysqlPromise.PoolConnection> {
+     return this.promisePool.getConnection();
+   }
+
+   async end(): Promise<void> {
+     for (const listener of this.binlogListeners) {
+       listener.stop();
+     }
+
+     await new Promise<void>((resolve, reject) => {
+       this.pool.end((err) => {
+         if (err) {
+           reject(err);
+         } else {
+           resolve();
+         }
+       });
+     });
+   }
+ }
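
As a rough usage sketch (not part of this package), the manager above is used like any pooled mysql2 wrapper. The config values below are hypothetical placeholders, and only the fields referenced in this file are filled in; the full NormalizedMySQLConnectionConfig shape lives in src/types/types.ts.

import { MySQLConnectionManager } from './MySQLConnectionManager.js';

async function managerExample() {
  // Placeholder connection settings for illustration only.
  const manager = new MySQLConnectionManager(
    { id: '1', tag: 'default', hostname: 'localhost', username: 'repl', password: 'secret', database: 'app' } as any,
    { connectionLimit: 5 }
  );

  // Pooled, promise-based query: resolves to [rows, fields].
  const [rows] = await manager.query(
    'SELECT TABLE_NAME FROM information_schema.tables WHERE TABLE_SCHEMA = ?',
    ['app']
  );
  console.log(`Found ${rows.length} tables`);

  // Promise connections must be released back to the pool when done.
  const connection = await manager.getConnection();
  try {
    await connection.query('SELECT 1');
  } finally {
    connection.release();
  }

  // end() stops any binlog listeners created by this manager and closes the pool.
  await manager.end();
}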
package/src/replication/MySQLConnectionManagerFactory.ts
@@ -0,0 +1,28 @@
+ import { logger } from '@powersync/lib-services-framework';
+ import mysql from 'mysql2/promise';
+ import { MySQLConnectionManager } from './MySQLConnectionManager.js';
+ import { ResolvedConnectionConfig } from '../types/types.js';
+
+ export class MySQLConnectionManagerFactory {
+   private readonly connectionManagers: MySQLConnectionManager[];
+   private readonly connectionConfig: ResolvedConnectionConfig;
+
+   constructor(connectionConfig: ResolvedConnectionConfig) {
+     this.connectionConfig = connectionConfig;
+     this.connectionManagers = [];
+   }
+
+   create(poolOptions: mysql.PoolOptions) {
+     const manager = new MySQLConnectionManager(this.connectionConfig, poolOptions);
+     this.connectionManagers.push(manager);
+     return manager;
+   }
+
+   async shutdown() {
+     logger.info('Shutting down MySQL connection Managers...');
+     for (const manager of this.connectionManagers) {
+       await manager.end();
+     }
+     logger.info('MySQL connection Managers shutdown completed.');
+   }
+ }
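
Finally, a brief lifecycle sketch for the factory above; the `resolvedConfig` argument and pool options are assumed inputs, not values shipped with the package.

import { MySQLConnectionManagerFactory } from './MySQLConnectionManagerFactory.js';
import { ResolvedConnectionConfig } from '../types/types.js';

async function shutdownExample(resolvedConfig: ResolvedConnectionConfig) {
  // One factory per resolved connection config; it tracks every manager it creates.
  const factory = new MySQLConnectionManagerFactory(resolvedConfig);
  const manager = factory.create({ connectionLimit: 2 });

  // ... use `manager` (or a BinLogStream built on it) as sketched earlier ...
  void manager;

  // shutdown() calls end() on each created manager, which stops any binlog
  // listeners and closes the underlying connection pool.
  await factory.shutdown();
}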