@powersync/service-module-mysql 0.0.0-dev-20250813075005 → 0.0.0-dev-20250818104041

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -29,6 +29,10 @@ import { TablePattern } from '@powersync/service-sync-rules';
29
29
 
30
30
  const { Parser } = pkg;
31
31
 
32
+ /**
33
+ * Seconds of inactivity after which a keepalive event is sent by the MySQL server.
34
+ */
35
+ export const KEEPALIVE_INACTIVITY_THRESHOLD = 30;
32
36
  export type Row = Record<string, any>;
33
37
 
34
38
  /**
@@ -65,6 +69,7 @@ export interface BinLogEventHandler {
65
69
  onDelete: (rows: Row[], tableMap: TableMapEntry) => Promise<void>;
66
70
  onCommit: (lsn: string) => Promise<void>;
67
71
  onSchemaChange: (change: SchemaChange) => Promise<void>;
72
+ onKeepAlive: (lsn: string) => Promise<void>;
68
73
  }
69
74
 
70
75
  export interface BinLogListenerOptions {
@@ -72,8 +77,9 @@ export interface BinLogListenerOptions {
72
77
  eventHandler: BinLogEventHandler;
73
78
  sourceTables: TablePattern[];
74
79
  serverId: number;
75
- startPosition: common.BinLogPosition;
80
+ startGTID: common.ReplicatedGTID;
76
81
  logger?: Logger;
82
+ keepAliveInactivitySeconds?: number;
77
83
  }
78
84
 
79
85
  /**
@@ -85,16 +91,19 @@ export class BinLogListener {
85
91
  private connectionManager: MySQLConnectionManager;
86
92
  private eventHandler: BinLogEventHandler;
87
93
  private binLogPosition: common.BinLogPosition;
88
- private currentGTID: common.ReplicatedGTID | null;
94
+ private currentGTID: common.ReplicatedGTID;
89
95
  private logger: Logger;
90
96
  private listenerError: Error | null;
91
97
  private databaseFilter: { [schema: string]: (table: string) => boolean };
92
98
 
99
+ private isStopped: boolean = false;
100
+ private isStopping: boolean = false;
101
+
102
+ // Flag to indicate if we are currently in a transaction that involves multiple row mutation events.
103
+ private isTransactionOpen = false;
93
104
  zongji: ZongJi;
94
105
  processingQueue: async.QueueObject<BinLogEvent>;
95
106
 
96
- isStopped: boolean = false;
97
- isStopping: boolean = false;
98
107
  /**
99
108
  * The combined size in bytes of all the binlog events currently in the processing queue.
100
109
  */
@@ -104,8 +113,8 @@ export class BinLogListener {
104
113
  this.logger = options.logger ?? defaultLogger;
105
114
  this.connectionManager = options.connectionManager;
106
115
  this.eventHandler = options.eventHandler;
107
- this.binLogPosition = options.startPosition;
108
- this.currentGTID = null;
116
+ this.binLogPosition = options.startGTID.position;
117
+ this.currentGTID = options.startGTID;
109
118
  this.sqlParser = new Parser();
110
119
  this.processingQueue = this.createProcessingQueue();
111
120
  this.zongji = this.createZongjiListener();
@@ -130,14 +139,13 @@ export class BinLogListener {
130
139
  `${isRestart ? 'Restarting' : 'Starting'} BinLog Listener with replica client id:${this.options.serverId}...`
131
140
  );
132
141
 
133
- // Set a heartbeat interval for the Zongji replication connection
134
- // Zongji does not explicitly handle the heartbeat events - they are categorized as event:unknown
135
- // The heartbeat events are enough to keep the connection alive for setTimeout to work on the socket.
142
+ // Set a heartbeat interval for the Zongji replication connection, these events are enough to keep the connection
143
+ // alive for setTimeout to work on the socket.
136
144
  // The heartbeat needs to be set before starting the listener, since the replication connection is locked once replicating
137
145
  await new Promise((resolve, reject) => {
138
146
  this.zongji.connection.query(
139
147
  // In nanoseconds, 10^9 = 1s
140
- 'set @master_heartbeat_period=28*1000000000',
148
+ `set @master_heartbeat_period=${this.options.keepAliveInactivitySeconds ?? KEEPALIVE_INACTIVITY_THRESHOLD}*1000000000`,
141
149
  (error: any, results: any, _fields: any) => {
142
150
  if (error) {
143
151
  reject(error);
@@ -158,9 +166,19 @@ export class BinLogListener {
158
166
  });
159
167
 
160
168
  this.zongji.start({
161
- // We ignore the unknown/heartbeat event since it currently serves no purpose other than to keep the connection alive
162
- // tablemap events always need to be included for the other row events to work
163
- includeEvents: ['tablemap', 'writerows', 'updaterows', 'deleterows', 'xid', 'rotate', 'gtidlog', 'query'],
169
+ // Tablemap events always need to be included for the other row events to work
170
+ includeEvents: [
171
+ 'tablemap',
172
+ 'writerows',
173
+ 'updaterows',
174
+ 'deleterows',
175
+ 'xid',
176
+ 'rotate',
177
+ 'gtidlog',
178
+ 'query',
179
+ 'heartbeat',
180
+ 'heartbeat_v2'
181
+ ],
164
182
  includeSchema: this.databaseFilter,
165
183
  filename: this.binLogPosition.filename,
166
184
  position: this.binLogPosition.offset,
@@ -289,19 +307,24 @@ export class BinLogListener {
289
307
  this.logger.info(`Processed GTID event: ${this.currentGTID.comparable}`);
290
308
  break;
291
309
  case zongji_utils.eventIsRotation(evt):
292
- const newFile = this.binLogPosition.filename !== evt.binlogName;
310
+ // The first event when starting replication is a synthetic Rotate event
311
+ // It describes the last binlog file and position that the replica client processed
293
312
  this.binLogPosition.filename = evt.binlogName;
313
+ this.binLogPosition.offset = evt.nextPosition !== 0 ? evt.nextPosition : evt.position;
294
314
  await this.eventHandler.onRotate();
295
315
 
316
+ const newFile = this.binLogPosition.filename !== evt.binlogName;
296
317
  if (newFile) {
297
318
  this.logger.info(
298
319
  `Processed Rotate event. New BinLog file is: ${this.binLogPosition.filename}:${this.binLogPosition.offset}`
299
320
  );
300
321
  }
322
+
301
323
  break;
302
324
  case zongji_utils.eventIsWriteMutation(evt):
303
325
  const tableMap = evt.tableMap[evt.tableId];
304
326
  await this.eventHandler.onWrite(evt.rows, tableMap);
327
+ this.binLogPosition.offset = evt.nextPosition;
305
328
  this.logger.info(
306
329
  `Processed Write event for table [${tableMap.parentSchema}.${tableMap.tableName}]. ${evt.rows.length} row(s) inserted.`
307
330
  );
@@ -312,20 +335,33 @@ export class BinLogListener {
312
335
  evt.rows.map((row) => row.before),
313
336
  evt.tableMap[evt.tableId]
314
337
  );
338
+ this.binLogPosition.offset = evt.nextPosition;
315
339
  this.logger.info(
316
340
  `Processed Update event for table [${evt.tableMap[evt.tableId].tableName}]. ${evt.rows.length} row(s) updated.`
317
341
  );
318
342
  break;
319
343
  case zongji_utils.eventIsDeleteMutation(evt):
320
344
  await this.eventHandler.onDelete(evt.rows, evt.tableMap[evt.tableId]);
345
+ this.binLogPosition.offset = evt.nextPosition;
321
346
  this.logger.info(
322
347
  `Processed Delete event for table [${evt.tableMap[evt.tableId].tableName}]. ${evt.rows.length} row(s) deleted.`
323
348
  );
324
349
  break;
350
+ case zongji_utils.eventIsHeartbeat(evt):
351
+ case zongji_utils.eventIsHeartbeat_v2(evt):
352
+ // Heartbeats are sent by the master to keep the connection alive after a period of inactivity. They are synthetic
353
+ // so are not written to the binlog. Consequently, they have no effect on the binlog position.
354
+ // We forward these along with the current GTID to the event handler, but don't want to do this if a transaction is in progress.
355
+ if (!this.isTransactionOpen) {
356
+ await this.eventHandler.onKeepAlive(this.currentGTID.comparable);
357
+ }
358
+ this.logger.debug(`Processed Heartbeat event. Current GTID is: ${this.currentGTID.comparable}`);
359
+ break;
325
360
  case zongji_utils.eventIsXid(evt):
361
+ this.isTransactionOpen = false;
326
362
  this.binLogPosition.offset = evt.nextPosition;
327
363
  const LSN = new common.ReplicatedGTID({
328
- raw_gtid: this.currentGTID!.raw,
364
+ raw_gtid: this.currentGTID.raw,
329
365
  position: this.binLogPosition
330
366
  }).comparable;
331
367
  await this.eventHandler.onCommit(LSN);
@@ -336,8 +372,6 @@ export class BinLogListener {
336
372
  break;
337
373
  }
338
374
 
339
- // Update the binlog position after processing the event
340
- this.binLogPosition.offset = evt.nextPosition;
341
375
  this.queueMemoryUsage -= evt.size;
342
376
  };
343
377
  }
@@ -345,14 +379,15 @@ export class BinLogListener {
345
379
  private async processQueryEvent(event: BinLogQueryEvent): Promise<void> {
346
380
  const { query, nextPosition } = event;
347
381
 
348
- // BEGIN query events mark the start of a transaction before any row events. They are not relevant for schema changes
382
+ // BEGIN query events mark the start of a transaction before any row events. They are not schema changes so no further parsing is necessary.
349
383
  if (query === 'BEGIN') {
384
+ this.isTransactionOpen = true;
350
385
  return;
351
386
  }
352
387
 
353
388
  const schemaChanges = this.toSchemaChanges(query, event.schema);
354
389
  if (schemaChanges.length > 0) {
355
- // Since handling the schema changes can take a long time, we need to stop the Zongji listener instead of pausing it.
390
+ // Handling schema changes can take a long time, so we stop the Zongji listener whilst handling them to prevent the listener from timing out.
356
391
  await this.stopZongji();
357
392
 
358
393
  for (const change of schemaChanges) {
@@ -360,19 +395,21 @@ export class BinLogListener {
360
395
  await this.eventHandler.onSchemaChange(change);
361
396
  }
362
397
 
363
- // DDL queries are auto commited, but do not come with a corresponding Xid event.
364
- // This is problematic for DDL queries which result in row events because the checkpoint is not moved on,
365
- // so we manually commit here.
366
- this.binLogPosition.offset = nextPosition;
367
- const LSN = new common.ReplicatedGTID({
368
- raw_gtid: this.currentGTID!.raw,
369
- position: this.binLogPosition
370
- }).comparable;
371
- await this.eventHandler.onCommit(LSN);
398
+ // DDL queries are auto committed, but do not come with a corresponding Xid event; in those cases we trigger a manual commit if we are not already in a transaction.
399
+ // Some DDL queries include row events, and in those cases will include a Xid event.
400
+ if (!this.isTransactionOpen) {
401
+ this.binLogPosition.offset = nextPosition;
402
+ const LSN = new common.ReplicatedGTID({
403
+ raw_gtid: this.currentGTID.raw,
404
+ position: this.binLogPosition
405
+ }).comparable;
406
+ await this.eventHandler.onCommit(LSN);
407
+ }
372
408
 
373
409
  this.logger.info(`Successfully processed ${schemaChanges.length} schema change(s).`);
374
410
 
375
411
  // If there are still events in the processing queue, we need to process those before restarting Zongji
412
+ // This avoids potentially processing the same events again after a restart.
376
413
  if (!this.processingQueue.idle()) {
377
414
  this.logger.info(`Processing [${this.processingQueue.length()}] events(s) before resuming...`);
378
415
  this.processingQueue.drain(async () => {
@@ -381,6 +418,13 @@ export class BinLogListener {
381
418
  } else {
382
419
  await this.restartZongji();
383
420
  }
421
+ } else if (!this.isTransactionOpen) {
422
+ this.binLogPosition.offset = nextPosition;
423
+ const LSN = new common.ReplicatedGTID({
424
+ raw_gtid: this.currentGTID.raw,
425
+ position: this.binLogPosition
426
+ }).comparable;
427
+ await this.eventHandler.onCommit(LSN);
384
428
  }
385
429
  }
386
430
 
@@ -6,7 +6,9 @@ import {
6
6
  BinLogTableMapEvent,
7
7
  BinLogRowUpdateEvent,
8
8
  BinLogXidEvent,
9
- BinLogQueryEvent
9
+ BinLogQueryEvent,
10
+ BinLogHeartbeatEvent,
11
+ BinLogHeartbeatEvent_V2
10
12
  } from '@powersync/mysql-zongji';
11
13
 
12
14
  export function eventIsGTIDLog(event: BinLogEvent): event is BinLogGTIDLogEvent {
@@ -21,6 +23,14 @@ export function eventIsXid(event: BinLogEvent): event is BinLogXidEvent {
21
23
  return event.getEventName() == 'xid';
22
24
  }
23
25
 
26
+ export function eventIsHeartbeat(event: BinLogEvent): event is BinLogHeartbeatEvent {
27
+ return event.getEventName() == 'heartbeat';
28
+ }
29
+
30
+ export function eventIsHeartbeat_v2(event: BinLogEvent): event is BinLogHeartbeatEvent_V2 {
31
+ return event.getEventName() == 'heartbeat_v2';
32
+ }
33
+
24
34
  export function eventIsRotation(event: BinLogEvent): event is BinLogRotationEvent {
25
35
  return event.getEventName() == 'rotate';
26
36
  }
@@ -1,22 +1,15 @@
1
1
  import { afterAll, beforeAll, beforeEach, describe, expect, test, vi } from 'vitest';
2
- import {
3
- BinLogEventHandler,
4
- BinLogListener,
5
- Row,
6
- SchemaChange,
7
- SchemaChangeType
8
- } from '@module/replication/zongji/BinLogListener.js';
2
+ import { BinLogListener, SchemaChange, SchemaChangeType } from '@module/replication/zongji/BinLogListener.js';
9
3
  import { MySQLConnectionManager } from '@module/replication/MySQLConnectionManager.js';
10
- import { clearTestDb, createTestDb, TEST_CONNECTION_OPTIONS } from './util.js';
11
- import { v4 as uuid } from 'uuid';
12
- import * as common from '@module/common/common-index.js';
13
4
  import {
14
- createRandomServerId,
15
- getMySQLVersion,
16
- qualifiedMySQLTable,
17
- satisfiesVersion
18
- } from '@module/utils/mysql-utils.js';
19
- import { TableMapEntry } from '@powersync/mysql-zongji';
5
+ clearTestDb,
6
+ createBinlogListener,
7
+ createTestDb,
8
+ TEST_CONNECTION_OPTIONS,
9
+ TestBinLogEventHandler
10
+ } from './util.js';
11
+ import { v4 as uuid } from 'uuid';
12
+ import { getMySQLVersion, qualifiedMySQLTable, satisfiesVersion } from '@module/utils/mysql-utils.js';
20
13
  import crypto from 'crypto';
21
14
  import { TablePattern } from '@powersync/service-sync-rules';
22
15
 
@@ -46,7 +39,11 @@ describe('BinlogListener tests', () => {
46
39
  await connectionManager.query(`CREATE TABLE test_DATA (id CHAR(36) PRIMARY KEY, description MEDIUMTEXT)`);
47
40
  connection.release();
48
41
  eventHandler = new TestBinLogEventHandler();
49
- binLogListener = await createBinlogListener();
42
+ binLogListener = await createBinlogListener({
43
+ connectionManager,
44
+ sourceTables: [new TablePattern(connectionManager.databaseName, 'test_DATA')],
45
+ eventHandler
46
+ });
50
47
  });
51
48
 
52
49
  afterAll(async () => {
@@ -106,6 +103,14 @@ describe('BinlogListener tests', () => {
106
103
  await binLogListener.stop();
107
104
  });
108
105
 
106
+ test('Keepalive event', async () => {
107
+ binLogListener.options.keepAliveInactivitySeconds = 1;
108
+ await binLogListener.start();
109
+ await vi.waitFor(() => expect(eventHandler.lastKeepAlive).toBeDefined(), { timeout: 10000 });
110
+ await binLogListener.stop();
111
+ expect(eventHandler.lastKeepAlive).toEqual(binLogListener.options.startGTID.comparable);
112
+ });
113
+
109
114
  test('Schema change event: Rename table', async () => {
110
115
  await binLogListener.start();
111
116
  await connectionManager.query(`ALTER TABLE test_DATA RENAME test_DATA_new`);
@@ -276,7 +281,11 @@ describe('BinlogListener tests', () => {
276
281
  test('Schema change event: Drop and Add primary key', async () => {
277
282
  await connectionManager.query(`CREATE TABLE test_constraints (id CHAR(36), description VARCHAR(100))`);
278
283
  const sourceTables = [new TablePattern(connectionManager.databaseName, 'test_constraints')];
279
- binLogListener = await createBinlogListener(sourceTables);
284
+ binLogListener = await createBinlogListener({
285
+ connectionManager,
286
+ eventHandler,
287
+ sourceTables
288
+ });
280
289
  await binLogListener.start();
281
290
  await connectionManager.query(`ALTER TABLE test_constraints ADD PRIMARY KEY (id)`);
282
291
  await connectionManager.query(`ALTER TABLE test_constraints DROP PRIMARY KEY`);
@@ -301,7 +310,11 @@ describe('BinlogListener tests', () => {
301
310
  test('Schema change event: Add and drop unique constraint', async () => {
302
311
  await connectionManager.query(`CREATE TABLE test_constraints (id CHAR(36), description VARCHAR(100))`);
303
312
  const sourceTables = [new TablePattern(connectionManager.databaseName, 'test_constraints')];
304
- binLogListener = await createBinlogListener(sourceTables);
313
+ binLogListener = await createBinlogListener({
314
+ connectionManager,
315
+ eventHandler,
316
+ sourceTables
317
+ });
305
318
  await binLogListener.start();
306
319
  await connectionManager.query(`ALTER TABLE test_constraints ADD UNIQUE (description)`);
307
320
  await connectionManager.query(`ALTER TABLE test_constraints DROP INDEX description`);
@@ -326,7 +339,11 @@ describe('BinlogListener tests', () => {
326
339
  test('Schema change event: Add and drop a unique index', async () => {
327
340
  await connectionManager.query(`CREATE TABLE test_constraints (id CHAR(36), description VARCHAR(100))`);
328
341
  const sourceTables = [new TablePattern(connectionManager.databaseName, 'test_constraints')];
329
- binLogListener = await createBinlogListener(sourceTables);
342
+ binLogListener = await createBinlogListener({
343
+ connectionManager,
344
+ eventHandler,
345
+ sourceTables
346
+ });
330
347
  await binLogListener.start();
331
348
  await connectionManager.query(`CREATE UNIQUE INDEX description_idx ON test_constraints (description)`);
332
349
  await connectionManager.query(`DROP INDEX description_idx ON test_constraints`);
@@ -367,7 +384,11 @@ describe('BinlogListener tests', () => {
367
384
  // If there are multiple schema changes in the binlog processing queue, we only restart the binlog listener once
368
385
  // all the schema changes have been processed
369
386
  const sourceTables = [new TablePattern(connectionManager.databaseName, 'test_multiple')];
370
- binLogListener = await createBinlogListener(sourceTables);
387
+ binLogListener = await createBinlogListener({
388
+ connectionManager,
389
+ eventHandler,
390
+ sourceTables
391
+ });
371
392
 
372
393
  await connectionManager.query(`CREATE TABLE test_multiple (id CHAR(36), description VARCHAR(100))`);
373
394
  await connectionManager.query(`ALTER TABLE test_multiple ADD COLUMN new_column VARCHAR(10)`);
@@ -388,7 +409,11 @@ describe('BinlogListener tests', () => {
388
409
  test('Unprocessed binlog event received that does match the current table schema', async () => {
389
410
  // If we process a binlog event for a table which has since had its schema changed, we expect the binlog listener to stop with an error
390
411
  const sourceTables = [new TablePattern(connectionManager.databaseName, 'test_failure')];
391
- binLogListener = await createBinlogListener(sourceTables);
412
+ binLogListener = await createBinlogListener({
413
+ connectionManager,
414
+ eventHandler,
415
+ sourceTables
416
+ });
392
417
 
393
418
  await connectionManager.query(`CREATE TABLE test_failure (id CHAR(36), description VARCHAR(100))`);
394
419
  await connectionManager.query(`INSERT INTO test_failure(id, description) VALUES('${uuid()}','test_failure')`);
@@ -403,7 +428,11 @@ describe('BinlogListener tests', () => {
403
428
 
404
429
  test('Unprocessed binlog event received for a dropped table', async () => {
405
430
  const sourceTables = [new TablePattern(connectionManager.databaseName, 'test_failure')];
406
- binLogListener = await createBinlogListener(sourceTables);
431
+ binLogListener = await createBinlogListener({
432
+ connectionManager,
433
+ eventHandler,
434
+ sourceTables
435
+ });
407
436
 
408
437
  // If we process a binlog event for a table which has since been dropped, we expect the binlog listener to stop with an error
409
438
  await connectionManager.query(`CREATE TABLE test_failure (id CHAR(36), description VARCHAR(100))`);
@@ -424,7 +453,11 @@ describe('BinlogListener tests', () => {
424
453
  new TablePattern(connectionManager.databaseName, 'test_DATA'),
425
454
  new TablePattern('multi_schema', 'test_DATA_multi')
426
455
  ];
427
- binLogListener = await createBinlogListener(sourceTables);
456
+ binLogListener = await createBinlogListener({
457
+ connectionManager,
458
+ eventHandler,
459
+ sourceTables
460
+ });
428
461
  await binLogListener.start();
429
462
 
430
463
  // Default database insert into test_DATA
@@ -439,28 +472,6 @@ describe('BinlogListener tests', () => {
439
472
  assertSchemaChange(eventHandler.schemaChanges[0], SchemaChangeType.DROP_TABLE, 'multi_schema', 'test_DATA_multi');
440
473
  });
441
474
 
442
- async function createBinlogListener(
443
- sourceTables?: TablePattern[],
444
- startPosition?: common.BinLogPosition
445
- ): Promise<BinLogListener> {
446
- if (!sourceTables) {
447
- sourceTables = [new TablePattern(connectionManager.databaseName, 'test_DATA')];
448
- }
449
-
450
- if (!startPosition) {
451
- const fromGTID = await getFromGTID(connectionManager);
452
- startPosition = fromGTID.position;
453
- }
454
-
455
- return new BinLogListener({
456
- connectionManager: connectionManager,
457
- eventHandler: eventHandler,
458
- startPosition: startPosition,
459
- sourceTables: sourceTables,
460
- serverId: createRandomServerId(1)
461
- });
462
- }
463
-
464
475
  function assertSchemaChange(
465
476
  change: SchemaChange,
466
477
  type: SchemaChangeType,
@@ -477,14 +488,6 @@ describe('BinlogListener tests', () => {
477
488
  }
478
489
  });
479
490
 
480
- async function getFromGTID(connectionManager: MySQLConnectionManager) {
481
- const connection = await connectionManager.getConnection();
482
- const fromGTID = await common.readExecutedGtid(connection);
483
- connection.release();
484
-
485
- return fromGTID;
486
- }
487
-
488
491
  async function insertRows(connectionManager: MySQLConnectionManager, count: number) {
489
492
  for (let i = 0; i < count; i++) {
490
493
  await connectionManager.query(
@@ -500,45 +503,3 @@ async function updateRows(connectionManager: MySQLConnectionManager) {
500
503
  async function deleteRows(connectionManager: MySQLConnectionManager) {
501
504
  await connectionManager.query(`DELETE FROM test_DATA`);
502
505
  }
503
-
504
- class TestBinLogEventHandler implements BinLogEventHandler {
505
- rowsWritten = 0;
506
- rowsUpdated = 0;
507
- rowsDeleted = 0;
508
- commitCount = 0;
509
- schemaChanges: SchemaChange[] = [];
510
-
511
- unpause: ((value: void | PromiseLike<void>) => void) | undefined;
512
- private pausedPromise: Promise<void> | undefined;
513
-
514
- pause() {
515
- this.pausedPromise = new Promise((resolve) => {
516
- this.unpause = resolve;
517
- });
518
- }
519
-
520
- async onWrite(rows: Row[], tableMap: TableMapEntry) {
521
- if (this.pausedPromise) {
522
- await this.pausedPromise;
523
- }
524
- this.rowsWritten = this.rowsWritten + rows.length;
525
- }
526
-
527
- async onUpdate(afterRows: Row[], beforeRows: Row[], tableMap: TableMapEntry) {
528
- this.rowsUpdated = this.rowsUpdated + afterRows.length;
529
- }
530
-
531
- async onDelete(rows: Row[], tableMap: TableMapEntry) {
532
- this.rowsDeleted = this.rowsDeleted + rows.length;
533
- }
534
-
535
- async onCommit(lsn: string) {
536
- this.commitCount++;
537
- }
538
-
539
- async onSchemaChange(change: SchemaChange) {
540
- this.schemaChanges.push(change);
541
- }
542
- async onTransactionStart(options: { timestamp: Date }) {}
543
- async onRotate() {}
544
- }
@@ -652,12 +652,8 @@ function defineTests(factory: storage.TestStorageFactory) {
652
652
  await connectionManager.query(`INSERT INTO ${testTable}(id, description) VALUES('t3','test3')`);
653
653
  await connectionManager.query(`DROP TABLE ${testTable}`);
654
654
 
655
- // Force a commit on the watched schema to advance the checkpoint
656
- await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('t1','test1')`);
657
-
658
655
  const data = await context.getBucketData('global[]');
659
656
 
660
- // Should only include the entry used to advance the checkpoint
661
- expect(data).toMatchObject([PUT_T1]);
657
+ expect(data).toMatchObject([]);
662
658
  });
663
659
  }
package/test/src/util.ts CHANGED
@@ -1,5 +1,5 @@
1
1
  import * as types from '@module/types/types.js';
2
- import { getMySQLVersion, isVersionAtLeast } from '@module/utils/mysql-utils.js';
2
+ import { createRandomServerId, getMySQLVersion, isVersionAtLeast } from '@module/utils/mysql-utils.js';
3
3
  import * as mongo_storage from '@powersync/service-module-mongodb-storage';
4
4
  import * as postgres_storage from '@powersync/service-module-postgres-storage';
5
5
  import mysqlPromise from 'mysql2/promise';
@@ -7,6 +7,10 @@ import { env } from './env.js';
7
7
  import { describe, TestOptions } from 'vitest';
8
8
  import { TestStorageFactory } from '@powersync/service-core';
9
9
  import { MySQLConnectionManager } from '@module/replication/MySQLConnectionManager.js';
10
+ import { BinLogEventHandler, BinLogListener, Row, SchemaChange } from '@module/replication/zongji/BinLogListener.js';
11
+ import { TableMapEntry } from '@powersync/mysql-zongji';
12
+ import * as common from '@module/common/common-index.js';
13
+ import { TablePattern } from '@powersync/service-sync-rules';
10
14
 
11
15
  export const TEST_URI = env.MYSQL_TEST_URI;
12
16
 
@@ -58,3 +62,79 @@ export async function createTestDb(connectionManager: MySQLConnectionManager, db
58
62
  await connectionManager.query(`DROP DATABASE IF EXISTS ${dbName}`);
59
63
  await connectionManager.query(`CREATE DATABASE IF NOT EXISTS ${dbName}`);
60
64
  }
65
+
66
+ export async function getFromGTID(connectionManager: MySQLConnectionManager) {
67
+ const connection = await connectionManager.getConnection();
68
+ const fromGTID = await common.readExecutedGtid(connection);
69
+ connection.release();
70
+
71
+ return fromGTID;
72
+ }
73
+
74
+ export interface CreateBinlogListenerParams {
75
+ connectionManager: MySQLConnectionManager;
76
+ eventHandler: BinLogEventHandler;
77
+ sourceTables: TablePattern[];
78
+ startGTID?: common.ReplicatedGTID;
79
+ }
80
+ export async function createBinlogListener(params: CreateBinlogListenerParams): Promise<BinLogListener> {
81
+ let { connectionManager, eventHandler, sourceTables, startGTID } = params;
82
+
83
+ if (!startGTID) {
84
+ startGTID = await getFromGTID(connectionManager);
85
+ }
86
+
87
+ return new BinLogListener({
88
+ connectionManager: connectionManager,
89
+ eventHandler: eventHandler,
90
+ startGTID: startGTID!,
91
+ sourceTables: sourceTables,
92
+ serverId: createRandomServerId(1)
93
+ });
94
+ }
95
+
96
+ export class TestBinLogEventHandler implements BinLogEventHandler {
97
+ rowsWritten = 0;
98
+ rowsUpdated = 0;
99
+ rowsDeleted = 0;
100
+ commitCount = 0;
101
+ schemaChanges: SchemaChange[] = [];
102
+ lastKeepAlive: string | undefined;
103
+
104
+ unpause: ((value: void | PromiseLike<void>) => void) | undefined;
105
+ private pausedPromise: Promise<void> | undefined;
106
+
107
+ pause() {
108
+ this.pausedPromise = new Promise((resolve) => {
109
+ this.unpause = resolve;
110
+ });
111
+ }
112
+
113
+ async onWrite(rows: Row[], tableMap: TableMapEntry) {
114
+ if (this.pausedPromise) {
115
+ await this.pausedPromise;
116
+ }
117
+ this.rowsWritten = this.rowsWritten + rows.length;
118
+ }
119
+
120
+ async onUpdate(afterRows: Row[], beforeRows: Row[], tableMap: TableMapEntry) {
121
+ this.rowsUpdated = this.rowsUpdated + afterRows.length;
122
+ }
123
+
124
+ async onDelete(rows: Row[], tableMap: TableMapEntry) {
125
+ this.rowsDeleted = this.rowsDeleted + rows.length;
126
+ }
127
+
128
+ async onCommit(lsn: string) {
129
+ this.commitCount++;
130
+ }
131
+
132
+ async onSchemaChange(change: SchemaChange) {
133
+ this.schemaChanges.push(change);
134
+ }
135
+ async onTransactionStart(options: { timestamp: Date }) {}
136
+ async onRotate() {}
137
+ async onKeepAlive(lsn: string) {
138
+ this.lastKeepAlive = lsn;
139
+ }
140
+ }