@powersync/service-module-mysql 0.0.0-dev-20241101083236 → 0.0.0-dev-20241111122558
This diff compares the contents of two publicly released versions of this package, as published to a supported public registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in that registry.
- package/CHANGELOG.md +11 -8
- package/dev/config/sync_rules.yaml +2 -4
- package/dist/api/MySQLRouteAPIAdapter.js +9 -8
- package/dist/api/MySQLRouteAPIAdapter.js.map +1 -1
- package/dist/common/ReplicatedGTID.js +1 -1
- package/dist/common/check-source-configuration.d.ts +0 -1
- package/dist/common/check-source-configuration.js +6 -8
- package/dist/common/check-source-configuration.js.map +1 -1
- package/dist/common/get-replication-columns.js +1 -1
- package/dist/common/mysql-to-sqlite.d.ts +17 -1
- package/dist/common/mysql-to-sqlite.js +133 -8
- package/dist/common/mysql-to-sqlite.js.map +1 -1
- package/dist/common/read-executed-gtid.js +4 -10
- package/dist/common/read-executed-gtid.js.map +1 -1
- package/dist/replication/BinLogStream.js +25 -21
- package/dist/replication/BinLogStream.js.map +1 -1
- package/dist/replication/MySQLConnectionManager.d.ts +2 -2
- package/dist/replication/MySQLConnectionManager.js +1 -1
- package/dist/utils/mysql-utils.d.ts +30 -0
- package/dist/utils/mysql-utils.js +70 -0
- package/dist/utils/mysql-utils.js.map +1 -0
- package/package.json +7 -6
- package/src/api/MySQLRouteAPIAdapter.ts +11 -9
- package/src/common/ReplicatedGTID.ts +1 -1
- package/src/common/check-source-configuration.ts +9 -10
- package/src/common/get-replication-columns.ts +1 -1
- package/src/common/mysql-to-sqlite.ts +147 -8
- package/src/common/read-executed-gtid.ts +5 -12
- package/src/replication/BinLogStream.ts +29 -21
- package/src/replication/MySQLConnectionManager.ts +3 -3
- package/src/utils/{mysql_utils.ts → mysql-utils.ts} +36 -5
- package/test/src/BinLogStream.test.ts +306 -0
- package/test/src/BinlogStreamUtils.ts +157 -0
- package/test/src/env.ts +1 -1
- package/test/src/mysql-to-sqlite.test.ts +322 -0
- package/test/src/mysql-utils.test.ts +17 -0
- package/test/src/util.ts +11 -17
- package/test/tsconfig.json +1 -1
- package/tsconfig.tsbuildinfo +1 -1
- package/dist/utils/mysql_utils.d.ts +0 -17
- package/dist/utils/mysql_utils.js +0 -43
- package/dist/utils/mysql_utils.js.map +0 -1
package/src/replication/BinLogStream.ts
CHANGED

@@ -9,9 +9,9 @@ import { BinLogEvent, StartOptions, TableMapEntry } from '@powersync/mysql-zongji';
 import * as common from '../common/common-index.js';
 import * as zongji_utils from './zongji/zongji-utils.js';
 import { MySQLConnectionManager } from './MySQLConnectionManager.js';
-import { isBinlogStillAvailable, ReplicatedGTID } from '../common/common-index.js';
+import { isBinlogStillAvailable, ReplicatedGTID, toColumnDescriptors } from '../common/common-index.js';
 import mysqlPromise from 'mysql2/promise';
-import {
+import { createRandomServerId } from '../utils/mysql-utils.js';

 export interface BinLogStreamOptions {
   connections: MySQLConnectionManager;
@@ -221,7 +221,13 @@ AND table_type = 'BASE TABLE';`,
     // Check if the binlog is still available. If it isn't we need to snapshot again.
     const connection = await this.connections.getConnection();
     try {
-
+      const isAvailable = await isBinlogStillAvailable(connection, lastKnowGTID.position.filename);
+      if (!isAvailable) {
+        logger.info(
+          `Binlog file ${lastKnowGTID.position.filename} is no longer available, starting initial replication again.`
+        );
+      }
+      return isAvailable;
     } finally {
       connection.release();
     }
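The `isBinlogStillAvailable` helper referenced in this hunk lives in the module's common utilities and is not shown in this diff. As a rough sketch of the underlying idea (an assumption for illustration, not the package's actual implementation): a binlog file is still usable if it appears in the server's `SHOW BINARY LOGS` output.

import mysqlPromise from 'mysql2/promise';

// Hypothetical sketch: check whether a binlog file is still retained by the server.
// SHOW BINARY LOGS returns one row per retained binlog file (Log_name, File_size).
async function binlogFileStillExists(
  connection: mysqlPromise.Connection,
  binlogFilename: string
): Promise<boolean> {
  const [logs] = await connection.query<mysqlPromise.RowDataPacket[]>('SHOW BINARY LOGS');
  return logs.some((log) => log.Log_name === binlogFilename);
}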
@@ -245,7 +251,7 @@ AND table_type = 'BASE TABLE';`,
     const connection = await this.connections.getStreamingConnection();
     const promiseConnection = (connection as mysql.Connection).promise();
     const headGTID = await common.readExecutedGtid(promiseConnection);
-    logger.info(`Using snapshot checkpoint GTID
+    logger.info(`Using snapshot checkpoint GTID: '${headGTID}'`);
     try {
       logger.info(`Starting initial replication`);
       await promiseConnection.query<mysqlPromise.RowDataPacket[]>(
@@ -285,7 +291,7 @@ AND table_type = 'BASE TABLE';`,
     logger.info(`Replicating ${table.qualifiedName}`);
     // TODO count rows and log progress at certain batch sizes

-
+    let columns: Map<string, ColumnDescriptor>;
     return new Promise<void>((resolve, reject) => {
       // MAX_EXECUTION_TIME(0) hint disables execution timeout for this query
       connection
@@ -295,10 +301,7 @@ AND table_type = 'BASE TABLE';`,
         })
         .on('fields', (fields: FieldPacket[]) => {
           // Map the columns and their types
-          fields
-            const columnType = MySQLTypesMap[field.type as number];
-            columns.set(field.name, { name: field.name, type: columnType, typeId: field.type });
-          });
+          columns = toColumnDescriptors(fields);
         })
         .on('result', async (row) => {
           connection.pause();
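The new `toColumnDescriptors` helper consolidates both inline mappings removed in this diff: it builds a `Map<string, ColumnDescriptor>` either from the `FieldPacket[]` emitted by a query's 'fields' event, or from a binlog `TableMapEntry` (see the later hunk). A minimal sketch, assuming the `ColumnDescriptor` shape implied by the removed code; the real helper lives in `common-index.js` and may differ in detail:

import mysql, { FieldPacket } from 'mysql2';
import { TableMapEntry } from '@powersync/mysql-zongji';

// Shape implied by the removed inline code; the module's actual type may differ.
interface ColumnDescriptor {
  name: string;
  type?: string;
  typeId: number;
}

// Reverse lookup from mysql2 type codes to type names, as the removed
// MySQLTypesMap used to provide.
const typeNames: { [code: number]: string } = {};
for (const [name, code] of Object.entries(mysql.Types)) {
  typeNames[code as number] = name;
}

// Hypothetical consolidated mapping over both source shapes.
function toColumnDescriptorsSketch(source: FieldPacket[] | TableMapEntry): Map<string, ColumnDescriptor> {
  const columns = new Map<string, ColumnDescriptor>();
  if (Array.isArray(source)) {
    for (const field of source) {
      columns.set(field.name, {
        name: field.name,
        type: typeNames[field.type as number],
        typeId: field.type as number
      });
    }
  } else {
    for (const column of source.columns) {
      columns.set(column.name, { name: column.name, typeId: column.type });
    }
  }
  return columns;
}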
@@ -363,10 +366,14 @@ AND table_type = 'BASE TABLE';`,
   async streamChanges() {
     // Auto-activate as soon as initial replication is done
     await this.storage.autoActivate();
+    const serverId = createRandomServerId(this.storage.group_id);
+    logger.info(`Starting replication. Created replica client with serverId:${serverId}`);

     const connection = await this.connections.getConnection();
     const { checkpoint_lsn } = await this.storage.getStatus();
-
+    if (checkpoint_lsn) {
+      logger.info(`Existing checkpoint found: ${checkpoint_lsn}`);
+    }

     const fromGTID = checkpoint_lsn
       ? common.ReplicatedGTID.fromSerialized(checkpoint_lsn)
@@ -447,7 +454,7 @@ AND table_type = 'BASE TABLE';`,

     zongji.on('binlog', (evt: BinLogEvent) => {
       if (!this.stopped) {
-        logger.info(`
+        logger.info(`Received Binlog event:${evt.getEventName()}`);
         queue.push(evt);
       } else {
         logger.info(`Replication is busy stopping, ignoring event ${evt.getEventName()}`);
@@ -458,16 +465,18 @@ AND table_type = 'BASE TABLE';`,
       // Powersync is shutting down, don't start replicating
       return;
     }
+
+    logger.info(`Reading binlog from: ${binLogPositionState.filename}:${binLogPositionState.offset}`);
+
     // Only listen for changes to tables in the sync rules
     const includedTables = [...this.tableCache.values()].map((table) => table.table);
-    logger.info(`Starting replication from ${binLogPositionState.filename}:${binLogPositionState.offset}`);
     zongji.start({
       includeEvents: ['tablemap', 'writerows', 'updaterows', 'deleterows', 'xid', 'rotate', 'gtidlog'],
       excludeEvents: [],
       includeSchema: { [this.defaultSchema]: includedTables },
       filename: binLogPositionState.filename,
       position: binLogPositionState.offset,
-      serverId:
+      serverId: serverId
     } satisfies StartOptions);

     // Forever young
@@ -516,10 +525,7 @@ AND table_type = 'BASE TABLE';`,
       tableEntry: TableMapEntry;
     }
   ): Promise<storage.FlushedResult | null> {
-    const columns =
-    msg.tableEntry.columns.forEach((column) => {
-      columns.set(column.name, { name: column.name, typeId: column.type });
-    });
+    const columns = toColumnDescriptors(msg.tableEntry);

     for (const [index, row] of msg.data.entries()) {
       await this.writeChange(batch, {
@@ -560,8 +566,10 @@ AND table_type = 'BASE TABLE';`,
         Metrics.getInstance().rows_replicated_total.add(1);
         // "before" may be null if the replica id columns are unchanged
         // It's fine to treat that the same as an insert.
-        const beforeUpdated = payload.previous_data
-
+        const beforeUpdated = payload.previous_data
+          ? common.toSQLiteRow(payload.previous_data, payload.columns)
+          : undefined;
+        const after = common.toSQLiteRow(payload.data, payload.columns);

         return await batch.save({
           tag: storage.SaveOperationTag.UPDATE,
@@ -570,13 +578,13 @@ AND table_type = 'BASE TABLE';`,
           beforeReplicaId: beforeUpdated
             ? getUuidReplicaIdentityBson(beforeUpdated, payload.sourceTable.replicaIdColumns)
             : undefined,
-          after: common.toSQLiteRow(payload.data),
+          after: common.toSQLiteRow(payload.data, payload.columns),
           afterReplicaId: getUuidReplicaIdentityBson(after, payload.sourceTable.replicaIdColumns)
         });

       case storage.SaveOperationTag.DELETE:
         Metrics.getInstance().rows_replicated_total.add(1);
-        const beforeDeleted = common.toSQLiteRow(payload.data);
+        const beforeDeleted = common.toSQLiteRow(payload.data, payload.columns);

         return await batch.save({
           tag: storage.SaveOperationTag.DELETE,
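`toSQLiteRow` now receives the column descriptors alongside the raw row, so conversion can key off the declared MySQL column type rather than only the JavaScript runtime type. A minimal sketch of why that matters; the conversion rules below are illustrative assumptions, not the module's actual rules:

type SqliteValue = string | number | bigint | Uint8Array | null;

// Hypothetical type-aware conversion: the same JS value can map to different
// SQLite representations depending on the MySQL column type.
function toSQLiteRowSketch(
  row: Record<string, any>,
  columns: Map<string, { name: string; type?: string; typeId: number }>
): Record<string, SqliteValue> {
  const result: Record<string, SqliteValue> = {};
  for (const [name, value] of Object.entries(row)) {
    const column = columns.get(name);
    if (value instanceof Date) {
      // DATETIME/TIMESTAMP -> ISO string, matching the date tests later in this diff
      result[name] = value.toISOString();
    } else if (column?.type === 'LONGLONG' && typeof value === 'string') {
      // With supportBigNumbers, mysql2 can deliver large BIGINT values as strings;
      // convert to bigint to preserve 64-bit precision
      result[name] = BigInt(value);
    } else {
      result[name] = value as SqliteValue;
    }
  }
  return result;
}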
package/src/replication/MySQLConnectionManager.ts
CHANGED

@@ -1,7 +1,7 @@
 import { NormalizedMySQLConnectionConfig } from '../types/types.js';
 import mysqlPromise from 'mysql2/promise';
-import mysql, { RowDataPacket } from 'mysql2';
-import * as mysql_utils from '../utils/
+import mysql, { FieldPacket, RowDataPacket } from 'mysql2';
+import * as mysql_utils from '../utils/mysql-utils.js';
 import ZongJi from '@powersync/mysql-zongji';
 import { logger } from '@powersync/lib-services-framework';

@@ -61,7 +61,7 @@ export class MySQLConnectionManager {
   * @param query
   * @param params
   */
-  async query(query: string, params?: any[]) {
+  async query(query: string, params?: any[]): Promise<[RowDataPacket[], FieldPacket[]]> {
    return this.promisePool.query<RowDataPacket[]>(query, params);
  }
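The explicit return type makes the mysql2 promise-pool convention visible at the call site: `query` resolves to a `[rows, fields]` tuple. A small usage sketch (table and column names are illustrative):

// rows is RowDataPacket[]; fields is FieldPacket[] describing the result columns
const [rows, fields] = await connectionManager.query('SELECT id, description FROM test_data');
for (const row of rows) {
  console.log(row.id, row.description, fields.map((f) => f.name));
}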
package/src/utils/{mysql_utils.ts → mysql-utils.ts}
RENAMED

@@ -2,11 +2,7 @@ import { logger } from '@powersync/lib-services-framework';
 import mysql from 'mysql2';
 import mysqlPromise from 'mysql2/promise';
 import * as types from '../types/types.js';
-
-export const MySQLTypesMap: { [key: number]: string } = {};
-for (const [name, code] of Object.entries(mysql.Types)) {
-  MySQLTypesMap[code as number] = name;
-}
+import { coerce, gte } from 'semver';

 export type RetriedQueryOptions = {
   connection: mysqlPromise.Connection;

@@ -47,7 +43,42 @@ export function createPool(config: types.NormalizedMySQLConnectionConfig, option
     database: config.database,
     ssl: hasSSLOptions ? sslOptions : undefined,
     supportBigNumbers: true,
+    decimalNumbers: true,
     timezone: 'Z', // Ensure no auto timezone manipulation of the dates occur
+    jsonStrings: true, // Return JSON columns as strings
     ...(options || {})
   });
 }
+
+/**
+ * Return a random server id for a given sync rule id.
+ * Expected format is: <syncRuleId>00<random number>
+ * The max value for server id in MySQL is 2^32 - 1.
+ * We use the GTID format to keep track of our position in the binlog, no state is kept by the MySQL server, therefore
+ * it is ok to use a randomised server id every time.
+ * @param syncRuleId
+ */
+export function createRandomServerId(syncRuleId: number): number {
+  return Number.parseInt(`${syncRuleId}00${Math.floor(Math.random() * 10000)}`);
+}
+
+export async function getMySQLVersion(connection: mysqlPromise.Connection): Promise<string> {
+  const [[versionResult]] = await retriedQuery({
+    connection,
+    query: `SELECT VERSION() as version`
+  });
+
+  return versionResult.version as string;
+}
+
+/**
+ * Check if the current MySQL version is newer or equal to the target version.
+ * @param version
+ * @param minimumVersion
+ */
+export function isVersionAtLeast(version: string, minimumVersion: string): boolean {
+  const coercedVersion = coerce(version);
+  const coercedMinimumVersion = coerce(minimumVersion);
+
+  return gte(coercedVersion!, coercedMinimumVersion!, { loose: true });
+}
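A short usage sketch for the new helpers, given an open `mysqlPromise.Connection` named `connection` (the minimum version shown is illustrative, not a requirement stated in this diff):

// e.g. 1004217 for sync rule id 1 and random suffix 4217; well under 2^32 - 1
const serverId = createRandomServerId(1);

const version = await getMySQLVersion(connection); // e.g. '8.0.36'
if (!isVersionAtLeast(version, '5.7.0')) {
  throw new Error(`Unsupported MySQL version: ${version}`);
}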
package/test/src/BinLogStream.test.ts
ADDED

@@ -0,0 +1,306 @@
+import { putOp, removeOp } from '@core-tests/stream_utils.js';
+import { MONGO_STORAGE_FACTORY } from '@core-tests/util.js';
+import { BucketStorageFactory, Metrics } from '@powersync/service-core';
+import { describe, expect, test } from 'vitest';
+import { binlogStreamTest } from './BinlogStreamUtils.js';
+import { v4 as uuid } from 'uuid';
+
+type StorageFactory = () => Promise<BucketStorageFactory>;
+
+const BASIC_SYNC_RULES = `
+bucket_definitions:
+  global:
+    data:
+      - SELECT id, description FROM "test_data"
+`;
+
+describe(
+  ' Binlog stream - mongodb',
+  function () {
+    defineBinlogStreamTests(MONGO_STORAGE_FACTORY);
+  },
+  { timeout: 20_000 }
+);
+
+function defineBinlogStreamTests(factory: StorageFactory) {
+  test(
+    'Replicate basic values',
+    binlogStreamTest(factory, async (context) => {
+      const { connectionManager } = context;
+      await context.updateSyncRules(`
+bucket_definitions:
+  global:
+    data:
+      - SELECT id, description, num FROM "test_data"`);
+
+      await connectionManager.query(`CREATE TABLE test_data (id CHAR(36) PRIMARY KEY, description TEXT, num BIGINT)`);
+
+      await context.replicateSnapshot();
+
+      const startRowCount =
+        (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
+      const startTxCount =
+        (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0;
+
+      context.startStreaming();
+      const testId = uuid();
+      await connectionManager.query(
+        `INSERT INTO test_data(id, description, num) VALUES('${testId}', 'test1', 1152921504606846976)`
+      );
+      const data = await context.getBucketData('global[]');
+
+      expect(data).toMatchObject([putOp('test_data', { id: testId, description: 'test1', num: 1152921504606846976n })]);
+      const endRowCount = (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
+      const endTxCount =
+        (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0;
+      expect(endRowCount - startRowCount).toEqual(1);
+      expect(endTxCount - startTxCount).toEqual(1);
+    })
+  );
+
+  test(
+    'replicating case sensitive table',
+    binlogStreamTest(factory, async (context) => {
+      const { connectionManager } = context;
+      await context.updateSyncRules(`
+bucket_definitions:
+  global:
+    data:
+      - SELECT id, description FROM "test_DATA"
+`);
+
+      await connectionManager.query(`CREATE TABLE test_DATA (id CHAR(36) PRIMARY KEY, description text)`);
+
+      await context.replicateSnapshot();
+
+      const startRowCount =
+        (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
+      const startTxCount =
+        (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0;
+
+      context.startStreaming();
+
+      const testId = uuid();
+      await connectionManager.query(`INSERT INTO test_DATA(id, description) VALUES('${testId}','test1')`);
+
+      const data = await context.getBucketData('global[]');
+
+      expect(data).toMatchObject([putOp('test_DATA', { id: testId, description: 'test1' })]);
+      const endRowCount = (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
+      const endTxCount =
+        (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0;
+      expect(endRowCount - startRowCount).toEqual(1);
+      expect(endTxCount - startTxCount).toEqual(1);
+    })
+  );
+
+  // TODO: Not supported yet
+  // test(
+  //   'replicating TRUNCATE',
+  //   binlogStreamTest(factory, async (context) => {
+  //     const { connectionManager } = context;
+  //     const syncRuleContent = `
+  // bucket_definitions:
+  //   global:
+  //     data:
+  //       - SELECT id, description FROM "test_data"
+  //   by_test_data:
+  //     parameters: SELECT id FROM test_data WHERE id = token_parameters.user_id
+  //     data: []
+  // `;
+  //     await context.updateSyncRules(syncRuleContent);
+  //     await connectionManager.query(`DROP TABLE IF EXISTS test_data`);
+  //     await connectionManager.query(
+  //       `CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`
+  //     );
+  //
+  //     await context.replicateSnapshot();
+  //     context.startStreaming();
+  //
+  //     const [{ test_id }] = pgwireRows(
+  //       await connectionManager.query(`INSERT INTO test_data(description) VALUES('test1') returning id as test_id`)
+  //     );
+  //     await connectionManager.query(`TRUNCATE test_data`);
+  //
+  //     const data = await context.getBucketData('global[]');
+  //
+  //     expect(data).toMatchObject([
+  //       putOp('test_data', { id: test_id, description: 'test1' }),
+  //       removeOp('test_data', test_id)
+  //     ]);
+  //   })
+  // );
+
+  test(
+    'replicating changing primary key',
+    binlogStreamTest(factory, async (context) => {
+      const { connectionManager } = context;
+      await context.updateSyncRules(BASIC_SYNC_RULES);
+
+      await connectionManager.query(`CREATE TABLE test_data (id CHAR(36) PRIMARY KEY, description text)`);
+
+      await context.replicateSnapshot();
+      context.startStreaming();
+
+      const testId1 = uuid();
+      await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('${testId1}','test1')`);
+
+      const testId2 = uuid();
+      await connectionManager.query(
+        `UPDATE test_data SET id = '${testId2}', description = 'test2a' WHERE id = '${testId1}'`
+      );
+
+      // This update may fail replicating with:
+      // Error: Update on missing record public.test_data:074a601e-fc78-4c33-a15d-f89fdd4af31d :: {"g":1,"t":"651e9fbe9fec6155895057ec","k":"1a0b34da-fb8c-5e6f-8421-d7a3c5d4df4f"}
+      await connectionManager.query(`UPDATE test_data SET description = 'test2b' WHERE id = '${testId2}'`);
+
+      // Re-use old id again
+      await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('${testId1}', 'test1b')`);
+      await connectionManager.query(`UPDATE test_data SET description = 'test1c' WHERE id = '${testId1}'`);
+
+      const data = await context.getBucketData('global[]');
+      expect(data).toMatchObject([
+        // Initial insert
+        putOp('test_data', { id: testId1, description: 'test1' }),
+        // Update id, then description
+        removeOp('test_data', testId1),
+        putOp('test_data', { id: testId2, description: 'test2a' }),
+        putOp('test_data', { id: testId2, description: 'test2b' }),
+        // Re-use old id
+        putOp('test_data', { id: testId1, description: 'test1b' }),
+        putOp('test_data', { id: testId1, description: 'test1c' })
+      ]);
+    })
+  );
+
+  test(
+    'initial sync',
+    binlogStreamTest(factory, async (context) => {
+      const { connectionManager } = context;
+      await context.updateSyncRules(BASIC_SYNC_RULES);
+
+      await connectionManager.query(`CREATE TABLE test_data (id CHAR(36) PRIMARY KEY, description text)`);
+
+      const testId = uuid();
+      await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('${testId}','test1')`);
+
+      await context.replicateSnapshot();
+
+      const data = await context.getBucketData('global[]');
+      expect(data).toMatchObject([putOp('test_data', { id: testId, description: 'test1' })]);
+    })
+  );
+
+  test(
+    'snapshot with date values',
+    binlogStreamTest(factory, async (context) => {
+      const { connectionManager } = context;
+      await context.updateSyncRules(`
+bucket_definitions:
+  global:
+    data:
+      - SELECT * FROM "test_data"
+`);
+
+      await connectionManager.query(
+        `CREATE TABLE test_data (id CHAR(36) PRIMARY KEY, description TEXT, date DATE, datetime DATETIME, timestamp TIMESTAMP)`
+      );
+
+      const testId = uuid();
+      await connectionManager.query(`
+        INSERT INTO test_data(id, description, date, datetime, timestamp) VALUES('${testId}','testDates', '2023-03-06', '2023-03-06 15:47', '2023-03-06 15:47')
+      `);
+
+      await context.replicateSnapshot();
+
+      const data = await context.getBucketData('global[]');
+      expect(data).toMatchObject([
+        putOp('test_data', {
+          id: testId,
+          description: 'testDates',
+          date: `2023-03-06`,
+          datetime: '2023-03-06T15:47:00.000Z',
+          timestamp: '2023-03-06T15:47:00.000Z'
+        })
+      ]);
+    })
+  );
+
+  test(
+    'replication with date values',
+    binlogStreamTest(factory, async (context) => {
+      const { connectionManager } = context;
+      await context.updateSyncRules(`
+bucket_definitions:
+  global:
+    data:
+      - SELECT * FROM "test_data"
+`);
+
+      await connectionManager.query(
+        `CREATE TABLE test_data (id CHAR(36) PRIMARY KEY, description TEXT, date DATE, datetime DATETIME, timestamp TIMESTAMP)`
+      );
+
+      await context.replicateSnapshot();
+
+      const startRowCount =
+        (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
+      const startTxCount =
+        (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0;
+
+      context.startStreaming();
+
+      const testId = uuid();
+      await connectionManager.query(`
+        INSERT INTO test_data(id, description, date, datetime, timestamp) VALUES('${testId}','testDates', '2023-03-06', '2023-03-06 15:47', '2023-03-06 15:47')
+      `);
+
+      const data = await context.getBucketData('global[]');
+      expect(data).toMatchObject([
+        putOp('test_data', {
+          id: testId,
+          description: 'testDates',
+          date: `2023-03-06`,
+          datetime: '2023-03-06T15:47:00.000Z',
+          timestamp: '2023-03-06T15:47:00.000Z'
+        })
+      ]);
+      const endRowCount = (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
+      const endTxCount =
+        (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0;
+      expect(endRowCount - startRowCount).toEqual(1);
+      expect(endTxCount - startTxCount).toEqual(1);
+    })
+  );
+
+  test(
+    'table not in sync rules',
+    binlogStreamTest(factory, async (context) => {
+      const { connectionManager } = context;
+      await context.updateSyncRules(BASIC_SYNC_RULES);
+
+      await connectionManager.query(`CREATE TABLE test_donotsync (id CHAR(36) PRIMARY KEY, description text)`);
+
+      await context.replicateSnapshot();
+
+      const startRowCount =
+        (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
+      const startTxCount =
+        (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0;
+
+      context.startStreaming();
+
+      await connectionManager.query(`INSERT INTO test_donotsync(id, description) VALUES('${uuid()}','test1')`);
+      const data = await context.getBucketData('global[]');
+
+      expect(data).toMatchObject([]);
+      const endRowCount = (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
+      const endTxCount =
+        (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0;
+
+      // There was a transaction, but we should not replicate any actual data
+      expect(endRowCount - startRowCount).toEqual(0);
+      expect(endTxCount - startTxCount).toEqual(1);
+    })
+  );
+}
package/test/src/BinlogStreamUtils.ts
ADDED

@@ -0,0 +1,157 @@
+import {
+  ActiveCheckpoint,
+  BucketStorageFactory,
+  OpId,
+  OplogEntry,
+  SyncRulesBucketStorage
+} from '@powersync/service-core';
+import { TEST_CONNECTION_OPTIONS, clearTestDb } from './util.js';
+import { fromAsync } from '@core-tests/stream_utils.js';
+import { BinLogStream, BinLogStreamOptions } from '@module/replication/BinLogStream.js';
+import { MySQLConnectionManager } from '@module/replication/MySQLConnectionManager.js';
+import mysqlPromise from 'mysql2/promise';
+import { readExecutedGtid } from '@module/common/read-executed-gtid.js';
+import { logger } from '@powersync/lib-services-framework';
+
+/**
+ * Tests operating on the binlog stream need to configure the stream and manage asynchronous
+ * replication, which gets a little tricky.
+ *
+ * This wraps a test in a function that configures all the context, and tears it down afterward.
+ */
+export function binlogStreamTest(
+  factory: () => Promise<BucketStorageFactory>,
+  test: (context: BinlogStreamTestContext) => Promise<void>
+): () => Promise<void> {
+  return async () => {
+    const f = await factory();
+    const connectionManager = new MySQLConnectionManager(TEST_CONNECTION_OPTIONS, {});
+
+    const connection = await connectionManager.getConnection();
+    await clearTestDb(connection);
+    connection.release();
+    const context = new BinlogStreamTestContext(f, connectionManager);
+    try {
+      await test(context);
+    } finally {
+      await context.dispose();
+    }
+  };
+}
+
+export class BinlogStreamTestContext {
+  private _binlogStream?: BinLogStream;
+  private abortController = new AbortController();
+  private streamPromise?: Promise<void>;
+  public storage?: SyncRulesBucketStorage;
+  private replicationDone = false;
+
+  constructor(
+    public factory: BucketStorageFactory,
+    public connectionManager: MySQLConnectionManager
+  ) {}
+
+  async dispose() {
+    this.abortController.abort();
+    await this.streamPromise;
+    await this.connectionManager.end();
+  }
+
+  get connectionTag() {
+    return this.connectionManager.connectionTag;
+  }
+
+  async updateSyncRules(content: string): Promise<SyncRulesBucketStorage> {
+    const syncRules = await this.factory.updateSyncRules({ content: content });
+    this.storage = this.factory.getInstance(syncRules);
+    return this.storage!;
+  }
+
+  get binlogStream(): BinLogStream {
+    if (this.storage == null) {
+      throw new Error('updateSyncRules() first');
+    }
+    if (this._binlogStream) {
+      return this._binlogStream;
+    }
+    const options: BinLogStreamOptions = {
+      storage: this.storage,
+      connections: this.connectionManager,
+      abortSignal: this.abortController.signal
+    };
+    this._binlogStream = new BinLogStream(options);
+    return this._binlogStream!;
+  }
+
+  async replicateSnapshot() {
+    await this.binlogStream.initReplication();
+    this.replicationDone = true;
+  }
+
+  startStreaming() {
+    if (!this.replicationDone) {
+      throw new Error('Call replicateSnapshot() before startStreaming()');
+    }
+    this.streamPromise = this.binlogStream.streamChanges();
+  }
+
+  async getCheckpoint(options?: { timeout?: number }): Promise<string> {
+    const connection = await this.connectionManager.getConnection();
+    let checkpoint = await Promise.race([
+      getClientCheckpoint(connection, this.factory, { timeout: options?.timeout ?? 60_000 }),
+      this.streamPromise
+    ]);
+    connection.release();
+    if (typeof checkpoint == undefined) {
+      // This indicates an issue with the test setup - streamingPromise completed instead
+      // of getClientCheckpoint()
+      throw new Error('Test failure - streamingPromise completed');
+    }
+    return checkpoint as string;
+  }
+
+  async getBucketsDataBatch(buckets: Record<string, string>, options?: { timeout?: number }) {
+    const checkpoint = await this.getCheckpoint(options);
+    const map = new Map<string, string>(Object.entries(buckets));
+    return fromAsync(this.storage!.getBucketDataBatch(checkpoint, map));
+  }
+
+  async getBucketData(bucket: string, start = '0', options?: { timeout?: number }): Promise<OplogEntry[]> {
+    const checkpoint = await this.getCheckpoint(options);
+    const map = new Map<string, string>([[bucket, start]]);
+    const batch = this.storage!.getBucketDataBatch(checkpoint, map);
+    const batches = await fromAsync(batch);
+    return batches[0]?.batch.data ?? [];
+  }
+}
+
+export async function getClientCheckpoint(
+  connection: mysqlPromise.Connection,
+  bucketStorage: BucketStorageFactory,
+  options?: { timeout?: number }
+): Promise<OpId> {
+  const start = Date.now();
+  const gtid = await readExecutedGtid(connection);
+  // This old API needs a persisted checkpoint id.
+  // Since we don't use LSNs anymore, the only way to get that is to wait.
+
+  const timeout = options?.timeout ?? 50_000;
+  let lastCp: ActiveCheckpoint | null = null;
+
+  logger.info('Expected Checkpoint: ' + gtid.comparable);
+  while (Date.now() - start < timeout) {
+    const cp = await bucketStorage.getActiveCheckpoint();
+    lastCp = cp;
+    //logger.info('Last Checkpoint: ' + lastCp.lsn);
+    if (!cp.hasSyncRules()) {
+      throw new Error('No sync rules available');
+    }
+    if (cp.lsn && cp.lsn >= gtid.comparable) {
+      return cp.checkpoint;
+    }
+
+    await new Promise((resolve) => setTimeout(resolve, 30));
+  }
+
+  throw new Error(`Timeout while waiting for checkpoint ${gtid.comparable}. Last checkpoint: ${lastCp?.lsn}`);
+}
package/test/src/env.ts
CHANGED

@@ -1,7 +1,7 @@
 import { utils } from '@powersync/lib-services-framework';

 export const env = utils.collectEnvironmentVariables({
-  MYSQL_TEST_URI: utils.type.string.default('mysql://
+  MYSQL_TEST_URI: utils.type.string.default('mysql://root:mypassword@localhost:3306/mydatabase'),
   CI: utils.type.boolean.default('false'),
   SLOW_TESTS: utils.type.boolean.default('false')
 });