@powersync/service-module-mysql 0.0.0-dev-20241015210820
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +20 -0
- package/LICENSE +67 -0
- package/README.md +3 -0
- package/dev/.env.template +2 -0
- package/dev/README.md +9 -0
- package/dev/config/sync_rules.yaml +12 -0
- package/dev/docker/mysql/docker-compose.yaml +17 -0
- package/dev/docker/mysql/init-scripts/my.cnf +9 -0
- package/dev/docker/mysql/init-scripts/mysql.sql +38 -0
- package/dist/api/MySQLRouteAPIAdapter.d.ts +24 -0
- package/dist/api/MySQLRouteAPIAdapter.js +311 -0
- package/dist/api/MySQLRouteAPIAdapter.js.map +1 -0
- package/dist/common/ReplicatedGTID.d.ts +59 -0
- package/dist/common/ReplicatedGTID.js +110 -0
- package/dist/common/ReplicatedGTID.js.map +1 -0
- package/dist/common/check-source-configuration.d.ts +3 -0
- package/dist/common/check-source-configuration.js +46 -0
- package/dist/common/check-source-configuration.js.map +1 -0
- package/dist/common/common-index.d.ts +6 -0
- package/dist/common/common-index.js +7 -0
- package/dist/common/common-index.js.map +1 -0
- package/dist/common/get-replication-columns.d.ts +12 -0
- package/dist/common/get-replication-columns.js +103 -0
- package/dist/common/get-replication-columns.js.map +1 -0
- package/dist/common/get-tables-from-pattern.d.ts +7 -0
- package/dist/common/get-tables-from-pattern.js +28 -0
- package/dist/common/get-tables-from-pattern.js.map +1 -0
- package/dist/common/mysql-to-sqlite.d.ts +4 -0
- package/dist/common/mysql-to-sqlite.js +56 -0
- package/dist/common/mysql-to-sqlite.js.map +1 -0
- package/dist/common/read-executed-gtid.d.ts +6 -0
- package/dist/common/read-executed-gtid.js +40 -0
- package/dist/common/read-executed-gtid.js.map +1 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.js +4 -0
- package/dist/index.js.map +1 -0
- package/dist/module/MySQLModule.d.ts +13 -0
- package/dist/module/MySQLModule.js +46 -0
- package/dist/module/MySQLModule.js.map +1 -0
- package/dist/replication/BinLogReplicationJob.d.ts +14 -0
- package/dist/replication/BinLogReplicationJob.js +88 -0
- package/dist/replication/BinLogReplicationJob.js.map +1 -0
- package/dist/replication/BinLogReplicator.d.ts +13 -0
- package/dist/replication/BinLogReplicator.js +25 -0
- package/dist/replication/BinLogReplicator.js.map +1 -0
- package/dist/replication/BinLogStream.d.ts +43 -0
- package/dist/replication/BinLogStream.js +421 -0
- package/dist/replication/BinLogStream.js.map +1 -0
- package/dist/replication/MySQLConnectionManager.d.ts +43 -0
- package/dist/replication/MySQLConnectionManager.js +81 -0
- package/dist/replication/MySQLConnectionManager.js.map +1 -0
- package/dist/replication/MySQLConnectionManagerFactory.d.ts +10 -0
- package/dist/replication/MySQLConnectionManagerFactory.js +21 -0
- package/dist/replication/MySQLConnectionManagerFactory.js.map +1 -0
- package/dist/replication/MySQLErrorRateLimiter.d.ts +10 -0
- package/dist/replication/MySQLErrorRateLimiter.js +43 -0
- package/dist/replication/MySQLErrorRateLimiter.js.map +1 -0
- package/dist/replication/zongji/zongji-utils.d.ts +7 -0
- package/dist/replication/zongji/zongji-utils.js +19 -0
- package/dist/replication/zongji/zongji-utils.js.map +1 -0
- package/dist/types/types.d.ts +50 -0
- package/dist/types/types.js +61 -0
- package/dist/types/types.js.map +1 -0
- package/dist/utils/mysql_utils.d.ts +14 -0
- package/dist/utils/mysql_utils.js +38 -0
- package/dist/utils/mysql_utils.js.map +1 -0
- package/package.json +51 -0
- package/src/api/MySQLRouteAPIAdapter.ts +357 -0
- package/src/common/ReplicatedGTID.ts +158 -0
- package/src/common/check-source-configuration.ts +59 -0
- package/src/common/common-index.ts +6 -0
- package/src/common/get-replication-columns.ts +124 -0
- package/src/common/get-tables-from-pattern.ts +44 -0
- package/src/common/mysql-to-sqlite.ts +59 -0
- package/src/common/read-executed-gtid.ts +43 -0
- package/src/index.ts +5 -0
- package/src/module/MySQLModule.ts +53 -0
- package/src/replication/BinLogReplicationJob.ts +97 -0
- package/src/replication/BinLogReplicator.ts +35 -0
- package/src/replication/BinLogStream.ts +547 -0
- package/src/replication/MySQLConnectionManager.ts +104 -0
- package/src/replication/MySQLConnectionManagerFactory.ts +28 -0
- package/src/replication/MySQLErrorRateLimiter.ts +44 -0
- package/src/replication/zongji/zongji-utils.ts +32 -0
- package/src/replication/zongji/zongji.d.ts +98 -0
- package/src/types/types.ts +102 -0
- package/src/utils/mysql_utils.ts +47 -0
- package/test/src/binlog_stream.test.ts +288 -0
- package/test/src/binlog_stream_utils.ts +152 -0
- package/test/src/env.ts +7 -0
- package/test/src/setup.ts +7 -0
- package/test/src/util.ts +62 -0
- package/test/tsconfig.json +28 -0
- package/tsconfig.json +26 -0
- package/tsconfig.tsbuildinfo +1 -0
- package/vitest.config.ts +15 -0
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
import { ErrorRateLimiter } from '@powersync/service-core';
|
|
2
|
+
import { setTimeout } from 'timers/promises';
|
|
3
|
+
|
|
4
|
+
export class MySQLErrorRateLimiter implements ErrorRateLimiter {
|
|
5
|
+
nextAllowed: number = Date.now();
|
|
6
|
+
|
|
7
|
+
async waitUntilAllowed(options?: { signal?: AbortSignal | undefined } | undefined): Promise<void> {
|
|
8
|
+
const delay = Math.max(0, this.nextAllowed - Date.now());
|
|
9
|
+
// Minimum delay between connections, even without errors
|
|
10
|
+
this.setDelay(500);
|
|
11
|
+
await setTimeout(delay, undefined, { signal: options?.signal });
|
|
12
|
+
}
|
|
13
|
+
|
|
14
|
+
mayPing(): boolean {
|
|
15
|
+
return Date.now() >= this.nextAllowed;
|
|
16
|
+
}
|
|
17
|
+
|
|
18
|
+
reportError(e: any): void {
|
|
19
|
+
const message = (e.message as string) ?? '';
|
|
20
|
+
if (message.includes('password authentication failed')) {
|
|
21
|
+
// Wait 15 minutes, to avoid triggering Supabase's fail2ban
|
|
22
|
+
this.setDelay(900_000);
|
|
23
|
+
} else if (message.includes('ENOTFOUND')) {
|
|
24
|
+
// DNS lookup issue - incorrect URI or deleted instance
|
|
25
|
+
this.setDelay(120_000);
|
|
26
|
+
} else if (message.includes('ECONNREFUSED')) {
|
|
27
|
+
// Could be fail2ban or similar
|
|
28
|
+
this.setDelay(120_000);
|
|
29
|
+
} else if (
|
|
30
|
+
message.includes('Unable to do postgres query on ended pool') ||
|
|
31
|
+
message.includes('Postgres unexpectedly closed connection')
|
|
32
|
+
) {
|
|
33
|
+
// Connection timed out - ignore / immediately retry
|
|
34
|
+
// We don't explicitly set the delay to 0, since there could have been another error that
|
|
35
|
+
// we need to respect.
|
|
36
|
+
} else {
|
|
37
|
+
this.setDelay(30_000);
|
|
38
|
+
}
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
private setDelay(delay: number) {
|
|
42
|
+
this.nextAllowed = Math.max(this.nextAllowed, Date.now() + delay);
|
|
43
|
+
}
|
|
44
|
+
}
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
import {
|
|
2
|
+
BinLogEvent,
|
|
3
|
+
BinLogGTIDLogEvent,
|
|
4
|
+
BinLogMutationEvent,
|
|
5
|
+
BinLogRotationEvent,
|
|
6
|
+
BinLogUpdateEvent,
|
|
7
|
+
BinLogXidEvent
|
|
8
|
+
} from '@powersync/mysql-zongji';
|
|
9
|
+
|
|
10
|
+
export function eventIsGTIDLog(event: BinLogEvent): event is BinLogGTIDLogEvent {
|
|
11
|
+
return event.getEventName() == 'gtidlog';
|
|
12
|
+
}
|
|
13
|
+
|
|
14
|
+
export function eventIsXid(event: BinLogEvent): event is BinLogXidEvent {
|
|
15
|
+
return event.getEventName() == 'xid';
|
|
16
|
+
}
|
|
17
|
+
|
|
18
|
+
export function eventIsRotation(event: BinLogEvent): event is BinLogRotationEvent {
|
|
19
|
+
return event.getEventName() == 'rotate';
|
|
20
|
+
}
|
|
21
|
+
|
|
22
|
+
export function eventIsWriteMutation(event: BinLogEvent): event is BinLogMutationEvent {
|
|
23
|
+
return event.getEventName() == 'writerows';
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
export function eventIsDeleteMutation(event: BinLogEvent): event is BinLogMutationEvent {
|
|
27
|
+
return event.getEventName() == 'deleterows';
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
export function eventIsUpdateMutation(event: BinLogEvent): event is BinLogUpdateEvent {
|
|
31
|
+
return event.getEventName() == 'updaterows';
|
|
32
|
+
}
|
|
@@ -0,0 +1,98 @@
|
|
|
1
|
+
declare module '@powersync/mysql-zongji' {
|
|
2
|
+
export type ZongjiOptions = {
|
|
3
|
+
host: string;
|
|
4
|
+
user: string;
|
|
5
|
+
password: string;
|
|
6
|
+
};
|
|
7
|
+
|
|
8
|
+
export type StartOptions = {
|
|
9
|
+
includeEvents?: string[];
|
|
10
|
+
excludeEvents?: string[];
|
|
11
|
+
/**
|
|
12
|
+
* BinLog position filename to start reading events from
|
|
13
|
+
*/
|
|
14
|
+
filename?: string;
|
|
15
|
+
/**
|
|
16
|
+
* BinLog position offset to start reading events from in file specified
|
|
17
|
+
*/
|
|
18
|
+
position?: number;
|
|
19
|
+
};
|
|
20
|
+
|
|
21
|
+
export type ColumnSchema = {
|
|
22
|
+
COLUMN_NAME: string;
|
|
23
|
+
COLLATION_NAME: string;
|
|
24
|
+
CHARACTER_SET_NAME: string;
|
|
25
|
+
COLUMN_COMMENT: string;
|
|
26
|
+
COLUMN_TYPE: string;
|
|
27
|
+
};
|
|
28
|
+
|
|
29
|
+
export type ColumnDefinition = {
|
|
30
|
+
name: string;
|
|
31
|
+
charset: string;
|
|
32
|
+
type: number;
|
|
33
|
+
metadata: Record<string, any>;
|
|
34
|
+
};
|
|
35
|
+
|
|
36
|
+
export type TableMapEntry = {
|
|
37
|
+
columnSchemas: Array<ColumnSchema>;
|
|
38
|
+
parentSchema: string;
|
|
39
|
+
tableName: string;
|
|
40
|
+
columns: Array<ColumnDefinition>;
|
|
41
|
+
};
|
|
42
|
+
|
|
43
|
+
export type BaseBinLogEvent = {
|
|
44
|
+
timestamp: number;
|
|
45
|
+
getEventName(): string;
|
|
46
|
+
|
|
47
|
+
/**
|
|
48
|
+
* Next position in BinLog file to read from after
|
|
49
|
+
* this event.
|
|
50
|
+
*/
|
|
51
|
+
nextPosition: number;
|
|
52
|
+
/**
|
|
53
|
+
* Size of this event
|
|
54
|
+
*/
|
|
55
|
+
size: number;
|
|
56
|
+
flags: number;
|
|
57
|
+
useChecksum: boolean;
|
|
58
|
+
};
|
|
59
|
+
|
|
60
|
+
export type BinLogRotationEvent = BaseBinLogEvent & {
|
|
61
|
+
binlogName: string;
|
|
62
|
+
position: number;
|
|
63
|
+
};
|
|
64
|
+
|
|
65
|
+
export type BinLogGTIDLogEvent = BaseBinLogEvent & {
|
|
66
|
+
serverId: Buffer;
|
|
67
|
+
transactionRange: number;
|
|
68
|
+
};
|
|
69
|
+
|
|
70
|
+
export type BinLogXidEvent = BaseBinLogEvent & {
|
|
71
|
+
xid: number;
|
|
72
|
+
};
|
|
73
|
+
|
|
74
|
+
export type BinLogMutationEvent = BaseBinLogEvent & {
|
|
75
|
+
tableId: number;
|
|
76
|
+
numberOfColumns: number;
|
|
77
|
+
tableMap: Record<string, TableMapEntry>;
|
|
78
|
+
rows: Array<Record<string, any>>;
|
|
79
|
+
};
|
|
80
|
+
|
|
81
|
+
export type BinLogUpdateEvent = Omit<BinLogMutationEvent, 'rows'> & {
|
|
82
|
+
rows: Array<{
|
|
83
|
+
before: Record<string, any>;
|
|
84
|
+
after: Record<string, any>;
|
|
85
|
+
}>;
|
|
86
|
+
};
|
|
87
|
+
|
|
88
|
+
export type BinLogEvent = BinLogRotationEvent | BinLogGTIDLogEvent | BinLogXidEvent | BinLogMutationEvent;
|
|
89
|
+
|
|
90
|
+
export default class ZongJi {
|
|
91
|
+
constructor(options: ZongjiOptions);
|
|
92
|
+
|
|
93
|
+
start(options: StartOptions): void;
|
|
94
|
+
stop(): void;
|
|
95
|
+
|
|
96
|
+
on(type: 'binlog' | string, callback: (event: BinLogEvent) => void);
|
|
97
|
+
}
|
|
98
|
+
}
|
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
import * as service_types from '@powersync/service-types';
|
|
2
|
+
import * as t from 'ts-codec';
|
|
3
|
+
import * as urijs from 'uri-js';
|
|
4
|
+
|
|
5
|
+
// Discriminator value identifying MySQL entries in the data source config.
export const MYSQL_CONNECTION_TYPE = 'mysql' as const;
|
|
6
|
+
|
|
7
|
+
/**
 * Fully-resolved MySQL connection settings, produced by
 * normalizeConnectionConfig() from the raw config / URI input.
 */
export interface NormalizedMySQLConnectionConfig {
  // Connection identifier; defaults to 'default' when not configured.
  id: string;
  // Connection tag; defaults to 'default' when not configured.
  tag: string;

  hostname: string;
  port: number;
  database: string;

  username: string;
  password: string;

  // Optional TLS material (PEM): CA certificate and client cert/key pair.
  cacert?: string;
  client_certificate?: string;
  client_private_key?: string;
}
|
|
22
|
+
|
|
23
|
+
// Codec for the raw `mysql` data-source entry in the service config file.
// Every connection field is optional here: values may instead come from `uri`,
// and normalizeConnectionConfig() validates that the required values resolve.
export const MySQLConnectionConfig = service_types.configFile.DataSourceConfig.and(
  t.object({
    type: t.literal(MYSQL_CONNECTION_TYPE),
    // Connection URI, e.g. mysql://user:password@host:port/database
    uri: t.string.optional(),
    hostname: t.string.optional(),
    port: service_types.configFile.portCodec.optional(),
    username: t.string.optional(),
    password: t.string.optional(),
    database: t.string.optional(),

    // Optional TLS material (PEM-encoded).
    cacert: t.string.optional(),
    client_certificate: t.string.optional(),
    client_private_key: t.string.optional()
  })
);
|
|
38
|
+
|
|
39
|
+
/**
 * Config input specified when starting services
 */
export type MySQLConnectionConfig = t.Decoded<typeof MySQLConnectionConfig>;

/**
 * Resolved version of {@link MySQLConnectionConfig}
 */
export type ResolvedConnectionConfig = MySQLConnectionConfig & NormalizedMySQLConnectionConfig;
|
|
48
|
+
|
|
49
|
+
/**
|
|
50
|
+
* Validate and normalize connection options.
|
|
51
|
+
*
|
|
52
|
+
* Returns destructured options.
|
|
53
|
+
*/
|
|
54
|
+
export function normalizeConnectionConfig(options: MySQLConnectionConfig): NormalizedMySQLConnectionConfig {
|
|
55
|
+
let uri: urijs.URIComponents;
|
|
56
|
+
if (options.uri) {
|
|
57
|
+
uri = urijs.parse(options.uri);
|
|
58
|
+
if (uri.scheme != 'mysql') {
|
|
59
|
+
throw new Error(`Invalid URI - protocol must be mysql, got ${uri.scheme}`);
|
|
60
|
+
}
|
|
61
|
+
} else {
|
|
62
|
+
uri = urijs.parse('mysql:///');
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
const hostname = options.hostname ?? uri.host ?? '';
|
|
66
|
+
const port = Number(options.port ?? uri.port ?? 3306);
|
|
67
|
+
|
|
68
|
+
const database = options.database ?? uri.path?.substring(1) ?? '';
|
|
69
|
+
|
|
70
|
+
const [uri_username, uri_password] = (uri.userinfo ?? '').split(':');
|
|
71
|
+
|
|
72
|
+
const username = options.username ?? uri_username ?? '';
|
|
73
|
+
const password = options.password ?? uri_password ?? '';
|
|
74
|
+
|
|
75
|
+
if (hostname == '') {
|
|
76
|
+
throw new Error(`hostname required`);
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
if (username == '') {
|
|
80
|
+
throw new Error(`username required`);
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
if (password == '') {
|
|
84
|
+
throw new Error(`password required`);
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
if (database == '') {
|
|
88
|
+
throw new Error(`database required`);
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
return {
|
|
92
|
+
id: options.id ?? 'default',
|
|
93
|
+
tag: options.tag ?? 'default',
|
|
94
|
+
|
|
95
|
+
hostname,
|
|
96
|
+
port,
|
|
97
|
+
database,
|
|
98
|
+
|
|
99
|
+
username,
|
|
100
|
+
password
|
|
101
|
+
};
|
|
102
|
+
}
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
import { logger } from '@powersync/lib-services-framework';
|
|
2
|
+
import mysql from 'mysql2';
|
|
3
|
+
import mysqlPromise from 'mysql2/promise';
|
|
4
|
+
import * as types from '../types/types.js';
|
|
5
|
+
|
|
6
|
+
/** Options for {@link retriedQuery}. */
export type RetriedQueryOptions = {
  // Promise-based connection on which the query is executed.
  connection: mysqlPromise.Connection;
  // SQL text to execute.
  query: string;
  // Positional query parameters; defaults to [].
  params?: any[];
  // Total number of attempts (not re-tries); defaults to 2.
  retries?: number;
};
|
|
12
|
+
|
|
13
|
+
/**
|
|
14
|
+
* Retry a simple query - up to 2 attempts total.
|
|
15
|
+
*/
|
|
16
|
+
export async function retriedQuery(options: RetriedQueryOptions) {
|
|
17
|
+
const { connection, query, params = [], retries = 2 } = options;
|
|
18
|
+
for (let tries = retries; ; tries--) {
|
|
19
|
+
try {
|
|
20
|
+
logger.debug(`Executing query: ${query}`);
|
|
21
|
+
return connection.query<mysqlPromise.RowDataPacket[]>(query, params);
|
|
22
|
+
} catch (e) {
|
|
23
|
+
if (tries == 1) {
|
|
24
|
+
throw e;
|
|
25
|
+
}
|
|
26
|
+
logger.warn('Query error, retrying', e);
|
|
27
|
+
}
|
|
28
|
+
}
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
export function createPool(config: types.NormalizedMySQLConnectionConfig, options?: mysql.PoolOptions): mysql.Pool {
|
|
32
|
+
const sslOptions = {
|
|
33
|
+
ca: config.cacert,
|
|
34
|
+
key: config.client_private_key,
|
|
35
|
+
cert: config.client_certificate
|
|
36
|
+
};
|
|
37
|
+
const hasSSLOptions = Object.values(sslOptions).some((v) => !!v);
|
|
38
|
+
// TODO confirm if default options are fine for Powersync use case
|
|
39
|
+
return mysql.createPool({
|
|
40
|
+
host: config.hostname,
|
|
41
|
+
user: config.username,
|
|
42
|
+
password: config.password,
|
|
43
|
+
database: config.database,
|
|
44
|
+
ssl: hasSSLOptions ? sslOptions : undefined,
|
|
45
|
+
...(options || {})
|
|
46
|
+
});
|
|
47
|
+
}
|
|
@@ -0,0 +1,288 @@
|
|
|
1
|
+
import { putOp, removeOp } from '@core-tests/stream_utils.js';
|
|
2
|
+
import { MONGO_STORAGE_FACTORY } from '@core-tests/util.js';
|
|
3
|
+
import { BucketStorageFactory, Metrics } from '@powersync/service-core';
|
|
4
|
+
import * as crypto from 'crypto';
|
|
5
|
+
import { describe, expect, test } from 'vitest';
|
|
6
|
+
import { binlogStreamTest } from './binlog_stream_utils.js';
|
|
7
|
+
import { logger } from '@powersync/lib-services-framework';
|
|
8
|
+
|
|
9
|
+
// Factory producing a fresh bucket storage backend for each test run.
type StorageFactory = () => Promise<BucketStorageFactory>;
|
|
10
|
+
|
|
11
|
+
// Minimal sync rules shared by most tests: sync every row of test_data into
// the single "global" bucket.
const BASIC_SYNC_RULES = `
bucket_definitions:
  global:
    data:
      - SELECT id, description FROM "test_data"
`;
|
|
17
|
+
|
|
18
|
+
// Run the full binlog stream suite against MongoDB-backed bucket storage.
describe(
  ' Binlog stream - mongodb',
  function () {
    defineBinlogStreamTests(MONGO_STORAGE_FACTORY);
  },
  { timeout: 20_000 }
);
|
|
25
|
+
|
|
26
|
+
/**
 * Defines the binlog replication test suite against the given storage backend.
 *
 * Only the first test is currently active; the remaining tests are disabled
 * (commented out) pending full MySQL support and are retained for reference.
 */
function defineBinlogStreamTests(factory: StorageFactory) {
  test(
    'Replicate basic values',
    binlogStreamTest(factory, async (context) => {
      const { connectionManager } = context;
      await context.updateSyncRules(`
bucket_definitions:
  global:
    data:
      - SELECT id, description, num FROM "test_data"`);

      await connectionManager.query(
        `CREATE TABLE test_data (id CHAR(36) PRIMARY KEY DEFAULT (UUID()), description TEXT, num BIGINT)`
      );

      await context.replicateSnapshot();

      // Capture metric baselines so assertions below are deltas, not totals.
      const startRowCount =
        (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
      const startTxCount =
        (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0;

      context.startStreaming();
      await connectionManager.query(`INSERT INTO test_data(description, num) VALUES('test1', 1152921504606846976)`);
      // Fetch the DB-generated UUID so we can assert on the replicated row.
      const [[result]] = await connectionManager.query(
        `SELECT id AS test_id FROM test_data WHERE description = 'test1' AND num = 1152921504606846976`
      );
      const testId = result.test_id;
      logger.info('Finished Inserting data with id:' + testId);

      const data = await context.getBucketData('global[]');

      // num exceeds Number.MAX_SAFE_INTEGER, so it must replicate as a BigInt.
      expect(data).toMatchObject([putOp('test_data', { id: testId, description: 'test1', num: 1152921504606846976n })]);
      const endRowCount = (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
      const endTxCount =
        (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0;
      expect(endRowCount - startRowCount).toEqual(1);
      expect(endTxCount - startTxCount).toEqual(1);
    })
  );

  // test(
  //   'replicating case sensitive table',
  //   binlogStreamTest(factory, async (context) => {
  //     const { connectionManager } = context;
  //     await context.updateSyncRules(`
  //       bucket_definitions:
  //         global:
  //           data:
  //             - SELECT id, description FROM "test_DATA"
  //     `);
  //
  //     await connectionManager.query(
  //       `CREATE TABLE test_DATA (id CHAR(36) PRIMARY KEY DEFAULT (UUID()), description text)`
  //     );
  //
  //     await context.replicateSnapshot();
  //
  //     const startRowCount =
  //       (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
  //     const startTxCount =
  //       (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0;
  //
  //     context.startStreaming();
  //
  //     await connectionManager.query(`INSERT INTO test_DATA(description) VALUES('test1')`);
  //     const [[result]] = await connectionManager.query(
  //       `SELECT id AS test_id FROM test_data WHERE description = 'test1'`
  //     );
  //     const testId = result.test_id;
  //
  //     const data = await context.getBucketData('global[]');
  //
  //     expect(data).toMatchObject([putOp('test_DATA', { id: testId, description: 'test1' })]);
  //     const endRowCount = (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
  //     const endTxCount =
  //       (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0;
  //     expect(endRowCount - startRowCount).toEqual(1);
  //     expect(endTxCount - startTxCount).toEqual(1);
  //   })
  // );

  // // TODO: Not supported yet
  // // test(
  // //   'replicating TRUNCATE',
  // //   binlogStreamTest(factory, async (context) => {
  // //     const { connectionManager } = context;
  // //     const syncRuleContent = `
  // //       bucket_definitions:
  // //         global:
  // //           data:
  // //             - SELECT id, description FROM "test_data"
  // //         by_test_data:
  // //           parameters: SELECT id FROM test_data WHERE id = token_parameters.user_id
  // //           data: []
  // //     `;
  // //     await context.updateSyncRules(syncRuleContent);
  // //     await connectionManager.query(`DROP TABLE IF EXISTS test_data`);
  // //     await connectionManager.query(
  // //       `CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`
  // //     );
  // //
  // //     await context.replicateSnapshot();
  // //     context.startStreaming();
  // //
  // //     const [{ test_id }] = pgwireRows(
  // //       await connectionManager.query(`INSERT INTO test_data(description) VALUES('test1') returning id as test_id`)
  // //     );
  // //     await connectionManager.query(`TRUNCATE test_data`);
  // //
  // //     const data = await context.getBucketData('global[]');
  // //
  // //     expect(data).toMatchObject([
  // //       putOp('test_data', { id: test_id, description: 'test1' }),
  // //       removeOp('test_data', test_id)
  // //     ]);
  // //   })
  // // );

  // test(
  //   'replicating changing primary key',
  //   binlogStreamTest(factory, async (context) => {
  //     const { connectionManager } = context;
  //     await context.updateSyncRules(BASIC_SYNC_RULES);
  //
  //     await connectionManager.query(
  //       `CREATE TABLE test_data (id CHAR(36) PRIMARY KEY DEFAULT (UUID()), description text)`
  //     );
  //
  //     await context.replicateSnapshot();
  //     context.startStreaming();
  //
  //     await connectionManager.query(`INSERT INTO test_data(description) VALUES('test1')`);
  //     const [[result1]] = await connectionManager.query(
  //       `SELECT id AS test_id FROM test_data WHERE description = 'test1'`
  //     );
  //     const testId1 = result1.test_id;
  //
  //     await connectionManager.query(`UPDATE test_data SET id = UUID(), description = 'test2a' WHERE id = '${testId1}'`);
  //     const [[result2]] = await connectionManager.query(
  //       `SELECT id AS test_id FROM test_data WHERE description = 'test2a'`
  //     );
  //     const testId2 = result2.test_id;
  //
  //     // This update may fail replicating with:
  //     // Error: Update on missing record public.test_data:074a601e-fc78-4c33-a15d-f89fdd4af31d :: {"g":1,"t":"651e9fbe9fec6155895057ec","k":"1a0b34da-fb8c-5e6f-8421-d7a3c5d4df4f"}
  //     await connectionManager.query(`UPDATE test_data SET description = 'test2b' WHERE id = '${testId2}'`);
  //
  //     // Re-use old id again
  //     await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('${testId1}', 'test1b')`);
  //     await connectionManager.query(`UPDATE test_data SET description = 'test1c' WHERE id = '${testId1}'`);
  //
  //     const data = await context.getBucketData('global[]');
  //     expect(data).toMatchObject([
  //       // Initial insert
  //       putOp('test_data', { id: testId1, description: 'test1' }),
  //       // Update id, then description
  //       removeOp('test_data', testId1),
  //       putOp('test_data', { id: testId2, description: 'test2a' }),
  //       putOp('test_data', { id: testId2, description: 'test2b' }),
  //       // Re-use old id
  //       putOp('test_data', { id: testId1, description: 'test1b' }),
  //       putOp('test_data', { id: testId1, description: 'test1c' })
  //     ]);
  //   })
  // );

  // test(
  //   'initial sync',
  //   binlogStreamTest(factory, async (context) => {
  //     const { connectionManager } = context;
  //     await context.updateSyncRules(BASIC_SYNC_RULES);
  //
  //     await connectionManager.query(
  //       `CREATE TABLE test_data (id CHAR(36) PRIMARY KEY DEFAULT (UUID()), description text)`
  //     );
  //
  //     await connectionManager.query(`INSERT INTO test_data(description) VALUES('test1')`);
  //     const [[result]] = await connectionManager.query(
  //       `SELECT id AS test_id FROM test_data WHERE description = 'test1'`
  //     );
  //     const testId = result.test_id;
  //
  //     await context.replicateSnapshot();
  //     context.startStreaming();
  //
  //     const data = await context.getBucketData('global[]');
  //     expect(data).toMatchObject([putOp('test_data', { id: testId, description: 'test1' })]);
  //   })
  // );

  // // test(
  // //   'record too large',
  // //   binlogStreamTest(factory, async (context) => {
  // //     await context.updateSyncRules(`bucket_definitions:
  // //       global:
  // //         data:
  // //           - SELECT id, description, other FROM "test_data"`);
  // //     const { connectionManager } = context;
  // //
  // //     await connectionManager.query(`CREATE TABLE test_data(id text primary key, description text, other text)`);
  // //
  // //     await context.replicateSnapshot();
  // //
  // //     // 4MB
  // //     const largeDescription = crypto.randomBytes(2_000_000).toString('hex');
  // //     // 18MB
  // //     const tooLargeDescription = crypto.randomBytes(9_000_000).toString('hex');
  // //
  // //     await connectionManager.query({
  // //       statement: `INSERT INTO test_data(id, description, other) VALUES('t1', $1, 'foo')`,
  // //       params: [{ type: 'varchar', value: tooLargeDescription }]
  // //     });
  // //     await connectionManager.query({
  // //       statement: `UPDATE test_data SET description = $1 WHERE id = 't1'`,
  // //       params: [{ type: 'varchar', value: largeDescription }]
  // //     });
  // //
  // //     context.startStreaming();
  // //
  // //     const data = await context.getBucketData('global[]');
  // //     expect(data.length).toEqual(1);
  // //     const row = JSON.parse(data[0].data as string);
  // //     delete row.description;
  // //     expect(row).toEqual({ id: 't1', other: 'foo' });
  // //     delete data[0].data;
  // //     expect(data[0]).toMatchObject({ object_id: 't1', object_type: 'test_data', op: 'PUT', op_id: '1' });
  // //   })
  // // );

  // test(
  //   'table not in sync rules',
  //   binlogStreamTest(factory, async (context) => {
  //     const { connectionManager } = context;
  //     await context.updateSyncRules(BASIC_SYNC_RULES);
  //
  //     await connectionManager.query(
  //       `CREATE TABLE test_donotsync (id CHAR(36) PRIMARY KEY DEFAULT (UUID()), description text)`
  //     );
  //
  //     await context.replicateSnapshot();
  //
  //     const startRowCount =
  //       (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
  //     const startTxCount =
  //       (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0;
  //
  //     context.startStreaming();
  //
  //     await connectionManager.query(`INSERT INTO test_donotsync(description) VALUES('test1')`);
  //     const data = await context.getBucketData('global[]');
  //
  //     expect(data).toMatchObject([]);
  //     const endRowCount = (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
  //     const endTxCount =
  //       (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0;
  //
  //     // There was a transaction, but we should not replicate any actual data
  //     expect(endRowCount - startRowCount).toEqual(0);
  //     expect(endTxCount - startTxCount).toEqual(1);
  //   })
  // );
}
|