@powersync/service-module-mysql 0.0.0-dev-20241021185145 → 0.0.0-dev-20241023191639
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +6 -5
- package/dist/common/mysql-to-sqlite.d.ts +2 -1
- package/dist/common/mysql-to-sqlite.js +10 -2
- package/dist/common/mysql-to-sqlite.js.map +1 -1
- package/dist/common/read-executed-gtid.d.ts +1 -0
- package/dist/common/read-executed-gtid.js +7 -0
- package/dist/common/read-executed-gtid.js.map +1 -1
- package/dist/index.d.ts +1 -3
- package/dist/index.js +1 -3
- package/dist/index.js.map +1 -1
- package/dist/module/MySQLModule.d.ts +2 -0
- package/dist/module/MySQLModule.js +17 -0
- package/dist/module/MySQLModule.js.map +1 -1
- package/dist/replication/BinLogStream.js +40 -29
- package/dist/replication/BinLogStream.js.map +1 -1
- package/dist/types/types.d.ts +2 -0
- package/dist/types/types.js +3 -1
- package/dist/types/types.js.map +1 -1
- package/dist/utils/mysql_utils.d.ts +3 -0
- package/dist/utils/mysql_utils.js +7 -2
- package/dist/utils/mysql_utils.js.map +1 -1
- package/package.json +6 -6
- package/src/common/check-source-configuration.ts +1 -1
- package/src/common/mysql-to-sqlite.ts +10 -2
- package/src/common/read-executed-gtid.ts +12 -0
- package/src/index.ts +1 -5
- package/src/module/MySQLModule.ts +18 -0
- package/src/replication/BinLogStream.ts +50 -41
- package/src/replication/zongji/zongji.d.ts +11 -5
- package/src/types/types.ts +5 -1
- package/src/utils/mysql_utils.ts +8 -2
- package/tsconfig.tsbuildinfo +1 -1
- package/test/src/binlog_stream.test.ts +0 -287
- package/test/src/binlog_stream_utils.ts +0 -152
package/test/src/binlog_stream.test.ts (file removed)
@@ -1,287 +0,0 @@
-import { putOp, removeOp } from '@core-tests/stream_utils.js';
-import { MONGO_STORAGE_FACTORY } from '@core-tests/util.js';
-import { BucketStorageFactory, Metrics } from '@powersync/service-core';
-import * as crypto from 'crypto';
-import { describe, expect, test } from 'vitest';
-import { binlogStreamTest } from './binlog_stream_utils.js';
-import { logger } from '@powersync/lib-services-framework';
-
-type StorageFactory = () => Promise<BucketStorageFactory>;
-
-const BASIC_SYNC_RULES = `
-bucket_definitions:
-  global:
-    data:
-      - SELECT id, description FROM "test_data"
-`;
-
-describe(
-  ' Binlog stream - mongodb',
-  function () {
-    defineBinlogStreamTests(MONGO_STORAGE_FACTORY);
-  },
-  { timeout: 20_000 }
-);
-
-function defineBinlogStreamTests(factory: StorageFactory) {
-  test(
-    'Replicate basic values',
-    binlogStreamTest(factory, async (context) => {
-      const { connectionManager } = context;
-      await context.updateSyncRules(`
-bucket_definitions:
-  global:
-    data:
-      - SELECT id, description, num FROM "test_data"`);
-
-      await connectionManager.query(
-        `CREATE TABLE test_data (id CHAR(36) PRIMARY KEY DEFAULT (UUID()), description TEXT, num BIGINT)`
-      );
-
-      await context.replicateSnapshot();
-
-      const startRowCount =
-        (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
-      const startTxCount =
-        (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0;
-
-      context.startStreaming();
-      await connectionManager.query(`INSERT INTO test_data(description, num) VALUES('test1', 1152921504606846976)`);
-      const [[result]] = await connectionManager.query(
-        `SELECT id AS test_id FROM test_data WHERE description = 'test1' AND num = 1152921504606846976`
-      );
-      const testId = result.test_id;
-      logger.info('Finished Inserting data with id:' + testId);
-
-      const data = await context.getBucketData('global[]');
-
-      expect(data).toMatchObject([putOp('test_data', { id: testId, description: 'test1', num: 1152921504606846976n })]);
-      const endRowCount = (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
-      const endTxCount =
-        (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0;
-      expect(endRowCount - startRowCount).toEqual(1);
-      expect(endTxCount - startTxCount).toEqual(1);
-    })
-  );
-
-  test(
-    'replicating case sensitive table',
-    binlogStreamTest(factory, async (context) => {
-      const { connectionManager } = context;
-      await context.updateSyncRules(`
-bucket_definitions:
-  global:
-    data:
-      - SELECT id, description FROM "test_DATA"
-`);
-
-      await connectionManager.query(
-        `CREATE TABLE test_DATA (id CHAR(36) PRIMARY KEY DEFAULT (UUID()), description text)`
-      );
-
-      await context.replicateSnapshot();
-
-      const startRowCount =
-        (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
-      const startTxCount =
-        (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0;
-
-      context.startStreaming();
-
-      await connectionManager.query(`INSERT INTO test_DATA(description) VALUES('test1')`);
-      const [[result]] = await connectionManager.query(
-        `SELECT id AS test_id FROM test_DATA WHERE description = 'test1'`
-      );
-      const testId = result.test_id;
-
-      const data = await context.getBucketData('global[]');
-
-      expect(data).toMatchObject([putOp('test_DATA', { id: testId, description: 'test1' })]);
-      const endRowCount = (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
-      const endTxCount =
-        (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0;
-      expect(endRowCount - startRowCount).toEqual(1);
-      expect(endTxCount - startTxCount).toEqual(1);
-    })
-  );
-
-  // TODO: Not supported yet
-  // test(
-  //   'replicating TRUNCATE',
-  //   binlogStreamTest(factory, async (context) => {
-  //     const { connectionManager } = context;
-  //     const syncRuleContent = `
-  // bucket_definitions:
-  //   global:
-  //     data:
-  //       - SELECT id, description FROM "test_data"
-  //   by_test_data:
-  //     parameters: SELECT id FROM test_data WHERE id = token_parameters.user_id
-  //     data: []
-  // `;
-  //     await context.updateSyncRules(syncRuleContent);
-  //     await connectionManager.query(`DROP TABLE IF EXISTS test_data`);
-  //     await connectionManager.query(
-  //       `CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`
-  //     );
-  //
-  //     await context.replicateSnapshot();
-  //     context.startStreaming();
-  //
-  //     const [{ test_id }] = pgwireRows(
-  //       await connectionManager.query(`INSERT INTO test_data(description) VALUES('test1') returning id as test_id`)
-  //     );
-  //     await connectionManager.query(`TRUNCATE test_data`);
-  //
-  //     const data = await context.getBucketData('global[]');
-  //
-  //     expect(data).toMatchObject([
-  //       putOp('test_data', { id: test_id, description: 'test1' }),
-  //       removeOp('test_data', test_id)
-  //     ]);
-  //   })
-  // );
-
-  test(
-    'replicating changing primary key',
-    binlogStreamTest(factory, async (context) => {
-      const { connectionManager } = context;
-      await context.updateSyncRules(BASIC_SYNC_RULES);
-
-      await connectionManager.query(
-        `CREATE TABLE test_data (id CHAR(36) PRIMARY KEY DEFAULT (UUID()), description text)`
-      );
-
-      await context.replicateSnapshot();
-      context.startStreaming();
-
-      await connectionManager.query(`INSERT INTO test_data(description) VALUES('test1')`);
-      const [[result1]] = await connectionManager.query(
-        `SELECT id AS test_id FROM test_data WHERE description = 'test1'`
-      );
-      const testId1 = result1.test_id;
-
-      await connectionManager.query(`UPDATE test_data SET id = UUID(), description = 'test2a' WHERE id = '${testId1}'`);
-      const [[result2]] = await connectionManager.query(
-        `SELECT id AS test_id FROM test_data WHERE description = 'test2a'`
-      );
-      const testId2 = result2.test_id;
-
-      // This update may fail replicating with:
-      // Error: Update on missing record public.test_data:074a601e-fc78-4c33-a15d-f89fdd4af31d :: {"g":1,"t":"651e9fbe9fec6155895057ec","k":"1a0b34da-fb8c-5e6f-8421-d7a3c5d4df4f"}
-      await connectionManager.query(`UPDATE test_data SET description = 'test2b' WHERE id = '${testId2}'`);
-
-      // Re-use old id again
-      await connectionManager.query(`INSERT INTO test_data(id, description) VALUES('${testId1}', 'test1b')`);
-      await connectionManager.query(`UPDATE test_data SET description = 'test1c' WHERE id = '${testId1}'`);
-
-      const data = await context.getBucketData('global[]');
-      expect(data).toMatchObject([
-        // Initial insert
-        putOp('test_data', { id: testId1, description: 'test1' }),
-        // Update id, then description
-        removeOp('test_data', testId1),
-        putOp('test_data', { id: testId2, description: 'test2a' }),
-        putOp('test_data', { id: testId2, description: 'test2b' }),
-        // Re-use old id
-        putOp('test_data', { id: testId1, description: 'test1b' }),
-        putOp('test_data', { id: testId1, description: 'test1c' })
-      ]);
-    })
-  );
-
-  test(
-    'initial sync',
-    binlogStreamTest(factory, async (context) => {
-      const { connectionManager } = context;
-      await context.updateSyncRules(BASIC_SYNC_RULES);
-
-      await connectionManager.query(
-        `CREATE TABLE test_data (id CHAR(36) PRIMARY KEY DEFAULT (UUID()), description text)`
-      );
-
-      await connectionManager.query(`INSERT INTO test_data(description) VALUES('test1')`);
-      const [[result]] = await connectionManager.query(
-        `SELECT id AS test_id FROM test_data WHERE description = 'test1'`
-      );
-      const testId = result.test_id;
-
-      await context.replicateSnapshot();
-
-      const data = await context.getBucketData('global[]');
-      expect(data).toMatchObject([putOp('test_data', { id: testId, description: 'test1' })]);
-    })
-  );
-
-  // test(
-  //   'record too large',
-  //   binlogStreamTest(factory, async (context) => {
-  //     await context.updateSyncRules(`bucket_definitions:
-  //       global:
-  //         data:
-  //           - SELECT id, description, other FROM "test_data"`);
-  //     const { connectionManager } = context;
-  //
-  //     await connectionManager.query(`CREATE TABLE test_data(id text primary key, description text, other text)`);
-  //
-  //     await context.replicateSnapshot();
-  //
-  //     // 4MB
-  //     const largeDescription = crypto.randomBytes(2_000_000).toString('hex');
-  //     // 18MB
-  //     const tooLargeDescription = crypto.randomBytes(9_000_000).toString('hex');
-  //
-  //     await connectionManager.query({
-  //       statement: `INSERT INTO test_data(id, description, other) VALUES('t1', $1, 'foo')`,
-  //       params: [{ type: 'varchar', value: tooLargeDescription }]
-  //     });
-  //     await connectionManager.query({
-  //       statement: `UPDATE test_data SET description = $1 WHERE id = 't1'`,
-  //       params: [{ type: 'varchar', value: largeDescription }]
-  //     });
-  //
-  //     context.startStreaming();
-  //
-  //     const data = await context.getBucketData('global[]');
-  //     expect(data.length).toEqual(1);
-  //     const row = JSON.parse(data[0].data as string);
-  //     delete row.description;
-  //     expect(row).toEqual({ id: 't1', other: 'foo' });
-  //     delete data[0].data;
-  //     expect(data[0]).toMatchObject({ object_id: 't1', object_type: 'test_data', op: 'PUT', op_id: '1' });
-  //   })
-  // );
-
-  test(
-    'table not in sync rules',
-    binlogStreamTest(factory, async (context) => {
-      const { connectionManager } = context;
-      await context.updateSyncRules(BASIC_SYNC_RULES);
-
-      await connectionManager.query(
-        `CREATE TABLE test_donotsync (id CHAR(36) PRIMARY KEY DEFAULT (UUID()), description text)`
-      );
-
-      await context.replicateSnapshot();
-
-      const startRowCount =
-        (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
-      const startTxCount =
-        (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0;
-
-      context.startStreaming();
-
-      await connectionManager.query(`INSERT INTO test_donotsync(description) VALUES('test1')`);
-      const data = await context.getBucketData('global[]');
-
-      expect(data).toMatchObject([]);
-      const endRowCount = (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0;
-      const endTxCount =
-        (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0;
-
-      // There was a transaction, but we should not replicate any actual data
-      expect(endRowCount - startRowCount).toEqual(0);
-      expect(endTxCount - startTxCount).toEqual(1);
-    })
-  );
-}
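Note on the removed tests above: each one brackets its assertions with the same before/after reads of the 'powersync_rows_replicated_total' and 'powersync_transactions_replicated_total' counters. A helper along the following lines could factor that pattern out; this is a hypothetical sketch (metricDelta is not part of the package), relying only on the Metrics.getInstance().getMetricValueForTests() call already used throughout the file:

import { Metrics } from '@powersync/service-core';

// Hypothetical helper: read a named counter before an async block runs,
// and return how much it changed afterwards.
async function metricDelta(metric: string, run: () => Promise<void>): Promise<number> {
  const before = (await Metrics.getInstance().getMetricValueForTests(metric)) ?? 0;
  await run();
  const after = (await Metrics.getInstance().getMetricValueForTests(metric)) ?? 0;
  return after - before;
}

// Usage, mirroring the assertions above:
//   expect(await metricDelta('powersync_rows_replicated_total', insertAndRead)).toEqual(1);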
package/test/src/binlog_stream_utils.ts (file removed)
@@ -1,152 +0,0 @@
-import { ActiveCheckpoint, BucketStorageFactory, OpId, SyncRulesBucketStorage } from '@powersync/service-core';
-import { TEST_CONNECTION_OPTIONS, clearAndRecreateTestDb } from './util.js';
-import { fromAsync } from '@core-tests/stream_utils.js';
-import { BinLogStream, BinLogStreamOptions } from '@module/replication/BinLogStream.js';
-import { MySQLConnectionManager } from '@module/replication/MySQLConnectionManager.js';
-import mysqlPromise from 'mysql2/promise';
-import { readExecutedGtid } from '@module/common/read-executed-gtid.js';
-import { logger } from '@powersync/lib-services-framework';
-
-/**
- * Tests operating on the binlog stream need to configure the stream and manage asynchronous
- * replication, which gets a little tricky.
- *
- * This wraps a test in a function that configures all the context, and tears it down afterward.
- */
-export function binlogStreamTest(
-  factory: () => Promise<BucketStorageFactory>,
-  test: (context: BinlogStreamTestContext) => Promise<void>
-): () => Promise<void> {
-  return async () => {
-    const f = await factory();
-    const connectionManager = new MySQLConnectionManager(TEST_CONNECTION_OPTIONS, {});
-
-    const connection = await connectionManager.getConnection();
-    await clearAndRecreateTestDb(connection);
-    connection.release();
-    const context = new BinlogStreamTestContext(f, connectionManager);
-    try {
-      await test(context);
-    } finally {
-      await context.dispose();
-    }
-  };
-}
-
-export class BinlogStreamTestContext {
-  private _binlogStream?: BinLogStream;
-  private abortController = new AbortController();
-  private streamPromise?: Promise<void>;
-  public storage?: SyncRulesBucketStorage;
-  private replicationDone = false;
-
-  constructor(
-    public factory: BucketStorageFactory,
-    public connectionManager: MySQLConnectionManager
-  ) {}
-
-  async dispose() {
-    this.abortController.abort();
-    await this.streamPromise;
-    await this.connectionManager.end();
-  }
-
-  get connectionTag() {
-    return this.connectionManager.connectionTag;
-  }
-
-  async updateSyncRules(content: string) {
-    const syncRules = await this.factory.updateSyncRules({ content: content });
-    this.storage = this.factory.getInstance(syncRules);
-    return this.storage!;
-  }
-
-  get binlogStream() {
-    if (this.storage == null) {
-      throw new Error('updateSyncRules() first');
-    }
-    if (this._binlogStream) {
-      return this._binlogStream;
-    }
-    const options: BinLogStreamOptions = {
-      storage: this.storage,
-      connections: this.connectionManager,
-      abortSignal: this.abortController.signal
-    };
-    this._binlogStream = new BinLogStream(options);
-    return this._binlogStream!;
-  }
-
-  async replicateSnapshot() {
-    await this.binlogStream.initReplication();
-    this.replicationDone = true;
-  }
-
-  startStreaming() {
-    if (!this.replicationDone) {
-      throw new Error('Call replicateSnapshot() before startStreaming()');
-    }
-    this.streamPromise = this.binlogStream.streamChanges();
-  }
-
-  async getCheckpoint(options?: { timeout?: number }) {
-    const connection = await this.connectionManager.getConnection();
-    let checkpoint = await Promise.race([
-      getClientCheckpoint(connection, this.factory, { timeout: options?.timeout ?? 60_000 }),
-      this.streamPromise
-    ]);
-    connection.release();
-    if (typeof checkpoint == undefined) {
-      // This indicates an issue with the test setup - streamingPromise completed instead
-      // of getClientCheckpoint()
-      throw new Error('Test failure - streamingPromise completed');
-    }
-    return checkpoint as string;
-  }
-
-  async getBucketsDataBatch(buckets: Record<string, string>, options?: { timeout?: number }) {
-    let checkpoint = await this.getCheckpoint(options);
-    const map = new Map<string, string>(Object.entries(buckets));
-    return fromAsync(this.storage!.getBucketDataBatch(checkpoint, map));
-  }
-
-  async getBucketData(bucket: string, start?: string, options?: { timeout?: number }) {
-    start ??= '0';
-    let checkpoint = await this.getCheckpoint(options);
-    const map = new Map<string, string>([[bucket, start]]);
-    const batch = this.storage!.getBucketDataBatch(checkpoint, map);
-    const batches = await fromAsync(batch);
-    return batches[0]?.batch.data ?? [];
-  }
-}
-
-export async function getClientCheckpoint(
-  connection: mysqlPromise.Connection,
-  bucketStorage: BucketStorageFactory,
-  options?: { timeout?: number }
-): Promise<OpId> {
-  const start = Date.now();
-  const gtid = await readExecutedGtid(connection);
-  // This old API needs a persisted checkpoint id.
-  // Since we don't use LSNs anymore, the only way to get that is to wait.
-
-  const timeout = options?.timeout ?? 50_000;
-  let lastCp: ActiveCheckpoint | null = null;
-
-  logger.info('Expected Checkpoint: ' + gtid.comparable);
-  while (Date.now() - start < timeout) {
-    const cp = await bucketStorage.getActiveCheckpoint();
-    lastCp = cp;
-    //logger.info('Last Checkpoint: ' + lastCp.lsn);
-    if (!cp.hasSyncRules()) {
-      throw new Error('No sync rules available');
-    }
-    if (cp.lsn && cp.lsn >= gtid.comparable) {
-      return cp.checkpoint;
-    }
-
-    await new Promise((resolve) => setTimeout(resolve, 30));
-  }
-
-  throw new Error(`Timeout while waiting for checkpoint ${gtid.comparable}. Last checkpoint: ${lastCp?.lsn}`);
-}
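One detail worth flagging in the removed getCheckpoint() above: the guard `typeof checkpoint == undefined` compares the string produced by typeof against the undefined value, so it is always false and the intended error never fires. A corrected sketch of that guard, under the same assumption the original comment states (Promise.race() resolves with undefined when streamPromise settles first):

// Sketch of a corrected guard for getCheckpoint(); the surrounding code is unchanged.
const checkpoint = await Promise.race([
  getClientCheckpoint(connection, this.factory, { timeout: options?.timeout ?? 60_000 }),
  this.streamPromise
]);
connection.release();
if (checkpoint === undefined) {
  // streamPromise completed instead of getClientCheckpoint() - a test setup problem.
  throw new Error('Test failure - streamingPromise completed');
}
return checkpoint;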