@powersync/service-core 0.4.2 → 0.5.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. package/CHANGELOG.md +12 -0
  2. package/dist/entry/cli-entry.js +2 -1
  3. package/dist/entry/cli-entry.js.map +1 -1
  4. package/dist/entry/commands/compact-action.d.ts +2 -0
  5. package/dist/entry/commands/compact-action.js +48 -0
  6. package/dist/entry/commands/compact-action.js.map +1 -0
  7. package/dist/entry/entry-index.d.ts +1 -0
  8. package/dist/entry/entry-index.js +1 -0
  9. package/dist/entry/entry-index.js.map +1 -1
  10. package/dist/storage/BucketStorage.d.ts +31 -1
  11. package/dist/storage/BucketStorage.js.map +1 -1
  12. package/dist/storage/mongo/MongoCompactor.d.ts +40 -0
  13. package/dist/storage/mongo/MongoCompactor.js +292 -0
  14. package/dist/storage/mongo/MongoCompactor.js.map +1 -0
  15. package/dist/storage/mongo/MongoSyncBucketStorage.d.ts +3 -2
  16. package/dist/storage/mongo/MongoSyncBucketStorage.js +19 -13
  17. package/dist/storage/mongo/MongoSyncBucketStorage.js.map +1 -1
  18. package/dist/storage/mongo/models.d.ts +5 -4
  19. package/dist/storage/mongo/models.js.map +1 -1
  20. package/dist/storage/mongo/util.d.ts +3 -0
  21. package/dist/storage/mongo/util.js +22 -0
  22. package/dist/storage/mongo/util.js.map +1 -1
  23. package/dist/sync/sync.js +20 -7
  24. package/dist/sync/sync.js.map +1 -1
  25. package/package.json +4 -4
  26. package/src/entry/cli-entry.ts +2 -1
  27. package/src/entry/commands/compact-action.ts +54 -0
  28. package/src/entry/entry-index.ts +1 -0
  29. package/src/storage/BucketStorage.ts +36 -1
  30. package/src/storage/mongo/MongoCompactor.ts +371 -0
  31. package/src/storage/mongo/MongoSyncBucketStorage.ts +25 -14
  32. package/src/storage/mongo/models.ts +5 -4
  33. package/src/storage/mongo/util.ts +25 -0
  34. package/src/sync/sync.ts +20 -7
  35. package/test/src/__snapshots__/sync.test.ts.snap +85 -0
  36. package/test/src/bucket_validation.test.ts +142 -0
  37. package/test/src/bucket_validation.ts +116 -0
  38. package/test/src/compacting.test.ts +207 -0
  39. package/test/src/data_storage.test.ts +19 -60
  40. package/test/src/slow_tests.test.ts +144 -102
  41. package/test/src/sync.test.ts +169 -29
  42. package/test/src/util.ts +65 -1
  43. package/test/src/wal_stream_utils.ts +13 -4
  44. package/tsconfig.tsbuildinfo +1 -1
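The headline change in this release is bucket compaction: a new `MongoCompactor`, a `compact()` method on bucket storage (`BucketStorage.ts`), a `compact` CLI action (`compact-action.ts`), and sync-protocol handling for the `CLEAR` ops that compaction emits. The new tests below validate compaction by "reducing" a bucket's oplog to its canonical final state. `bucket_validation.ts` itself is not shown in this diff; the following is a minimal sketch of the reduction idea, inferred from how the tests use `reduceBucket` (the simplified `OplogEntry` type and the checksum-free handling are assumptions):

```ts
// Minimal sketch of bucket reduction, inferred from test usage below.
// The real implementation lives in test/src/bucket_validation.ts (added in
// this release) and also deals with checksums, which are omitted here.
interface OplogEntry {
  op_id: string;
  op: 'PUT' | 'REMOVE' | 'MOVE' | 'CLEAR';
  object_id?: string;
}

// Reduce an ordered bucket oplog to its canonical form: a single CLEAR op,
// followed by the last PUT per remaining object.
function reduceBucket(ops: OplogEntry[]): OplogEntry[] {
  const current = new Map<string, OplogEntry>();
  for (const op of ops) {
    if (op.op === 'PUT') {
      current.set(op.object_id!, op);
    } else if (op.op === 'REMOVE') {
      current.delete(op.object_id!);
    } else if (op.op === 'CLEAR') {
      current.clear();
    }
    // MOVE ops carry no data, so they do not affect the reduced state.
  }
  return [{ op_id: '0', op: 'CLEAR' }, ...current.values()];
}
```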
package/test/src/slow_tests.test.ts CHANGED
@@ -4,12 +4,15 @@ import { afterEach, describe, expect, test } from 'vitest';
 import { WalStream, WalStreamOptions } from '../../src/replication/WalStream.js';
 import { getClientCheckpoint } from '../../src/util/utils.js';
 import { env } from './env.js';
-import { MONGO_STORAGE_FACTORY, StorageFactory, TEST_CONNECTION_OPTIONS, connectPgPool } from './util.js';
+import { MONGO_STORAGE_FACTORY, StorageFactory, TEST_CONNECTION_OPTIONS, clearTestDb, connectPgPool } from './util.js';
 
 import * as pgwire from '@powersync/service-jpgwire';
 import { SqliteRow } from '@powersync/service-sync-rules';
 import { MongoBucketStorage } from '../../src/storage/MongoBucketStorage.js';
 import { PgManager } from '../../src/util/PgManager.js';
+import { mapOpEntry } from '@/storage/storage-index.js';
+import { reduceBucket, validateCompactedBucket, validateBucket } from './bucket_validation.js';
+import * as timers from 'node:timers/promises';
 
 describe('slow tests - mongodb', function () {
   // These are slow, inconsistent tests.
@@ -51,130 +54,169 @@ function defineSlowTests(factory: StorageFactory) {
   // * Skipping LSNs after a keepalive message
   // * Skipping LSNs when source transactions overlap
   test(
-    'repeated replication',
+    'repeated replication - basic',
     async () => {
-      const connections = new PgManager(TEST_CONNECTION_OPTIONS, {});
-      const replicationConnection = await connections.replicationConnection();
-      const pool = connections.pool;
-      const f = (await factory()) as MongoBucketStorage;
+      await testRepeatedReplication({ compact: false, maxBatchSize: 50, numBatches: 5 });
+    },
+    { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }
+  );
 
-      const syncRuleContent = `
+  test(
+    'repeated replication - compacted',
+    async () => {
+      await testRepeatedReplication({ compact: true, maxBatchSize: 100, numBatches: 2 });
+    },
+    { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }
+  );
+
+  async function testRepeatedReplication(testOptions: { compact: boolean; maxBatchSize: number; numBatches: number }) {
+    const connections = new PgManager(TEST_CONNECTION_OPTIONS, {});
+    const replicationConnection = await connections.replicationConnection();
+    const pool = connections.pool;
+    await clearTestDb(pool);
+    const f = (await factory()) as MongoBucketStorage;
+
+    const syncRuleContent = `
 bucket_definitions:
   global:
     data:
       - SELECT * FROM "test_data"
 `;
-      const syncRules = await f.updateSyncRules({ content: syncRuleContent });
-      const storage = f.getInstance(syncRules.parsed());
-      abortController = new AbortController();
-      const options: WalStreamOptions = {
-        abort_signal: abortController.signal,
-        connections,
-        storage: storage,
-        factory: f
-      };
-      walStream = new WalStream(options);
-
-      await pool.query(`DROP TABLE IF EXISTS test_data`);
-      await pool.query(
-        `CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text, num decimal)`
-      );
-      await pool.query(`ALTER TABLE test_data REPLICA IDENTITY FULL`);
-
-      await walStream.initReplication(replicationConnection);
-      await storage.autoActivate();
-      let abort = false;
-      streamPromise = walStream.streamChanges(replicationConnection).finally(() => {
-        abort = true;
-      });
-      const start = Date.now();
+    const syncRules = await f.updateSyncRules({ content: syncRuleContent });
+    const storage = f.getInstance(syncRules.parsed());
+    abortController = new AbortController();
+    const options: WalStreamOptions = {
+      abort_signal: abortController.signal,
+      connections,
+      storage: storage,
+      factory: f
+    };
+    walStream = new WalStream(options);
+
+    await pool.query(
+      `CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text, num decimal)`
+    );
+    await pool.query(`ALTER TABLE test_data REPLICA IDENTITY FULL`);
+
+    await walStream.initReplication(replicationConnection);
+    await storage.autoActivate();
+    let abort = false;
+    streamPromise = walStream.streamChanges(replicationConnection).finally(() => {
+      abort = true;
+    });
+    const start = Date.now();
+
+    while (!abort && Date.now() - start < TEST_DURATION_MS) {
+      const bg = async () => {
+        for (let j = 0; j < testOptions.numBatches && !abort; j++) {
+          const n = Math.max(1, Math.floor(Math.random() * testOptions.maxBatchSize));
+          let statements: pgwire.Statement[] = [];
+          for (let i = 0; i < n; i++) {
+            const description = `test${i}`;
+            statements.push({
+              statement: `INSERT INTO test_data(description, num) VALUES($1, $2) returning id as test_id`,
+              params: [
+                { type: 'varchar', value: description },
+                { type: 'float8', value: Math.random() }
+              ]
+            });
+          }
+          const results = await pool.query(...statements);
+          const ids = results.results.map((sub) => {
+            return sub.rows[0][0] as string;
+          });
+          await new Promise((resolve) => setTimeout(resolve, Math.random() * 30));
 
-      while (!abort && Date.now() - start < TEST_DURATION_MS) {
-        const bg = async () => {
-          for (let j = 0; j < 1 && !abort; j++) {
-            const n = 1;
-            let statements: pgwire.Statement[] = [];
-            for (let i = 0; i < n; i++) {
-              const description = `test${i}`;
-              statements.push({
-                statement: `INSERT INTO test_data(description, num) VALUES($1, $2) returning id as test_id`,
+          if (Math.random() > 0.5) {
+            const updateStatements: pgwire.Statement[] = ids.map((id) => {
+              return {
+                statement: `UPDATE test_data SET num = $2 WHERE id = $1`,
                 params: [
-                  { type: 'varchar', value: description },
+                  { type: 'uuid', value: id },
                   { type: 'float8', value: Math.random() }
                 ]
-              });
-            }
-            const results = await pool.query(...statements);
-            const ids = results.results.map((sub) => {
-              return sub.rows[0][0] as string;
+              };
             });
-            await new Promise((resolve) => setTimeout(resolve, Math.random() * 30));
 
+            await pool.query(...updateStatements);
             if (Math.random() > 0.5) {
-              const updateStatements: pgwire.Statement[] = ids.map((id) => {
-                return {
-                  statement: `UPDATE test_data SET num = $2 WHERE id = $1`,
-                  params: [
-                    { type: 'uuid', value: id },
-                    { type: 'float8', value: Math.random() }
-                  ]
-                };
-              });
-
+              // Special case - an update that doesn't change data
               await pool.query(...updateStatements);
-              if (Math.random() > 0.5) {
-                // Special case - an update that doesn't change data
-                await pool.query(...updateStatements);
-              }
             }
-
-            const deleteStatements: pgwire.Statement[] = ids.map((id) => {
-              return {
-                statement: `DELETE FROM test_data WHERE id = $1`,
-                params: [{ type: 'uuid', value: id }]
-              };
-            });
-            await pool.query(...deleteStatements);
-
-            await new Promise((resolve) => setTimeout(resolve, Math.random() * 10));
           }
-        };
 
-        // Call the above loop multiple times concurrently
-        let promises = [1, 2, 3].map((i) => bg());
-        await Promise.all(promises);
+          const deleteStatements: pgwire.Statement[] = ids.map((id) => {
+            return {
+              statement: `DELETE FROM test_data WHERE id = $1`,
+              params: [{ type: 'uuid', value: id }]
+            };
+          });
+          await pool.query(...deleteStatements);
 
-        // Wait for replication to finish
-        let checkpoint = await getClientCheckpoint(pool, storage.factory, { timeout: TIMEOUT_MARGIN_MS });
+          await new Promise((resolve) => setTimeout(resolve, Math.random() * 10));
+        }
+      };
 
-        // Check that all inserts have been deleted again
-        const docs = await f.db.current_data.find().toArray();
-        const transformed = docs.map((doc) => {
-          return bson.deserialize((doc.data as mongo.Binary).buffer) as SqliteRow;
-        });
-        expect(transformed).toEqual([]);
-
-        // Check that each PUT has a REMOVE
-        const ops = await f.db.bucket_data.find().sort({ _id: 1 }).toArray();
-        let active = new Set<string>();
-        for (let op of ops) {
-          const key = op.source_key.toHexString();
-          if (op.op == 'PUT') {
-            active.add(key);
-          } else if (op.op == 'REMOVE') {
-            active.delete(key);
+      let compactController = new AbortController();
+
+      const bgCompact = async () => {
+        // Repeatedly compact, and check that the compact conditions hold
+        while (!compactController.signal.aborted) {
+          const delay = Math.random() * 50;
+          try {
+            await timers.setTimeout(delay, undefined, { signal: compactController.signal });
+          } catch (e) {
+            break;
           }
+
+          const checkpoint = BigInt((await storage.getCheckpoint()).checkpoint);
+          const opsBefore = (await f.db.bucket_data.find().sort({ _id: 1 }).toArray())
+            .filter((row) => row._id.o <= checkpoint)
+            .map(mapOpEntry);
+          await storage.compact({ maxOpId: checkpoint });
+          const opsAfter = (await f.db.bucket_data.find().sort({ _id: 1 }).toArray())
+            .filter((row) => row._id.o <= checkpoint)
+            .map(mapOpEntry);
+
+          validateCompactedBucket(opsBefore, opsAfter);
         }
-        if (active.size > 0) {
-          throw new Error(`${active.size} rows not removed`);
+      };
+
+      // Call the above loop multiple times concurrently
+      const promises = [1, 2, 3].map((i) => bg());
+      const compactPromise = testOptions.compact ? bgCompact() : null;
+      await Promise.all(promises);
+      compactController.abort();
+      await compactPromise;
+
+      // Wait for replication to finish
+      let checkpoint = await getClientCheckpoint(pool, storage.factory, { timeout: TIMEOUT_MARGIN_MS });
+
+      // Check that all inserts have been deleted again
+      const docs = await f.db.current_data.find().toArray();
+      const transformed = docs.map((doc) => {
+        return bson.deserialize((doc.data as mongo.Binary).buffer) as SqliteRow;
+      });
+      expect(transformed).toEqual([]);
+
+      // Check that each PUT has a REMOVE
+      const ops = await f.db.bucket_data.find().sort({ _id: 1 }).toArray();
+
+      // All in a single bucket in this test
+      const bucket = ops.map((op) => mapOpEntry(op));
+      const reduced = reduceBucket(bucket);
+      expect(reduced).toMatchObject([
+        {
+          op_id: '0',
+          op: 'CLEAR'
         }
-        }
+        // Should contain no additional data
+      ]);
+    }
 
-      abortController.abort();
-      await streamPromise;
-    },
-    { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }
-  );
+    abortController.abort();
+    await streamPromise;
+  }
 
   // Test repeatedly performing initial replication.
   //
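The `bgCompact` loop above races compaction against concurrent writes and then calls `validateCompactedBucket(opsBefore, opsAfter)`. The invariant it relies on: compacting ops at or below a checkpoint may rewrite the oplog (merging redundant ops into `MOVE`/`CLEAR`), but it must not change the state a client reaches by syncing that checkpoint. `bucket_validation.ts` is not included in this excerpt; a plausible core of the check, reusing the `reduceBucket` sketch above, would be:

```ts
import { expect } from 'vitest';

// Hypothetical sketch of the invariant: the reduced (final) state of the
// bucket must be identical before and after compaction. The real check in
// bucket_validation.ts may assert more (e.g. checksum preservation).
export function validateCompactedBucket(before: OplogEntry[], after: OplogEntry[]) {
  expect(reduceBucket(after)).toEqual(reduceBucket(before));
}
```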
@@ -184,6 +226,7 @@ bucket_definitions:
     'repeated initial replication',
     async () => {
       const pool = await connectPgPool();
+      await clearTestDb(pool);
       const f = await factory();
 
       const syncRuleContent = `
@@ -196,7 +239,6 @@ bucket_definitions:
       const storage = f.getInstance(syncRules.parsed());
 
       // 1. Setup some base data that will be replicated in initial replication
-      await pool.query(`DROP TABLE IF EXISTS test_data`);
       await pool.query(`CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`);
 
       let statements: pgwire.Statement[] = [];
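From the calls in these tests — `storage.compact()` with no arguments and `storage.compact({ maxOpId: checkpoint })` — the storage interface gains roughly the following shape. This is inferred from usage only; `BucketStorage.ts` adds 36 lines in this release and likely defines more than is visible here, so treat the names as assumptions:

```ts
// Inferred from usage in this diff; any option beyond maxOpId is an assumption.
interface CompactOptions {
  // Only compact operations at or below this op id, leaving ops above an
  // already-published checkpoint untouched.
  maxOpId?: bigint;
}

interface SyncRulesBucketStorage {
  compact(options?: CompactOptions): Promise<void>;
}
```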
package/test/src/sync.test.ts CHANGED
@@ -1,34 +1,18 @@
-import * as bson from 'bson';
+import { RequestTracker } from '@/sync/RequestTracker.js';
+import { StreamingSyncLine } from '@/util/protocol-types.js';
+import { lsnMakeComparable } from '@powersync/service-jpgwire';
+import { JSONBig } from '@powersync/service-jsonbig';
+import { RequestParameters } from '@powersync/service-sync-rules';
+import * as timers from 'timers/promises';
 import { describe, expect, test } from 'vitest';
 import { ZERO_LSN } from '../../src/replication/WalStream.js';
-import { SourceTable } from '../../src/storage/SourceTable.js';
-import { hashData } from '../../src/util/utils.js';
-import { MONGO_STORAGE_FACTORY, StorageFactory } from './util.js';
-import { JSONBig } from '@powersync/service-jsonbig';
 import { streamResponse } from '../../src/sync/sync.js';
-import * as timers from 'timers/promises';
-import { lsnMakeComparable } from '@powersync/service-jpgwire';
-import { RequestParameters } from '@powersync/service-sync-rules';
-import { RequestTracker } from '@/sync/RequestTracker.js';
+import { makeTestTable, MONGO_STORAGE_FACTORY, StorageFactory } from './util.js';
 
 describe('sync - mongodb', function () {
   defineTests(MONGO_STORAGE_FACTORY);
 });
 
-function makeTestTable(name: string, columns?: string[] | undefined) {
-  const relId = hashData('table', name, (columns ?? ['id']).join(','));
-  const id = new bson.ObjectId('6544e3899293153fa7b38331');
-  return new SourceTable(
-    id,
-    SourceTable.DEFAULT_TAG,
-    relId,
-    SourceTable.DEFAULT_SCHEMA,
-    name,
-    (columns ?? ['id']).map((column) => ({ name: column, typeOid: 25 })),
-    true
-  );
-}
-
 const TEST_TABLE = makeTestTable('test', ['id']);
 
 const BASIC_SYNC_RULES = `
@@ -251,15 +235,156 @@ function defineTests(factory: StorageFactory) {
     const expLines = await getCheckpointLines(iter);
     expect(expLines).toMatchSnapshot();
   });
+
+  test('compacting data - invalidate checkpoint', async () => {
+    // This tests a case of a compact operation invalidating a checkpoint in the
+    // middle of syncing data.
+    // This is expected to be rare in practice, but it is important to handle
+    // this case correctly to maintain consistency on the client.
+
+    const f = await factory();
+
+    const syncRules = await f.updateSyncRules({
+      content: BASIC_SYNC_RULES
+    });
+
+    const storage = await f.getInstance(syncRules.parsed());
+    await storage.setSnapshotDone(ZERO_LSN);
+    await storage.autoActivate();
+
+    await storage.startBatch({}, async (batch) => {
+      await batch.save({
+        sourceTable: TEST_TABLE,
+        tag: 'insert',
+        after: {
+          id: 't1',
+          description: 'Test 1'
+        }
+      });
+
+      await batch.save({
+        sourceTable: TEST_TABLE,
+        tag: 'insert',
+        after: {
+          id: 't2',
+          description: 'Test 2'
+        }
+      });
+
+      await batch.commit(lsnMakeComparable('0/1'));
+    });
+
+    const stream = streamResponse({
+      storage: f,
+      params: {
+        buckets: [],
+        include_checksum: true,
+        raw_data: true
+      },
+      tracker,
+      syncParams: new RequestParameters({ sub: '' }, {}),
+      token: { exp: Date.now() / 1000 + 10 } as any
+    });
+
+    const iter = stream[Symbol.asyncIterator]();
+
+    // Only consume the first "checkpoint" message, and pause before receiving data.
+    const lines = await consumeIterator(iter, { consume: false, isDone: (line) => (line as any)?.checkpoint != null });
+    expect(lines).toMatchSnapshot();
+    expect(lines[0]).toEqual({
+      checkpoint: expect.objectContaining({
+        last_op_id: '2'
+      })
+    });
+
+    // Now we save additional data AND compact before continuing.
+    // This invalidates the checkpoint we've received above.
+
+    await storage.startBatch({}, async (batch) => {
+      await batch.save({
+        sourceTable: TEST_TABLE,
+        tag: 'update',
+        after: {
+          id: 't1',
+          description: 'Test 1b'
+        }
+      });
+
+      await batch.save({
+        sourceTable: TEST_TABLE,
+        tag: 'update',
+        after: {
+          id: 't2',
+          description: 'Test 2b'
+        }
+      });
+
+      await batch.commit(lsnMakeComparable('0/2'));
+    });
+
+    await storage.compact();
+
+    const lines2 = await getCheckpointLines(iter, { consume: true });
+
+    // Snapshot test checks for changes in general.
+    // The tests after that document the specific things we're looking for
+    // in this test.
+    expect(lines2).toMatchSnapshot();
+
+    expect(lines2[0]).toEqual({
+      data: expect.objectContaining({
+        has_more: false,
+        data: [
+          // The first two ops have been replaced by a single CLEAR op
+          expect.objectContaining({
+            op: 'CLEAR'
+          })
+        ]
+      })
+    });
+
+    // Note: No checkpoint_complete here, since the checkpoint has been
+    // invalidated by the CLEAR op.
+
+    expect(lines2[1]).toEqual({
+      checkpoint_diff: expect.objectContaining({
+        last_op_id: '4'
+      })
+    });
+
+    expect(lines2[2]).toEqual({
+      data: expect.objectContaining({
+        has_more: false,
+        data: [
+          expect.objectContaining({
+            op: 'PUT'
+          }),
+          expect.objectContaining({
+            op: 'PUT'
+          })
+        ]
+      })
+    });
+
+    // Now we get a checkpoint_complete
+    expect(lines2[3]).toEqual({
+      checkpoint_complete: expect.objectContaining({
+        last_op_id: '4'
+      })
+    });
+  });
 }
 
 /**
- * Get lines on an iterator until the next checkpoint_complete.
+ * Get lines on an iterator until isDone(line) == true.
  *
- * Does not stop the iterator.
+ * Does not stop the iterator unless options.consume is true.
  */
-async function getCheckpointLines(iter: AsyncIterator<any>, options?: { consume?: boolean }): Promise<any[]> {
-  let lines: any[] = [];
+async function consumeIterator<T>(
+  iter: AsyncIterator<T>,
+  options: { isDone: (line: T) => boolean; consume?: boolean }
+) {
+  let lines: T[] = [];
   try {
     const controller = new AbortController();
     const timeout = timers.setTimeout(1500, { value: null, done: 'timeout' }, { signal: controller.signal });
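The assertions in this test pin down the protocol behaviour: once a compact rewrites ops below a checkpoint that a client is still downloading, the server must not emit `checkpoint_complete` for that checkpoint. Instead the client sees, in order (shapes simplified from the expectations above):

```ts
// The line sequence asserted by this test, in order:
const expectedSequence = [
  { data: { data: [{ op: 'CLEAR' }] } }, // compacted ops arrive as a single CLEAR
  { checkpoint_diff: { last_op_id: '4' } }, // a new checkpoint supersedes the invalidated one
  { data: { data: [{ op: 'PUT' }, { op: 'PUT' }] } }, // fresh data for the new checkpoint
  { checkpoint_complete: { last_op_id: '4' } } // only now may the client apply state
];
```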
@@ -274,7 +399,7 @@ async function getCheckpointLines(iter: AsyncIterator<any>, options?: { consume?
       if (value) {
         lines.push(value);
       }
-      if (done || value.checkpoint_complete) {
+      if (done || options.isDone(value)) {
         break;
       }
     }
@@ -292,11 +417,26 @@ async function getCheckpointLines(iter: AsyncIterator<any>, options?: { consume?
   }
 }
 
+/**
+ * Get lines on an iterator until the next checkpoint_complete.
+ *
+ * Does not stop the iterator unless options.consume is true.
+ */
+async function getCheckpointLines(
+  iter: AsyncIterator<StreamingSyncLine | string | null>,
+  options?: { consume?: boolean }
+) {
+  return consumeIterator(iter, {
+    consume: options?.consume,
+    isDone: (line) => (line as any)?.checkpoint_complete
+  });
+}
+
 /**
  * Get lines on an iterator until the next checkpoint_complete.
  *
  * Stops the iterator afterwards.
  */
-async function consumeCheckpointLines(iterable: AsyncIterable<any>): Promise<any[]> {
+async function consumeCheckpointLines(iterable: AsyncIterable<StreamingSyncLine | string | null>): Promise<any[]> {
   return getCheckpointLines(iterable[Symbol.asyncIterator](), { consume: true });
 }
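This refactor generalises the old `getCheckpointLines` into `consumeIterator` with a pluggable `isDone` predicate; that is what lets the compacting test pause after the initial `checkpoint` line without consuming the rest of the stream. Other predicates compose the same way, e.g. (hypothetical usage, not part of this diff):

```ts
// Wait for the first data batch, leaving the iterator open for later reads:
const dataLines = await consumeIterator(iter, {
  consume: false,
  isDone: (line) => (line as any)?.data != null
});
```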
package/test/src/util.ts CHANGED
@@ -1,12 +1,16 @@
 import * as pgwire from '@powersync/service-jpgwire';
 import { normalizeConnection } from '@powersync/service-types';
 import * as mongo from 'mongodb';
-import { BucketStorageFactory } from '../../src/storage/BucketStorage.js';
+import { BucketStorageFactory, SyncBucketDataBatch } from '../../src/storage/BucketStorage.js';
 import { MongoBucketStorage } from '../../src/storage/MongoBucketStorage.js';
 import { PowerSyncMongo } from '../../src/storage/mongo/db.js';
 import { escapeIdentifier } from '../../src/util/pgwire_utils.js';
 import { env } from './env.js';
 import { Metrics } from '@/metrics/Metrics.js';
+import { hashData } from '@/util/utils.js';
+import { SourceTable } from '@/storage/SourceTable.js';
+import * as bson from 'bson';
+import { SyncBucketData } from '@/util/protocol-types.js';
 
 // The metrics need to be initialised before they can be used
 await Metrics.initialise({
@@ -27,6 +31,10 @@ export const MONGO_STORAGE_FACTORY: StorageFactory = async () => {
 };
 
 export async function clearTestDb(db: pgwire.PgClient) {
+  await db.query(
+    "select pg_drop_replication_slot(slot_name) from pg_replication_slots where active = false and slot_name like 'test_%'"
+  );
+
   await db.query(`CREATE EXTENSION IF NOT EXISTS "uuid-ossp"`);
   try {
     await db.query(`DROP PUBLICATION powersync`);
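Dropping inactive `test_%` slots up front matters because a replication slot left behind by an aborted test run keeps Postgres retaining WAL and blocks reuse of the slot name. Scoping the cleanup to `test_%` (rather than all inactive slots, as `wal_stream_utils.ts` did before this release) avoids touching unrelated slots on a shared database. To inspect leftovers manually (hypothetical helper, not part of this diff; `pg_replication_slots` is a standard Postgres view):

```ts
// List test replication slots that survived a previous run:
const leftover = await db.query(
  `select slot_name from pg_replication_slots where active = false and slot_name like 'test_%'`
);
```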
@@ -74,3 +82,59 @@ export async function connectMongo() {
   const db = new PowerSyncMongo(client);
   return db;
 }
+
+export function makeTestTable(name: string, columns?: string[] | undefined) {
+  const relId = hashData('table', name, (columns ?? ['id']).join(','));
+  const id = new bson.ObjectId('6544e3899293153fa7b38331');
+  return new SourceTable(
+    id,
+    SourceTable.DEFAULT_TAG,
+    relId,
+    SourceTable.DEFAULT_SCHEMA,
+    name,
+    (columns ?? ['id']).map((column) => ({ name: column, typeOid: 25 })),
+    true
+  );
+}
+
+export function getBatchData(batch: SyncBucketData[] | SyncBucketDataBatch[] | SyncBucketDataBatch) {
+  const first = getFirst(batch);
+  if (first == null) {
+    return [];
+  }
+  return first.data.map((d) => {
+    return {
+      op_id: d.op_id,
+      op: d.op,
+      object_id: d.object_id,
+      checksum: d.checksum
+    };
+  });
+}
+
+export function getBatchMeta(batch: SyncBucketData[] | SyncBucketDataBatch[] | SyncBucketDataBatch) {
+  const first = getFirst(batch);
+  if (first == null) {
+    return null;
+  }
+  return {
+    has_more: first.has_more,
+    after: first.after,
+    next_after: first.next_after
+  };
+}
+
+function getFirst(batch: SyncBucketData[] | SyncBucketDataBatch[] | SyncBucketDataBatch): SyncBucketData | null {
+  if (!Array.isArray(batch)) {
+    return batch.batch;
+  }
+  if (batch.length == 0) {
+    return null;
+  }
+  let first = batch[0];
+  if ((first as SyncBucketDataBatch).batch != null) {
+    return (first as SyncBucketDataBatch).batch;
+  } else {
+    return first as SyncBucketData;
+  }
+}
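These helpers exist to shrink repetitive assertions; the files-changed list shows `data_storage.test.ts` replacing 60 lines with 19. A hypothetical test fragment using them (the bucket name, checkpoint, and expected values are illustrative, not from this diff):

```ts
// Hypothetical assertion style enabled by the new helpers:
const batch = await fromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])));

expect(getBatchData(batch)).toEqual([
  { op_id: '1', op: 'PUT', object_id: 't1', checksum: expect.any(Number) }
]);
expect(getBatchMeta(batch)).toEqual({ after: '0', has_more: false, next_after: '1' });
```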
package/test/src/wal_stream_utils.ts CHANGED
@@ -20,9 +20,7 @@ export function walStreamTest(
   return async () => {
     const f = await factory();
     const connections = new PgManager(TEST_CONNECTION_OPTIONS, {});
-    await connections.pool.query(
-      'select pg_drop_replication_slot(slot_name) from pg_replication_slots where active = false'
-    );
+
     await clearTestDb(connections.pool);
     const context = new WalStreamTestContext(f, connections);
     try {
@@ -113,7 +111,7 @@ export class WalStreamTestContext {
     const map = new Map<string, string>([[bucket, start]]);
     const batch = await this.storage!.getBucketDataBatch(checkpoint, map);
     const batches = await fromAsync(batch);
-    return batches[0]?.data ?? [];
+    return batches[0]?.batch.data ?? [];
   }
 }
 
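Note the `batches[0]?.batch.data` change: `getBucketDataBatch` now yields wrapper objects rather than raw `SyncBucketData`. From the usage here and in `util.ts`'s `getFirst`, the wrapper looks roughly like the following (a sketch; the real definition is in `BucketStorage.ts`, which this excerpt only names, and it may carry additional fields):

```ts
// Shape inferred from usage in this diff:
interface SyncBucketDataBatch {
  batch: SyncBucketData; // the per-bucket chunk: { data, has_more, after, next_after, ... }
}
```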
@@ -145,3 +143,14 @@ export async function fromAsync<T>(source: Iterable<T> | AsyncIterable<T>): Prom
   }
   return items;
 }
+
+export async function oneFromAsync<T>(source: Iterable<T> | AsyncIterable<T>): Promise<T> {
+  const items: T[] = [];
+  for await (const item of source) {
+    items.push(item);
+  }
+  if (items.length != 1) {
+    throw new Error(`One item expected, got: ${items.length}`);
+  }
+  return items[0];
+}
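`oneFromAsync` tightens tests that expect exactly one batch: where `fromAsync` would silently yield an empty or multi-element array, this throws. A hypothetical usage (bucket name and assertion illustrative):

```ts
// Assert that a small checkpoint produces exactly one batch:
const batch = await oneFromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])));
expect(batch.batch.data.length).toBeGreaterThan(0);
```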