@powersync/service-core-tests 0.5.0 → 0.6.1

This diff shows the changes between publicly released versions of the package, as published to a supported registry, and is provided for informational purposes only.
@@ -15,10 +15,7 @@ const TEST_TABLE = test_utils.makeTestTable('test', ['id']);
  * compactTests(() => new MongoStorageFactory(), { clearBatchLimit: 2, moveBatchLimit: 1, moveBatchQueryLimit: 1 }));
  * ```
  */
-export function registerCompactTests<CompactOptions extends storage.CompactOptions = storage.CompactOptions>(
-  generateStorageFactory: storage.TestStorageFactory,
-  compactOptions: CompactOptions
-) {
+export function registerCompactTests(generateStorageFactory: storage.TestStorageFactory) {
   test('compacting (1)', async () => {
     const sync_rules = test_utils.testRules(`
 bucket_definitions:
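
The signature change above is the breaking change in this release: registerCompactTests no longer accepts a suite-wide compactOptions argument, and each test now passes its limits directly to bucketStorage.compact() instead (see the hunks below). A minimal sketch of how a caller's registration would change; the import path and MongoStorageFactory are taken from the doc comment above and are assumptions, not verified exports:

// Hypothetical call site; MongoStorageFactory stands in for any
// storage.TestStorageFactory implementation.
import { registerCompactTests } from '@powersync/service-core-tests';
import { MongoStorageFactory } from './mongo-storage-factory.js'; // hypothetical

// 0.5.x: compact options were passed once, for the whole suite:
// registerCompactTests(async () => new MongoStorageFactory(), {
//   clearBatchLimit: 2,
//   moveBatchLimit: 1,
//   moveBatchQueryLimit: 1
// });

// 0.6.x: only the storage factory is passed; each test owns its limits.
registerCompactTests(async () => new MongoStorageFactory());
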
@@ -87,7 +84,11 @@ bucket_definitions:
       }
     ]);
 
-    await bucketStorage.compact(compactOptions);
+    await bucketStorage.compact({
+      clearBatchLimit: 2,
+      moveBatchLimit: 1,
+      moveBatchQueryLimit: 1
+    });
 
     const batchAfter = await test_utils.oneFromAsync(
       bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]))
@@ -204,7 +205,11 @@ bucket_definitions:
       }
     ]);
 
-    await bucketStorage.compact(compactOptions);
+    await bucketStorage.compact({
+      clearBatchLimit: 2,
+      moveBatchLimit: 1,
+      moveBatchQueryLimit: 1
+    });
 
     const batchAfter = await test_utils.oneFromAsync(
       bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]))
@@ -285,7 +290,11 @@ bucket_definitions:
     });
     const checkpoint2 = result2!.flushed_op;
 
-    await bucketStorage.compact(compactOptions);
+    await bucketStorage.compact({
+      clearBatchLimit: 2,
+      moveBatchLimit: 1,
+      moveBatchQueryLimit: 1
+    });
 
     const batchAfter = await test_utils.oneFromAsync(
       bucketStorage.getBucketDataBatch(checkpoint2, new Map([['global[]', '0']]))
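
Note the limits inlined in the three hunks above: clearBatchLimit: 2, moveBatchLimit: 1 and moveBatchQueryLimit: 1 force compact() to process operations in many tiny batches, which is what these tests exercise. A sketch of the options shape as inferred from the call sites in this diff; the real storage.CompactOptions type may carry more fields, and the field comments are my reading of the names:

// Inferred from the compact() calls above, not from the package's types.
interface CompactOptionsSketch {
  clearBatchLimit?: number; // max operations cleared per batch
  moveBatchLimit?: number; // max operations moved per batch
  moveBatchQueryLimit?: number; // max operations fetched per move query
}

const testLimits: CompactOptionsSketch = {
  clearBatchLimit: 2,
  moveBatchLimit: 1,
  moveBatchQueryLimit: 1
};
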
@@ -307,4 +316,130 @@ bucket_definitions:
       checksum: 1874612650
     });
   });
+
+  test('compacting (4)', async () => {
+    const sync_rules = test_utils.testRules(/* yaml */
+      ` bucket_definitions:
+          grouped:
+            # The parameter query here is not important
+            # We specifically don't want to create bucket_parameter records here
+            # since the op_ids for bucket_data could vary between storage implementations.
+            parameters: select 'b' as b
+            data:
+              - select * from test where b = bucket.b`);
+
+    await using factory = await generateStorageFactory();
+    const bucketStorage = factory.getInstance(sync_rules);
+
+    const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      /**
+       * Repeatedly create operations which fall into different buckets.
+       * The bucket operations are purposely interleaved as the op_id increases.
+       * A large amount of operations are created here.
+       * The configured window of compacting operations is 100. This means the initial window will
+       * contain operations from multiple buckets.
+       */
+      for (let count = 0; count < 100; count++) {
+        await batch.save({
+          sourceTable: TEST_TABLE,
+          tag: storage.SaveOperationTag.INSERT,
+          after: {
+            id: 't1',
+            b: 'b1',
+            value: 'start'
+          },
+          afterReplicaId: test_utils.rid('t1')
+        });
+
+        await batch.save({
+          sourceTable: TEST_TABLE,
+          tag: storage.SaveOperationTag.UPDATE,
+          after: {
+            id: 't1',
+            b: 'b1',
+            value: 'intermediate'
+          },
+          afterReplicaId: test_utils.rid('t1')
+        });
+
+        await batch.save({
+          sourceTable: TEST_TABLE,
+          tag: storage.SaveOperationTag.INSERT,
+          after: {
+            id: 't2',
+            b: 'b2',
+            value: 'start'
+          },
+          afterReplicaId: test_utils.rid('t2')
+        });
+
+        await batch.save({
+          sourceTable: TEST_TABLE,
+          tag: storage.SaveOperationTag.UPDATE,
+          after: {
+            id: 't1',
+            b: 'b1',
+            value: 'final'
+          },
+          afterReplicaId: test_utils.rid('t1')
+        });
+
+        await batch.save({
+          sourceTable: TEST_TABLE,
+          tag: storage.SaveOperationTag.UPDATE,
+          after: {
+            id: 't2',
+            b: 'b2',
+            value: 'final'
+          },
+          afterReplicaId: test_utils.rid('t2')
+        });
+      }
+    });
+
+    const checkpoint = result!.flushed_op;
+
+    await bucketStorage.compact({
+      clearBatchLimit: 100,
+      moveBatchLimit: 100,
+      moveBatchQueryLimit: 100 // Larger limit for a larger window of operations
+    });
+
+    const batchAfter = await test_utils.fromAsync(
+      bucketStorage.getBucketDataBatch(
+        checkpoint,
+        new Map([
+          ['grouped["b1"]', '0'],
+          ['grouped["b2"]', '0']
+        ])
+      )
+    );
+    const dataAfter = batchAfter.flatMap((b) => b.batch.data);
+
+    // The op_ids will vary between MongoDB and Postgres storage
+    expect(dataAfter).toMatchObject(
+      expect.arrayContaining([
+        { op_id: '497', op: 'CLEAR', checksum: -937074151 },
+        {
+          op_id: '499',
+          op: 'PUT',
+          object_type: 'test',
+          object_id: 't1',
+          checksum: 52221819,
+          subkey: '6544e3899293153fa7b38331/117ab485-4b42-58a2-ab32-0053a22c3423',
+          data: '{"id":"t1","b":"b1","value":"final"}'
+        },
+        { op_id: '498', op: 'CLEAR', checksum: -234380197 },
+        {
+          op_id: '500',
+          op: 'PUT',
+          object_type: 'test',
+          object_id: 't2',
+          checksum: 2126669493,
+          subkey: '6544e3899293153fa7b38331/ec27c691-b47a-5d92-927a-9944feb89eee',
+          data: '{"id":"t2","b":"b2","value":"final"}'
+        }
+      ])
+    );
+  });
 }
@@ -253,6 +253,69 @@ bucket_definitions:
     expect(sentRows).toBe(10002);
   });
 
+  test('sends checkpoint complete line for empty checkpoint', async () => {
+    await using f = await factory();
+
+    const syncRules = await f.updateSyncRules({
+      content: BASIC_SYNC_RULES
+    });
+    const bucketStorage = f.getInstance(syncRules);
+    await bucketStorage.autoActivate();
+
+    await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+      await batch.save({
+        sourceTable: TEST_TABLE,
+        tag: storage.SaveOperationTag.INSERT,
+        after: {
+          id: 't1',
+          description: 'sync'
+        },
+        afterReplicaId: 't1'
+      });
+      await batch.commit('0/1');
+    });
+
+    const stream = sync.streamResponse({
+      bucketStorage: bucketStorage,
+      syncRules: bucketStorage.getParsedSyncRules(test_utils.PARSE_OPTIONS),
+      params: {
+        buckets: [],
+        include_checksum: true,
+        raw_data: true
+      },
+      tracker,
+      syncParams: new RequestParameters({ sub: '' }, {}),
+      token: { exp: Date.now() / 1000 + 100000 } as any
+    });
+
+    const lines: any[] = [];
+    let receivedCompletions = 0;
+
+    for await (let next of stream) {
+      if (typeof next == 'string') {
+        next = JSON.parse(next);
+      }
+      lines.push(next);
+
+      if (typeof next === 'object' && next !== null) {
+        if ('checkpoint_complete' in next) {
+          receivedCompletions++;
+          if (receivedCompletions == 1) {
+            // Trigger an empty bucket update.
+            await bucketStorage.createManagedWriteCheckpoint({ user_id: '', heads: { '1': '1/0' } });
+            await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
+              await batch.commit('1/0');
+            });
+          } else {
+            break;
+          }
+        }
+      }
+    }
+
+    expect(lines).toMatchSnapshot();
+  });
+
   test('sync legacy non-raw data', async () => {
     const f = await factory();
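
The new sync test above streams lines from sync.streamResponse, parsing string lines as JSON and counting checkpoint_complete lines; after the first one it triggers an empty commit, then expects a second checkpoint_complete for the resulting empty checkpoint. A small helper equivalent to the test's inline check, for readers following the control flow (the payload of a checkpoint_complete line beyond the key itself is not shown in this diff):

// Mirrors the test's in-loop check: lines arrive either as JSON strings
// or as objects, and a 'checkpoint_complete' key marks the end of one
// checkpoint's data.
type SyncLine = string | Record<string, unknown>;

function isCheckpointComplete(line: SyncLine): boolean {
  const parsed: unknown = typeof line === 'string' ? JSON.parse(line) : line;
  return typeof parsed === 'object' && parsed !== null && 'checkpoint_complete' in parsed;
}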