@powersync/service-core 0.4.1 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. package/CHANGELOG.md +16 -0
  2. package/dist/entry/cli-entry.js +2 -1
  3. package/dist/entry/cli-entry.js.map +1 -1
  4. package/dist/entry/commands/compact-action.d.ts +2 -0
  5. package/dist/entry/commands/compact-action.js +49 -0
  6. package/dist/entry/commands/compact-action.js.map +1 -0
  7. package/dist/entry/entry-index.d.ts +1 -0
  8. package/dist/entry/entry-index.js +1 -0
  9. package/dist/entry/entry-index.js.map +1 -1
  10. package/dist/routes/endpoints/socket-route.js +9 -1
  11. package/dist/routes/endpoints/socket-route.js.map +1 -1
  12. package/dist/routes/endpoints/sync-stream.js +9 -1
  13. package/dist/routes/endpoints/sync-stream.js.map +1 -1
  14. package/dist/storage/BucketStorage.d.ts +25 -1
  15. package/dist/storage/BucketStorage.js.map +1 -1
  16. package/dist/storage/mongo/MongoCompactor.d.ts +38 -0
  17. package/dist/storage/mongo/MongoCompactor.js +278 -0
  18. package/dist/storage/mongo/MongoCompactor.js.map +1 -0
  19. package/dist/storage/mongo/MongoSyncBucketStorage.d.ts +3 -2
  20. package/dist/storage/mongo/MongoSyncBucketStorage.js +19 -13
  21. package/dist/storage/mongo/MongoSyncBucketStorage.js.map +1 -1
  22. package/dist/storage/mongo/models.d.ts +5 -4
  23. package/dist/storage/mongo/models.js.map +1 -1
  24. package/dist/storage/mongo/util.d.ts +3 -0
  25. package/dist/storage/mongo/util.js +22 -0
  26. package/dist/storage/mongo/util.js.map +1 -1
  27. package/dist/sync/RequestTracker.d.ts +9 -0
  28. package/dist/sync/RequestTracker.js +19 -0
  29. package/dist/sync/RequestTracker.js.map +1 -0
  30. package/dist/sync/sync.d.ts +2 -0
  31. package/dist/sync/sync.js +51 -18
  32. package/dist/sync/sync.js.map +1 -1
  33. package/dist/sync/util.d.ts +2 -1
  34. package/dist/sync/util.js +2 -3
  35. package/dist/sync/util.js.map +1 -1
  36. package/package.json +4 -4
  37. package/src/entry/cli-entry.ts +2 -1
  38. package/src/entry/commands/compact-action.ts +55 -0
  39. package/src/entry/entry-index.ts +1 -0
  40. package/src/routes/endpoints/socket-route.ts +9 -1
  41. package/src/routes/endpoints/sync-stream.ts +10 -1
  42. package/src/storage/BucketStorage.ts +29 -1
  43. package/src/storage/mongo/MongoCompactor.ts +356 -0
  44. package/src/storage/mongo/MongoSyncBucketStorage.ts +25 -14
  45. package/src/storage/mongo/models.ts +5 -4
  46. package/src/storage/mongo/util.ts +25 -0
  47. package/src/sync/RequestTracker.ts +21 -0
  48. package/src/sync/sync.ts +61 -17
  49. package/src/sync/util.ts +6 -2
  50. package/test/src/__snapshots__/sync.test.ts.snap +85 -0
  51. package/test/src/bucket_validation.test.ts +142 -0
  52. package/test/src/bucket_validation.ts +116 -0
  53. package/test/src/compacting.test.ts +207 -0
  54. package/test/src/data_storage.test.ts +19 -60
  55. package/test/src/slow_tests.test.ts +144 -102
  56. package/test/src/sync.test.ts +176 -28
  57. package/test/src/util.ts +65 -1
  58. package/test/src/wal_stream_utils.ts +13 -4
  59. package/tsconfig.tsbuildinfo +1 -1
package/src/sync/sync.ts CHANGED
@@ -11,6 +11,7 @@ import { logger } from '@powersync/lib-services-framework';
 import { Metrics } from '../metrics/Metrics.js';
 import { mergeAsyncIterables } from './merge.js';
 import { TokenStreamOptions, tokenStream } from './util.js';
+import { RequestTracker } from './RequestTracker.js';
 
 /**
  * Maximum number of connections actively fetching data.
@@ -28,12 +29,14 @@ export interface SyncStreamParameters {
    */
   signal?: AbortSignal;
   tokenStreamOptions?: Partial<TokenStreamOptions>;
+
+  tracker: RequestTracker;
 }
 
 export async function* streamResponse(
   options: SyncStreamParameters
 ): AsyncIterable<util.StreamingSyncLine | string | null> {
-  const { storage, params, syncParams, token, tokenStreamOptions, signal } = options;
+  const { storage, params, syncParams, token, tokenStreamOptions, tracker, signal } = options;
   // We also need to be able to abort, so we create our own controller.
   const controller = new AbortController();
   if (signal) {
@@ -49,7 +52,7 @@ export async function* streamResponse(
     }
   }
   const ki = tokenStream(token, controller.signal, tokenStreamOptions);
-  const stream = streamResponseInner(storage, params, syncParams, controller.signal);
+  const stream = streamResponseInner(storage, params, syncParams, tracker, controller.signal);
   // Merge the two streams, and abort as soon as one of the streams end.
   const merged = mergeAsyncIterables([stream, ki], controller.signal);
 
@@ -72,6 +75,7 @@ async function* streamResponseInner(
   storage: storage.BucketStorageFactory,
   params: util.StreamingSyncRequest,
   syncParams: RequestParameters,
+  tracker: RequestTracker,
   signal: AbortSignal
 ): AsyncGenerator<util.StreamingSyncLine | string | null> {
   // Bucket state of bucket id -> op_id.
@@ -109,6 +113,11 @@ async function* streamResponseInner(
     });
 
     if (allBuckets.length > 1000) {
+      logger.error(`Too many buckets`, {
+        checkpoint,
+        user_id: syncParams.user_id,
+        buckets: allBuckets.length
+      });
       // TODO: Limit number of buckets even before we get to this point
       throw new Error(`Too many buckets: ${allBuckets.length}`);
     }
@@ -137,11 +146,18 @@ async function* streamResponseInner(
       }
       bucketsToFetch = diff.updatedBuckets.map((c) => c.bucket);
 
-      let message = `Updated checkpoint: ${checkpoint} | write: ${writeCheckpoint} | `;
+      let message = `Updated checkpoint: ${checkpoint} | `;
+      message += `write: ${writeCheckpoint} | `;
       message += `buckets: ${allBuckets.length} | `;
       message += `updated: ${limitedBuckets(diff.updatedBuckets, 20)} | `;
-      message += `removed: ${limitedBuckets(diff.removedBuckets, 20)} | `;
-      logger.info(message);
+      message += `removed: ${limitedBuckets(diff.removedBuckets, 20)}`;
+      logger.info(message, {
+        checkpoint,
+        user_id: syncParams.user_id,
+        buckets: allBuckets.length,
+        updated: diff.updatedBuckets.length,
+        removed: diff.removedBuckets.length
+      });
 
       const checksum_line: util.StreamingSyncCheckpointDiff = {
         checkpoint_diff: {
@@ -156,7 +172,7 @@ async function* streamResponseInner(
     } else {
       let message = `New checkpoint: ${checkpoint} | write: ${writeCheckpoint} | `;
       message += `buckets: ${allBuckets.length} ${limitedBuckets(allBuckets, 20)}`;
-      logger.info(message);
+      logger.info(message, { checkpoint, user_id: syncParams.user_id, buckets: allBuckets.length });
       bucketsToFetch = allBuckets;
       const checksum_line: util.StreamingSyncCheckpoint = {
         checkpoint: {
@@ -172,7 +188,16 @@ async function* streamResponseInner(
 
     // This incrementally updates dataBuckets with each individual bucket position.
     // At the end of this, we can be sure that all buckets have data up to the checkpoint.
-    yield* bucketDataInBatches({ storage, checkpoint, bucketsToFetch, dataBuckets, raw_data, binary_data, signal });
+    yield* bucketDataInBatches({
+      storage,
+      checkpoint,
+      bucketsToFetch,
+      dataBuckets,
+      raw_data,
+      binary_data,
+      signal,
+      tracker
+    });
 
     await new Promise((resolve) => setTimeout(resolve, 10));
   }
@@ -186,6 +211,7 @@ interface BucketDataRequest {
   dataBuckets: Map<string, string>;
   raw_data: boolean | undefined;
   binary_data: boolean | undefined;
+  tracker: RequestTracker;
   signal: AbortSignal;
 }
 
@@ -221,11 +247,19 @@ async function* bucketDataInBatches(request: BucketDataRequest) {
   }
 }
 
+interface BucketDataBatchResult {
+  done: boolean;
+  data: any;
+}
+
 /**
  * Extracted as a separate internal function just to avoid memory leaks.
 */
-async function* bucketDataBatch(request: BucketDataRequest) {
-  const { storage, checkpoint, bucketsToFetch, dataBuckets, raw_data, binary_data, signal } = request;
+async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<BucketDataBatchResult, void> {
+  const { storage, checkpoint, bucketsToFetch, dataBuckets, raw_data, binary_data, tracker, signal } = request;
+
+  const checkpointOp = BigInt(checkpoint);
+  let checkpointInvalidated = false;
 
   const [_, release] = await syncSemaphore.acquire();
   try {
@@ -236,13 +270,16 @@ async function* bucketDataBatch(request: BucketDataRequest) {
 
     let has_more = false;
 
-    for await (let r of data) {
+    for await (let { batch: r, targetOp } of data) {
       if (signal.aborted) {
         return;
       }
       if (r.has_more) {
         has_more = true;
       }
+      if (targetOp != null && targetOp > checkpointOp) {
+        checkpointInvalidated = true;
+      }
       if (r.data.length == 0) {
         continue;
       }
@@ -272,18 +309,25 @@ async function* bucketDataBatch(request: BucketDataRequest) {
         // iterator memory in case if large data sent.
         yield { data: null, done: false };
       }
-      Metrics.getInstance().operations_synced_total.add(r.data.length);
+      tracker.addOperationsSynced(r.data.length);
 
       dataBuckets.set(r.bucket, r.next_after);
     }
 
     if (!has_more) {
-      const line: util.StreamingSyncCheckpointComplete = {
-        checkpoint_complete: {
-          last_op_id: checkpoint
-        }
-      };
-      yield { data: line, done: true };
+      if (checkpointInvalidated) {
+        // Checkpoint invalidated by a CLEAR or MOVE op.
+        // Don't send the checkpoint_complete line in this case.
+        // More data should be available immediately for a new checkpoint.
+        yield { data: null, done: true };
+      } else {
+        const line: util.StreamingSyncCheckpointComplete = {
+          checkpoint_complete: {
+            last_op_id: checkpoint
+          }
+        };
+        yield { data: line, done: true };
+      }
     }
   } finally {
    release();
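Note: the `RequestTracker` used above is new in this release (`package/src/sync/RequestTracker.ts`, +21 lines), but its source is not included in this excerpt. Based only on how it is called here (`tracker.addOperationsSynced(...)`) and in `util.ts` below (`tracker.addDataSynced(...)`), a minimal sketch could look like the following; the per-request fields, the constructor, and the Metrics forwarding are assumptions, not the published implementation.

```ts
// Hypothetical sketch of src/sync/RequestTracker.ts, inferred from call sites in this diff.
// Only the two method names are confirmed by the diff; everything else is an assumption.
import { Metrics } from '../metrics/Metrics.js';

export class RequestTracker {
  // Per-request totals (assumed), so a request can report its own usage when it ends.
  operationsSynced = 0;
  dataSyncedBytes = 0;

  addOperationsSynced(operations: number) {
    this.operationsSynced += operations;
    // Keep the global counter updated, as the pre-0.5.0 code did directly.
    Metrics.getInstance().operations_synced_total.add(operations);
  }

  addDataSynced(bytes: number) {
    this.dataSyncedBytes += bytes;
    Metrics.getInstance().data_synced_bytes.add(bytes);
  }
}
```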
package/src/sync/util.ts CHANGED
@@ -2,6 +2,7 @@ import * as timers from 'timers/promises';
 
 import * as util from '../util/util-index.js';
 import { Metrics } from '../metrics/Metrics.js';
+import { RequestTracker } from './RequestTracker.js';
 
 export type TokenStreamOptions = {
   /**
@@ -89,10 +90,13 @@ export async function* ndjson(iterator: AsyncIterable<string | null | Record<str
   }
 }
 
-export async function* transformToBytesTracked(iterator: AsyncIterable<string>): AsyncGenerator<Buffer> {
+export async function* transformToBytesTracked(
+  iterator: AsyncIterable<string>,
+  tracker: RequestTracker
+): AsyncGenerator<Buffer> {
   for await (let data of iterator) {
     const encoded = Buffer.from(data, 'utf8');
-    Metrics.getInstance().data_synced_bytes.add(encoded.length);
+    tracker.addDataSynced(encoded.length);
     yield encoded;
   }
 }
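Both `streamResponse` (new required `tracker` option) and `transformToBytesTracked` (new second argument) now expect the caller to supply the tracker, so the route handlers touched in this release (`socket-route.ts`, `sync-stream.ts`) presumably create one per request and pass it to both. A rough sketch of that wiring, assuming a no-argument `RequestTracker` constructor; `syncStreamBytes` is a hypothetical name, not the actual handler code:

```ts
// Hypothetical glue only; the real handlers live in src/routes/endpoints/ and are not shown in this diff.
import { RequestTracker } from '../../sync/RequestTracker.js';
import { streamResponse, type SyncStreamParameters } from '../../sync/sync.js';
import { ndjson, transformToBytesTracked } from '../../sync/util.js';

export async function* syncStreamBytes(options: Omit<SyncStreamParameters, 'tracker'>): AsyncGenerator<Buffer> {
  // One tracker per sync request (assumed constructor).
  const tracker = new RequestTracker();
  // streamResponse yields sync lines and keepalive tokens; ndjson serializes them,
  // and transformToBytesTracked encodes to Buffers while counting bytes on the same tracker.
  const lines = streamResponse({ ...options, tracker });
  yield* transformToBytesTracked(ndjson(lines), tracker);
}
```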
package/test/src/__snapshots__/sync.test.ts.snap CHANGED
@@ -1,5 +1,90 @@
 // Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
 
+exports[`sync - mongodb > compacting data - invalidate checkpoint 1`] = `
+[
+  {
+    "checkpoint": {
+      "buckets": [
+        {
+          "bucket": "mybucket[]",
+          "checksum": -93886621,
+          "count": 2,
+        },
+      ],
+      "last_op_id": "2",
+      "write_checkpoint": undefined,
+    },
+  },
+]
+`;
+
+exports[`sync - mongodb > compacting data - invalidate checkpoint 2`] = `
+[
+  {
+    "data": {
+      "after": "0",
+      "bucket": "mybucket[]",
+      "data": [
+        {
+          "checksum": -93886621n,
+          "op": "CLEAR",
+          "op_id": "2",
+        },
+      ],
+      "has_more": false,
+      "next_after": "2",
+    },
+  },
+  {
+    "checkpoint_diff": {
+      "last_op_id": "4",
+      "removed_buckets": [],
+      "updated_buckets": [
+        {
+          "bucket": "mybucket[]",
+          "checksum": 499012468,
+          "count": 4,
+        },
+      ],
+      "write_checkpoint": undefined,
+    },
+  },
+  {
+    "data": {
+      "after": "2",
+      "bucket": "mybucket[]",
+      "data": [
+        {
+          "checksum": 1859363232n,
+          "data": "{\\"id\\":\\"t1\\",\\"description\\":\\"Test 1b\\"}",
+          "object_id": "t1",
+          "object_type": "test",
+          "op": "PUT",
+          "op_id": "3",
+          "subkey": "6544e3899293153fa7b38331/117ab485-4b42-58a2-ab32-0053a22c3423",
+        },
+        {
+          "checksum": 3028503153n,
+          "data": "{\\"id\\":\\"t2\\",\\"description\\":\\"Test 2b\\"}",
+          "object_id": "t2",
+          "object_type": "test",
+          "op": "PUT",
+          "op_id": "4",
+          "subkey": "6544e3899293153fa7b38331/ec27c691-b47a-5d92-927a-9944feb89eee",
+        },
+      ],
+      "has_more": false,
+      "next_after": "4",
+    },
+  },
+  {
+    "checkpoint_complete": {
+      "last_op_id": "4",
+    },
+  },
+]
+`;
+
 exports[`sync - mongodb > expired token 1`] = `
 [
   {
package/test/src/bucket_validation.test.ts ADDED
@@ -0,0 +1,142 @@
+import { OplogEntry } from '@/util/protocol-types.js';
+import { describe, expect, test } from 'vitest';
+import { reduceBucket, validateBucket } from './bucket_validation.js';
+
+// This tests the reduceBucket function.
+// While this function is not used directly in the service implementation,
+// it is an important part of validating consistency in other tests.
+describe('bucket validation', () => {
+  const ops1: OplogEntry[] = [
+    {
+      op_id: '1',
+      op: 'PUT',
+      object_type: 'test',
+      object_id: 't1',
+      checksum: 2634521662,
+      subkey: '6544e3899293153fa7b38331/117ab485-4b42-58a2-ab32-0053a22c3423',
+      data: '{"id":"t1"}'
+    },
+    {
+      op_id: '2',
+      op: 'PUT',
+      object_type: 'test',
+      object_id: 't2',
+      checksum: 4243212114,
+      subkey: '6544e3899293153fa7b38331/ec27c691-b47a-5d92-927a-9944feb89eee',
+      data: '{"id":"t2"}'
+    },
+    {
+      op_id: '3',
+      op: 'REMOVE',
+      object_type: 'test',
+      object_id: 't1',
+      checksum: 4228978084,
+      subkey: '6544e3899293153fa7b38331/117ab485-4b42-58a2-ab32-0053a22c3423',
+      data: null
+    },
+    {
+      op_id: '4',
+      op: 'PUT',
+      object_type: 'test',
+      object_id: 't2',
+      checksum: 4243212114,
+      subkey: '6544e3899293153fa7b38331/ec27c691-b47a-5d92-927a-9944feb89eee',
+      data: '{"id":"t2"}'
+    }
+  ];
+
+  test('reduce 1', () => {
+    expect(reduceBucket(ops1)).toEqual([
+      {
+        checksum: -1778190028,
+        op: 'CLEAR',
+        op_id: '0'
+      },
+      {
+        checksum: 4243212114,
+        data: '{"id":"t2"}',
+        object_id: 't2',
+        object_type: 'test',
+        op: 'PUT',
+        op_id: '4',
+        subkey: '6544e3899293153fa7b38331/ec27c691-b47a-5d92-927a-9944feb89eee'
+      }
+    ]);
+
+    expect(reduceBucket(reduceBucket(ops1))).toEqual([
+      {
+        checksum: -1778190028,
+        op: 'CLEAR',
+        op_id: '0'
+      },
+      {
+        checksum: 4243212114,
+        data: '{"id":"t2"}',
+        object_id: 't2',
+        object_type: 'test',
+        op: 'PUT',
+        op_id: '4',
+        subkey: '6544e3899293153fa7b38331/ec27c691-b47a-5d92-927a-9944feb89eee'
+      }
+    ]);
+
+    validateBucket(ops1);
+  });
+
+  test('reduce 2', () => {
+    const bucket: OplogEntry[] = [
+      ...ops1,
+
+      {
+        checksum: 93784613,
+        op: 'CLEAR',
+        op_id: '5'
+      },
+      {
+        checksum: 5133378,
+        data: '{"id":"t3"}',
+        object_id: 't3',
+        object_type: 'test',
+        op: 'PUT',
+        op_id: '11',
+        subkey: '6544e3899293153fa7b38333/ec27c691-b47a-5d92-927a-9944feb89eee'
+      }
+    ];
+
+    expect(reduceBucket(bucket)).toEqual([
+      {
+        checksum: 93784613,
+        op: 'CLEAR',
+        op_id: '0'
+      },
+      {
+        checksum: 5133378,
+        data: '{"id":"t3"}',
+        object_id: 't3',
+        object_type: 'test',
+        op: 'PUT',
+        op_id: '11',
+        subkey: '6544e3899293153fa7b38333/ec27c691-b47a-5d92-927a-9944feb89eee'
+      }
+    ]);
+
+    expect(reduceBucket(reduceBucket(bucket))).toEqual([
+      {
+        checksum: 93784613,
+        op: 'CLEAR',
+        op_id: '0'
+      },
+      {
+        checksum: 5133378,
+        data: '{"id":"t3"}',
+        object_id: 't3',
+        object_type: 'test',
+        op: 'PUT',
+        op_id: '11',
+        subkey: '6544e3899293153fa7b38333/ec27c691-b47a-5d92-927a-9944feb89eee'
+      }
+    ]);
+
+    validateBucket(bucket);
+  });
+});
package/test/src/bucket_validation.ts ADDED
@@ -0,0 +1,116 @@
+import { OplogEntry } from '@/util/protocol-types.js';
+import { addChecksums } from '@/util/utils.js';
+import { expect } from 'vitest';
+
+/**
+ * Reduce a bucket to the final state as stored on the client.
+ *
+ * This keeps the final state for each row as a PUT operation.
+ *
+ * All other operations are replaced with a single CLEAR operation,
+ * summing their checksums, and using a 0 as an op_id.
+ *
+ * This is the function $r(B)$, as described in /docs/bucket-properties.md.
+ */
+export function reduceBucket(operations: OplogEntry[]) {
+  let rowState = new Map<string, OplogEntry>();
+  let otherChecksum = 0;
+
+  for (let op of operations) {
+    const key = rowKey(op);
+    if (op.op == 'PUT') {
+      const existing = rowState.get(key);
+      if (existing) {
+        otherChecksum = addChecksums(otherChecksum, existing.checksum as number);
+      }
+      rowState.set(key, op);
+    } else if (op.op == 'REMOVE') {
+      const existing = rowState.get(key);
+      if (existing) {
+        otherChecksum = addChecksums(otherChecksum, existing.checksum as number);
+      }
+      rowState.delete(key);
+      otherChecksum = addChecksums(otherChecksum, op.checksum as number);
+    } else if (op.op == 'CLEAR') {
+      rowState.clear();
+      otherChecksum = op.checksum as number;
+    } else if (op.op == 'MOVE') {
+      otherChecksum = addChecksums(otherChecksum, op.checksum as number);
+    } else {
+      throw new Error(`Unknown operation ${op.op}`);
+    }
+  }
+
+  const puts = [...rowState.values()].sort((a, b) => {
+    return Number(BigInt(a.op_id) - BigInt(b.op_id));
+  });
+
+  let finalState: OplogEntry[] = [
+    // Special operation to indiciate the checksum remainder
+    { op_id: '0', op: 'CLEAR', checksum: otherChecksum },
+    ...puts
+  ];
+
+  return finalState;
+}
+
+function rowKey(entry: OplogEntry) {
+  return `${entry.object_type}/${entry.object_id}/${entry.subkey}`;
+}
+
+/**
+ * Validate this property, as described in /docs/bucket-properties.md:
+ *
+ * $r(B_{[..id_n]}) = r(r(B_{[..id_i]}) \cup B_{[id_{i+1}..id_n]}) \;\forall\; i \in [1..n]$
+ *
+ * We test that a client syncing the entire bucket in one go (left side of the equation),
+ * ends up with the same result as another client syncing up to operation id_i, then sync
+ * the rest.
+ */
+export function validateBucket(bucket: OplogEntry[]) {
+  const r1 = reduceBucket(bucket);
+  for (let i = 0; i <= bucket.length; i++) {
+    const r2 = reduceBucket(bucket.slice(0, i + 1));
+    const b3 = bucket.slice(i + 1);
+    const r3 = r2.concat(b3);
+    const r4 = reduceBucket(r3);
+    expect(r4).toEqual(r1);
+  }
+
+  // This is the same check, just implemented differently
+  validateCompactedBucket(bucket, bucket);
+}
+
+/**
+ * Validate these properties for a bucket $B$ and its compacted version $B'$,
+ * as described in /docs/bucket-properties.md:
+ *
+ * 1. $r(B) = r(B')$
+ * 2. $r(B_{[..c]}) = r(r(B_{[..c_i]}) \cup B'_{[c_i+1..c]}) \;\forall\; c_i \in B$
+ *
+ * The first one is that the result of syncing the original bucket is the same as
+ * syncing the compacted bucket.
+ *
+ * The second property is that result of syncing the entire original bucket, is the same
+ * as syncing any partial version of that (up to op $c_i$), and then continue syncing
+ * using the compacted bucket.
+ */
+export function validateCompactedBucket(bucket: OplogEntry[], compacted: OplogEntry[]) {
+  // r(B_{[..c]})
+  const r1 = reduceBucket(bucket);
+  // r(B) = r(B')
+  expect(reduceBucket(compacted)).toEqual(r1);
+
+  for (let i = 0; i < bucket.length; i++) {
+    // r(B_{[..c_i]})
+    const r2 = reduceBucket(bucket.slice(0, i + 1));
+    const c_i = BigInt(bucket[i].op_id);
+    // B'_{[c_i+1..c]}
+    const b3 = compacted.filter((op) => BigInt(op.op_id) > c_i);
+    // r(B_{[..c_i]}) \cup B'_{[c_i+1..c]}
+    const r3 = r2.concat(b3);
+    // r(r(B_{[..c_i]}) \cup B'_{[c_i+1..c]})
+    const r4 = reduceBucket(r3);
+    expect(r4).toEqual(r1);
+  }
+}
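The doc comments above state compaction correctness in terms of `reduceBucket`. As a concrete illustration of properties 1 and 2, here is a small hand-built case, not part of the package's test suite: a superseded PUT is compacted into a MOVE that keeps the same op_id and checksum. The checksum values are made-up placeholders rather than real row checksums; `validateCompactedBucket` and `OplogEntry` are used exactly as exported above.

```ts
// Illustrative only: a tiny bucket plus a hand-compacted version of it.
// Checksums are small made-up numbers so the addChecksums arithmetic is easy to follow.
import { OplogEntry } from '@/util/protocol-types.js';
import { test } from 'vitest';
import { validateCompactedBucket } from './bucket_validation.js';

test('compacted bucket example (illustrative)', () => {
  const bucket: OplogEntry[] = [
    { op_id: '1', op: 'PUT', object_type: 'test', object_id: 'r1', subkey: 's1', checksum: 100, data: '{"id":"r1","v":1}' },
    { op_id: '2', op: 'PUT', object_type: 'test', object_id: 'r1', subkey: 's1', checksum: 200, data: '{"id":"r1","v":2}' },
    { op_id: '3', op: 'PUT', object_type: 'test', object_id: 'r2', subkey: 's2', checksum: 300, data: '{"id":"r2"}' }
  ];

  // Compaction replaces the superseded PUT (op 1) with a MOVE carrying the same op_id and checksum,
  // so a client that already synced up to op 1 still reduces to the same state when it continues
  // from the compacted bucket.
  const compacted: OplogEntry[] = [{ op_id: '1', op: 'MOVE', checksum: 100 }, bucket[1], bucket[2]];

  // Checks both r(B) = r(B') and the partial-sync property for every prefix of the original bucket.
  validateCompactedBucket(bucket, compacted);
});
```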