@powersync/service-core 0.4.2 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. package/CHANGELOG.md +6 -0
  2. package/dist/entry/cli-entry.js +2 -1
  3. package/dist/entry/cli-entry.js.map +1 -1
  4. package/dist/entry/commands/compact-action.d.ts +2 -0
  5. package/dist/entry/commands/compact-action.js +49 -0
  6. package/dist/entry/commands/compact-action.js.map +1 -0
  7. package/dist/entry/entry-index.d.ts +1 -0
  8. package/dist/entry/entry-index.js +1 -0
  9. package/dist/entry/entry-index.js.map +1 -1
  10. package/dist/storage/BucketStorage.d.ts +25 -1
  11. package/dist/storage/BucketStorage.js.map +1 -1
  12. package/dist/storage/mongo/MongoCompactor.d.ts +38 -0
  13. package/dist/storage/mongo/MongoCompactor.js +278 -0
  14. package/dist/storage/mongo/MongoCompactor.js.map +1 -0
  15. package/dist/storage/mongo/MongoSyncBucketStorage.d.ts +3 -2
  16. package/dist/storage/mongo/MongoSyncBucketStorage.js +19 -13
  17. package/dist/storage/mongo/MongoSyncBucketStorage.js.map +1 -1
  18. package/dist/storage/mongo/models.d.ts +5 -4
  19. package/dist/storage/mongo/models.js.map +1 -1
  20. package/dist/storage/mongo/util.d.ts +3 -0
  21. package/dist/storage/mongo/util.js +22 -0
  22. package/dist/storage/mongo/util.js.map +1 -1
  23. package/dist/sync/sync.js +20 -7
  24. package/dist/sync/sync.js.map +1 -1
  25. package/package.json +4 -4
  26. package/src/entry/cli-entry.ts +2 -1
  27. package/src/entry/commands/compact-action.ts +55 -0
  28. package/src/entry/entry-index.ts +1 -0
  29. package/src/storage/BucketStorage.ts +29 -1
  30. package/src/storage/mongo/MongoCompactor.ts +356 -0
  31. package/src/storage/mongo/MongoSyncBucketStorage.ts +25 -14
  32. package/src/storage/mongo/models.ts +5 -4
  33. package/src/storage/mongo/util.ts +25 -0
  34. package/src/sync/sync.ts +20 -7
  35. package/test/src/__snapshots__/sync.test.ts.snap +85 -0
  36. package/test/src/bucket_validation.test.ts +142 -0
  37. package/test/src/bucket_validation.ts +116 -0
  38. package/test/src/compacting.test.ts +207 -0
  39. package/test/src/data_storage.test.ts +19 -60
  40. package/test/src/slow_tests.test.ts +144 -102
  41. package/test/src/sync.test.ts +169 -29
  42. package/test/src/util.ts +65 -1
  43. package/test/src/wal_stream_utils.ts +13 -4
  44. package/tsconfig.tsbuildinfo +1 -1
package/src/storage/mongo/MongoCompactor.ts ADDED
@@ -0,0 +1,356 @@
+ import { logger } from '@powersync/lib-services-framework';
+ import { AnyBulkWriteOperation, MaxKey, MinKey } from 'mongodb';
+ import { addChecksums } from '../../util/utils.js';
+ import { PowerSyncMongo } from './db.js';
+ import { BucketDataDocument, BucketDataKey } from './models.js';
+ import { CompactOptions } from '../BucketStorage.js';
+
+ interface CurrentBucketState {
+   /** Bucket name */
+   bucket: string;
+   /**
+    * Rows seen in the bucket, with the last op_id of each.
+    */
+   seen: Map<string, bigint>;
+   /**
+    * Estimated memory usage of the seen Map.
+    */
+   trackingSize: number;
+
+   /**
+    * Last (lowest) seen op_id that is not a PUT.
+    */
+   lastNotPut: bigint | null;
+
+   /**
+    * Number of REMOVE/MOVE operations seen since lastNotPut.
+    */
+   opsSincePut: number;
+ }
+
+ /**
+  * Additional options, primarily for testing.
+  */
+ export interface MongoCompactOptions extends CompactOptions {
+   /** Minimum of 2 */
+   clearBatchLimit?: number;
+   /** Minimum of 1 */
+   moveBatchLimit?: number;
+   /** Minimum of 1 */
+   moveBatchQueryLimit?: number;
+ }
+
+ const DEFAULT_CLEAR_BATCH_LIMIT = 5000;
+ const DEFAULT_MOVE_BATCH_LIMIT = 2000;
+ const DEFAULT_MOVE_BATCH_QUERY_LIMIT = 10_000;
+
+ /** This default is primarily for tests. */
+ const DEFAULT_MEMORY_LIMIT_MB = 64;
+
+ export class MongoCompactor {
+   private updates: AnyBulkWriteOperation<BucketDataDocument>[] = [];
+
+   private idLimitBytes: number;
+   private moveBatchLimit: number;
+   private moveBatchQueryLimit: number;
+   private clearBatchLimit: number;
+   private maxOpId: bigint | undefined;
+
+   constructor(private db: PowerSyncMongo, private group_id: number, options?: MongoCompactOptions) {
+     this.idLimitBytes = (options?.memoryLimitMB ?? DEFAULT_MEMORY_LIMIT_MB) * 1024 * 1024;
+     this.moveBatchLimit = options?.moveBatchLimit ?? DEFAULT_MOVE_BATCH_LIMIT;
+     this.moveBatchQueryLimit = options?.moveBatchQueryLimit ?? DEFAULT_MOVE_BATCH_QUERY_LIMIT;
+     this.clearBatchLimit = options?.clearBatchLimit ?? DEFAULT_CLEAR_BATCH_LIMIT;
+     this.maxOpId = options?.maxOpId;
+   }
+
+   /**
+    * Compact buckets by converting operations into MOVE and/or CLEAR operations.
+    *
+    * See /docs/compacting-operations.md for details.
+    */
+   async compact() {
+     const idLimitBytes = this.idLimitBytes;
+
+     let currentState: CurrentBucketState | null = null;
+
+     // Constant lower bound
+     const lowerBound: BucketDataKey = {
+       g: this.group_id,
+       b: new MinKey() as any,
+       o: new MinKey() as any
+     };
+
+     // Upper bound is adjusted for each batch
+     let upperBound: BucketDataKey = {
+       g: this.group_id,
+       b: new MaxKey() as any,
+       o: new MaxKey() as any
+     };
+
+     while (true) {
+       // Query one batch at a time, to avoid cursor timeouts
+       const batch = await this.db.bucket_data
+         .find(
+           {
+             _id: {
+               $gte: lowerBound,
+               $lt: upperBound
+             }
+           },
+           {
+             projection: {
+               _id: 1,
+               op: 1,
+               table: 1,
+               row_id: 1,
+               source_table: 1,
+               source_key: 1
+             },
+             limit: this.moveBatchQueryLimit,
+             sort: { _id: -1 },
+             singleBatch: true
+           }
+         )
+         .toArray();
+
+       if (batch.length == 0) {
+         // We've reached the end
+         break;
+       }
+
+       // Set upperBound for the next batch
+       upperBound = batch[batch.length - 1]._id;
+
+       for (let doc of batch) {
+         if (currentState == null || doc._id.b != currentState.bucket) {
+           if (currentState != null && currentState.lastNotPut != null && currentState.opsSincePut >= 1) {
+             // Important to flush before clearBucket()
+             await this.flush();
+             logger.info(
+               `Inserting CLEAR at ${this.group_id}:${currentState.bucket}:${currentState.lastNotPut} to remove ${currentState.opsSincePut} operations`
+             );
+
+             const bucket = currentState.bucket;
+             const clearOp = currentState.lastNotPut;
+             // Free memory before clearing bucket
+             currentState = null;
+             await this.clearBucket(bucket, clearOp);
+           }
+           currentState = {
+             bucket: doc._id.b,
+             seen: new Map(),
+             trackingSize: 0,
+             lastNotPut: null,
+             opsSincePut: 0
+           };
+         }
+
+         if (this.maxOpId != null && doc._id.o > this.maxOpId) {
+           continue;
+         }
+
+         let isPersistentPut = doc.op == 'PUT';
+
+         if (doc.op == 'REMOVE' || doc.op == 'PUT') {
+           const key = `${doc.table}/${doc.row_id}/${doc.source_table}/${doc.source_key?.toHexString()}`;
+           const targetOp = currentState.seen.get(key);
+           if (targetOp) {
+             // Will convert to MOVE, so don't count as PUT
+             isPersistentPut = false;
+
+             this.updates.push({
+               updateOne: {
+                 filter: {
+                   _id: doc._id
+                 },
+                 update: {
+                   $set: {
+                     op: 'MOVE',
+                     target_op: targetOp
+                   },
+                   $unset: {
+                     source_table: 1,
+                     source_key: 1,
+                     table: 1,
+                     row_id: 1,
+                     data: 1
+                   }
+                 }
+               }
+             });
+           } else {
+             if (currentState.trackingSize >= idLimitBytes) {
+               // Reached memory limit.
+               // Keep the highest seen values in this case.
+             } else {
+               // flatstr reduces the memory usage by flattening the string
+               currentState.seen.set(flatstr(key), doc._id.o);
+               // length + 16 for the string
+               // 24 for the bigint
+               // 50 for map overhead
+               // 50 for additional overhead
+               currentState.trackingSize += key.length + 140;
+             }
+           }
+         }
+
+         if (isPersistentPut) {
+           currentState.lastNotPut = null;
+           currentState.opsSincePut = 0;
+         } else if (doc.op != 'CLEAR') {
+           if (currentState.lastNotPut == null) {
+             currentState.lastNotPut = doc._id.o;
+           }
+           currentState.opsSincePut += 1;
+         }
+
+         if (this.updates.length >= this.moveBatchLimit) {
+           await this.flush();
+         }
+       }
+     }
+
+     await this.flush();
+     currentState?.seen.clear();
+     if (currentState?.lastNotPut != null && currentState?.opsSincePut > 1) {
+       logger.info(
+         `Inserting CLEAR at ${this.group_id}:${currentState.bucket}:${currentState.lastNotPut} to remove ${currentState.opsSincePut} operations`
+       );
+       const bucket = currentState.bucket;
+       const clearOp = currentState.lastNotPut;
+       // Free memory before clearing bucket
+       currentState = null;
+       await this.clearBucket(bucket, clearOp);
+     }
+   }
+
+   private async flush() {
+     if (this.updates.length > 0) {
+       logger.info(`Compacting ${this.updates.length} ops`);
+       await this.db.bucket_data.bulkWrite(this.updates, {
+         // Order is not important.
+         // Since checksums are not affected, these operations can happen in any order,
+         // and it's fine if the operations are partially applied.
+         // Each individual operation is atomic.
+         ordered: false
+       });
+       this.updates = [];
+     }
+   }
+
+   /**
+    * Perform a CLEAR compact for a bucket.
+    *
+    * @param bucket bucket name
+    * @param op op_id of the last non-PUT operation, which will be converted to CLEAR.
+    */
+   private async clearBucket(bucket: string, op: bigint) {
+     const opFilter = {
+       _id: {
+         $gte: {
+           g: this.group_id,
+           b: bucket,
+           o: new MinKey() as any
+         },
+         $lte: {
+           g: this.group_id,
+           b: bucket,
+           o: op
+         }
+       }
+     };
+
+     const session = this.db.client.startSession();
+     try {
+       let done = false;
+       while (!done) {
+         // Do the CLEAR operation in batches, with each batch a separate transaction.
+         // The state after each batch is fully consistent.
+         // We need a transaction per batch to make sure checksums stay consistent.
+         await session.withTransaction(
+           async () => {
+             const query = this.db.bucket_data.find(opFilter, {
+               session,
+               sort: { _id: 1 },
+               projection: {
+                 _id: 1,
+                 op: 1,
+                 checksum: 1,
+                 target_op: 1
+               },
+               limit: this.clearBatchLimit
+             });
+             let checksum = 0;
+             let lastOpId: BucketDataKey | null = null;
+             let targetOp: bigint | null = null;
+             let gotAnOp = false;
+             for await (let op of query.stream()) {
+               if (op.op == 'MOVE' || op.op == 'REMOVE' || op.op == 'CLEAR') {
+                 checksum = addChecksums(checksum, op.checksum);
+                 lastOpId = op._id;
+                 if (op.op != 'CLEAR') {
+                   gotAnOp = true;
+                 }
+                 if (op.target_op != null) {
+                   if (targetOp == null || op.target_op > targetOp) {
+                     targetOp = op.target_op;
+                   }
+                 }
+               } else {
+                 throw new Error(`Unexpected ${op.op} operation at ${op._id.g}:${op._id.b}:${op._id.o}`);
+               }
+             }
+             if (!gotAnOp) {
+               done = true;
+               return;
+             }
+
+             logger.info(`Flushing CLEAR at ${lastOpId?.o}`);
+             await this.db.bucket_data.deleteMany(
+               {
+                 _id: {
+                   $gte: {
+                     g: this.group_id,
+                     b: bucket,
+                     o: new MinKey() as any
+                   },
+                   $lte: lastOpId!
+                 }
+               },
+               { session }
+             );
+
+             await this.db.bucket_data.insertOne(
+               {
+                 _id: lastOpId!,
+                 op: 'CLEAR',
+                 checksum: checksum,
+                 data: null,
+                 target_op: targetOp
+               },
+               { session }
+             );
+           },
+           {
+             writeConcern: { w: 'majority' },
+             readConcern: { level: 'snapshot' }
+           }
+         );
+       }
+     } finally {
+       await session.endSession();
+     }
+   }
+ }
+
+ /**
+  * Flattens string to reduce memory usage (around 320 bytes -> 120 bytes),
+  * at the cost of some upfront CPU usage.
+  *
+  * From: https://github.com/davidmarkclements/flatstr/issues/8
+  */
+ function flatstr(s: string) {
+   s.match(/\n/g);
+   return s;
+ }
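
For orientation: the compactor above rewrites superseded PUT/REMOVE operations as MOVE operations and collapses a bucket's leading run of non-PUT operations into a single CLEAR. The sketch below is illustrative only and not part of this package; every name in it (SketchOp, compactSketch) is hypothetical, and it reproduces the intended net effect on a simplified in-memory oplog rather than the MongoDB-backed implementation.

// Illustration only: hypothetical types and helper, sketching the net effect of compaction.
type SketchOp = { op_id: bigint; op: 'PUT' | 'REMOVE' | 'MOVE' | 'CLEAR'; row?: string };

function compactSketch(ops: SketchOp[]): SketchOp[] {
  // Last PUT/REMOVE op_id per row; any earlier op for the same row is superseded.
  const lastOpForRow = new Map<string, bigint>();
  for (const o of ops) {
    if (o.row != null && (o.op === 'PUT' || o.op === 'REMOVE')) {
      lastOpForRow.set(o.row, o.op_id);
    }
  }
  // Superseded PUT/REMOVE ops become MOVE ops (the real compactor also drops their
  // data while leaving checksums unchanged).
  const moved: SketchOp[] = ops.map((o) =>
    o.row != null && (o.op === 'PUT' || o.op === 'REMOVE') && lastOpForRow.get(o.row) !== o.op_id
      ? { op_id: o.op_id, op: 'MOVE' as const }
      : o
  );
  // A leading run of non-PUT ops collapses into a single CLEAR at the last such op_id.
  let firstPut = moved.findIndex((o) => o.op === 'PUT');
  if (firstPut === -1) firstPut = moved.length;
  if (firstPut > 1) {
    return [{ op_id: moved[firstPut - 1].op_id, op: 'CLEAR' }, ...moved.slice(firstPut)];
  }
  return moved;
}

// Example: two rows each updated twice compact down to one CLEAR plus the latest PUTs:
// [PUT 1 t1, PUT 2 t2, PUT 3 t1, PUT 4 t2] -> [CLEAR 2, PUT 3 t1, PUT 4 t2]
console.log(
  compactSketch([
    { op_id: 1n, op: 'PUT', row: 't1' },
    { op_id: 2n, op: 'PUT', row: 't2' },
    { op_id: 3n, op: 'PUT', row: 't1' },
    { op_id: 4n, op: 'PUT', row: 't2' }
  ])
);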
package/src/storage/mongo/MongoSyncBucketStorage.ts CHANGED
@@ -8,21 +8,24 @@ import * as util from '../../util/util-index.js';
  import {
    BucketDataBatchOptions,
    BucketStorageBatch,
+   CompactOptions,
    DEFAULT_DOCUMENT_BATCH_LIMIT,
    DEFAULT_DOCUMENT_CHUNK_LIMIT_BYTES,
    FlushedResult,
    ResolveTableOptions,
    ResolveTableResult,
+   SyncBucketDataBatch,
    SyncRulesBucketStorage,
    SyncRuleStatus
  } from '../BucketStorage.js';
+ import { ChecksumCache, FetchPartialBucketChecksum } from '../ChecksumCache.js';
  import { MongoBucketStorage } from '../MongoBucketStorage.js';
  import { SourceTable } from '../SourceTable.js';
  import { PowerSyncMongo } from './db.js';
  import { BucketDataDocument, BucketDataKey, SourceKey, SyncRuleState } from './models.js';
  import { MongoBucketBatch } from './MongoBucketBatch.js';
- import { BSON_DESERIALIZE_OPTIONS, idPrefixFilter, readSingleBatch, serializeLookup } from './util.js';
- import { ChecksumCache, FetchPartialBucketChecksum } from '../ChecksumCache.js';
+ import { MongoCompactor } from './MongoCompactor.js';
+ import { BSON_DESERIALIZE_OPTIONS, idPrefixFilter, mapOpEntry, readSingleBatch, serializeLookup } from './util.js';

  export class MongoSyncBucketStorage implements SyncRulesBucketStorage {
    private readonly db: PowerSyncMongo;
@@ -201,7 +204,7 @@ export class MongoSyncBucketStorage implements SyncRulesBucketStorage {
      checkpoint: util.OpId,
      dataBuckets: Map<string, string>,
      options?: BucketDataBatchOptions
-   ): AsyncIterable<util.SyncBucketData> {
+   ): AsyncIterable<SyncBucketDataBatch> {
      if (dataBuckets.size == 0) {
        return;
      }
@@ -267,6 +270,7 @@ export class MongoSyncBucketStorage implements SyncRulesBucketStorage {

      let batchSize = 0;
      let currentBatch: util.SyncBucketData | null = null;
+     let targetOp: bigint | null = null;

      // Ordered by _id, meaning buckets are grouped together
      for (let rawData of data) {
@@ -284,7 +288,8 @@ export class MongoSyncBucketStorage implements SyncRulesBucketStorage {
          start = currentBatch.after;
          currentBatch = null;
          batchSize = 0;
-         yield yieldBatch;
+         yield { batch: yieldBatch, targetOp: targetOp };
+         targetOp = null;
        }

        start ??= dataBuckets.get(bucket);
@@ -298,17 +303,18 @@ export class MongoSyncBucketStorage implements SyncRulesBucketStorage {
            data: [],
            next_after: start
          };
+         targetOp = null;
+       }
+
+       const entry = mapOpEntry(row);
+
+       if (row.target_op != null) {
+         // MOVE, CLEAR
+         if (targetOp == null || row.target_op > targetOp) {
+           targetOp = row.target_op;
+         }
        }

-       const entry: util.OplogEntry = {
-         op_id: util.timestampToOpId(row._id.o),
-         op: row.op,
-         object_type: row.table,
-         object_id: row.row_id,
-         checksum: Number(row.checksum),
-         subkey: `${row.source_table}/${row.source_key.toHexString()}`,
-         data: row.data
-       };
        currentBatch.data.push(entry);
        currentBatch.next_after = entry.op_id;

@@ -318,7 +324,8 @@ export class MongoSyncBucketStorage implements SyncRulesBucketStorage {
      if (currentBatch != null) {
        const yieldBatch = currentBatch;
        currentBatch = null;
-       yield yieldBatch;
+       yield { batch: yieldBatch, targetOp: targetOp };
+       targetOp = null;
      }
    }

@@ -530,4 +537,8 @@ export class MongoSyncBucketStorage implements SyncRulesBucketStorage {
        }
      );
    }
+
+   async compact(options?: CompactOptions) {
+     return new MongoCompactor(this.db, this.group_id, options).compact();
+   }
  }
package/src/storage/mongo/models.ts CHANGED
@@ -42,12 +42,13 @@ export interface BucketParameterDocument {
  export interface BucketDataDocument {
    _id: BucketDataKey;
    op: OpType;
-   source_table: bson.ObjectId;
-   source_key: bson.UUID;
-   table: string;
-   row_id: string;
+   source_table?: bson.ObjectId;
+   source_key?: bson.UUID;
+   table?: string;
+   row_id?: string;
    checksum: number;
    data: string | null;
+   target_op?: bigint | null;
  }

  export type OpType = 'PUT' | 'REMOVE' | 'MOVE' | 'CLEAR';
package/src/storage/mongo/util.ts CHANGED
@@ -2,6 +2,9 @@ import { SqliteJsonValue } from '@powersync/service-sync-rules';
  import * as bson from 'bson';
  import * as mongo from 'mongodb';
  import * as crypto from 'crypto';
+ import { BucketDataDocument } from './models.js';
+ import { timestampToOpId } from '../../util/utils.js';
+ import { OplogEntry } from '../../util/protocol-types.js';

  /**
   * Lookup serialization must be number-agnostic. I.e. normalize numbers, instead of preserving numbers.
@@ -86,3 +89,25 @@ export const BSON_DESERIALIZE_OPTIONS: bson.DeserializeOptions = {
    // use bigint instead of Long
    useBigInt64: true
  };
+
+ export function mapOpEntry(row: BucketDataDocument): OplogEntry {
+   if (row.op == 'PUT' || row.op == 'REMOVE') {
+     return {
+       op_id: timestampToOpId(row._id.o),
+       op: row.op,
+       object_type: row.table,
+       object_id: row.row_id,
+       checksum: Number(row.checksum),
+       subkey: `${row.source_table}/${row.source_key!.toHexString()}`,
+       data: row.data
+     };
+   } else {
+     // MOVE, CLEAR
+
+     return {
+       op_id: timestampToOpId(row._id.o),
+       op: row.op,
+       checksum: Number(row.checksum)
+     };
+   }
+ }
package/src/sync/sync.ts CHANGED
@@ -258,6 +258,9 @@ interface BucketDataBatchResult {
  async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<BucketDataBatchResult, void> {
    const { storage, checkpoint, bucketsToFetch, dataBuckets, raw_data, binary_data, tracker, signal } = request;

+   const checkpointOp = BigInt(checkpoint);
+   let checkpointInvalidated = false;
+
    const [_, release] = await syncSemaphore.acquire();
    try {
      // Optimization: Only fetch buckets for which the checksums have changed since the last checkpoint
@@ -267,13 +270,16 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck

      let has_more = false;

-     for await (let r of data) {
+     for await (let { batch: r, targetOp } of data) {
        if (signal.aborted) {
          return;
        }
        if (r.has_more) {
          has_more = true;
        }
+       if (targetOp != null && targetOp > checkpointOp) {
+         checkpointInvalidated = true;
+       }
        if (r.data.length == 0) {
          continue;
        }
@@ -309,12 +315,19 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
      }

      if (!has_more) {
-       const line: util.StreamingSyncCheckpointComplete = {
-         checkpoint_complete: {
-           last_op_id: checkpoint
-         }
-       };
-       yield { data: line, done: true };
+       if (checkpointInvalidated) {
+         // Checkpoint invalidated by a CLEAR or MOVE op.
+         // Don't send the checkpoint_complete line in this case.
+         // More data should be available immediately for a new checkpoint.
+         yield { data: null, done: true };
+       } else {
+         const line: util.StreamingSyncCheckpointComplete = {
+           checkpoint_complete: {
+             last_op_id: checkpoint
+           }
+         };
+         yield { data: line, done: true };
+       }
      }
    } finally {
      release();
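
The sync.ts change above withholds checkpoint_complete whenever a streamed MOVE or CLEAR entry carries a target_op beyond the current checkpoint, since the client then needs a newer checkpoint before its data is consistent. A minimal sketch of that decision follows; the StreamedBatch shape and canCompleteCheckpoint helper are assumptions made for illustration, not the package's own API.

// Sketch only: hypothetical names, simplified from the generator logic above.
interface StreamedBatch {
  // Highest target_op of MOVE/CLEAR entries in this batch, if any.
  targetOp: bigint | null;
}

function canCompleteCheckpoint(checkpoint: string, batches: StreamedBatch[]): boolean {
  const checkpointOp = BigInt(checkpoint);
  // If compaction pointed any op past this checkpoint, the data the client needs only
  // exists after the checkpoint, so checkpoint_complete must wait for a newer one.
  return !batches.some((b) => b.targetOp != null && b.targetOp > checkpointOp);
}

// e.g. canCompleteCheckpoint('2', [{ targetOp: 4n }]) === false
//      canCompleteCheckpoint('4', [{ targetOp: 4n }]) === true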
package/test/src/__snapshots__/sync.test.ts.snap CHANGED
@@ -1,5 +1,90 @@
  // Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html

+ exports[`sync - mongodb > compacting data - invalidate checkpoint 1`] = `
+ [
+   {
+     "checkpoint": {
+       "buckets": [
+         {
+           "bucket": "mybucket[]",
+           "checksum": -93886621,
+           "count": 2,
+         },
+       ],
+       "last_op_id": "2",
+       "write_checkpoint": undefined,
+     },
+   },
+ ]
+ `;
+
+ exports[`sync - mongodb > compacting data - invalidate checkpoint 2`] = `
+ [
+   {
+     "data": {
+       "after": "0",
+       "bucket": "mybucket[]",
+       "data": [
+         {
+           "checksum": -93886621n,
+           "op": "CLEAR",
+           "op_id": "2",
+         },
+       ],
+       "has_more": false,
+       "next_after": "2",
+     },
+   },
+   {
+     "checkpoint_diff": {
+       "last_op_id": "4",
+       "removed_buckets": [],
+       "updated_buckets": [
+         {
+           "bucket": "mybucket[]",
+           "checksum": 499012468,
+           "count": 4,
+         },
+       ],
+       "write_checkpoint": undefined,
+     },
+   },
+   {
+     "data": {
+       "after": "2",
+       "bucket": "mybucket[]",
+       "data": [
+         {
+           "checksum": 1859363232n,
+           "data": "{\\"id\\":\\"t1\\",\\"description\\":\\"Test 1b\\"}",
+           "object_id": "t1",
+           "object_type": "test",
+           "op": "PUT",
+           "op_id": "3",
+           "subkey": "6544e3899293153fa7b38331/117ab485-4b42-58a2-ab32-0053a22c3423",
+         },
+         {
+           "checksum": 3028503153n,
+           "data": "{\\"id\\":\\"t2\\",\\"description\\":\\"Test 2b\\"}",
+           "object_id": "t2",
+           "object_type": "test",
+           "op": "PUT",
+           "op_id": "4",
+           "subkey": "6544e3899293153fa7b38331/ec27c691-b47a-5d92-927a-9944feb89eee",
+         },
+       ],
+       "has_more": false,
+       "next_after": "4",
+     },
+   },
+   {
+     "checkpoint_complete": {
+       "last_op_id": "4",
+     },
+   },
+ ]
+ `;
+

  exports[`sync - mongodb > expired token 1`] = `
  [
    {