@powersync/service-module-mongodb-storage 0.0.0-dev-20250507154604 → 0.0.0-dev-20250611110033

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -7,11 +7,19 @@ import {
   container,
   ErrorCode,
   errors,
-  logger,
+  Logger,
+  logger as defaultLogger,
   ReplicationAssertionError,
   ServiceError
 } from '@powersync/lib-services-framework';
-import { deserializeBson, InternalOpId, SaveOperationTag, storage, utils } from '@powersync/service-core';
+import {
+  BucketStorageMarkRecordUnavailable,
+  deserializeBson,
+  InternalOpId,
+  SaveOperationTag,
+  storage,
+  utils
+} from '@powersync/service-core';
 import * as timers from 'node:timers/promises';
 import { PowerSyncMongo } from './db.js';
 import { CurrentBucket, CurrentDataDocument, SourceKey, SyncRuleDocument } from './models.js';
@@ -46,12 +54,18 @@ export interface MongoBucketBatchOptions {
    * Set to true for initial replication.
    */
   skipExistingRows: boolean;
+
+  markRecordUnavailable: BucketStorageMarkRecordUnavailable | undefined;
+
+  logger?: Logger;
 }

 export class MongoBucketBatch
   extends BaseObserver<storage.BucketBatchStorageListener>
   implements storage.BucketStorageBatch
 {
+  private logger: Logger;
+
   private readonly client: mongo.MongoClient;
   public readonly db: PowerSyncMongo;
   public readonly session: mongo.ClientSession;
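The options interface gains `markRecordUnavailable` and an optional per-batch `logger`; both are forwarded from `MongoSyncBucketStorage.startBatch` (see the hunks further down). A minimal sketch of what a caller might pass, assuming the service-core `startBatch(options, callback)` signature; `replicationLogger`, `resnapshotQueue` and the `zeroLSN`/`defaultSchema` values are illustrative, not taken from this diff:

```ts
// Sketch only: wiring the new options through startBatch.
await bucketStorage.startBatch(
  {
    zeroLSN: '0/00000000',   // connector-specific "zero" LSN (illustrative)
    defaultSchema: 'public', // illustrative default schema
    storeCurrentData: true,
    skipExistingRows: false,
    // New: per-stream logger; MongoBucketBatch falls back to the framework default logger when omitted.
    logger: replicationLogger,
    // New: called when the previous version of a row cannot be found in current_data.
    markRecordUnavailable: (record) => resnapshotQueue.push(record)
  },
  async (batch) => {
    // ... batch.save(...) incoming changes, then batch.commit(lsn, options) ...
  }
);
```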
@@ -65,6 +79,7 @@ export class MongoBucketBatch

   private batch: OperationBatch | null = null;
   private write_checkpoint_batch: storage.CustomWriteCheckpointOptions[] = [];
+  private markRecordUnavailable: BucketStorageMarkRecordUnavailable | undefined;

   /**
    * Last LSN received associated with a checkpoint.
@@ -86,6 +101,7 @@ export class MongoBucketBatch

   constructor(options: MongoBucketBatchOptions) {
     super();
+    this.logger = options.logger ?? defaultLogger;
     this.client = options.db.client;
     this.db = options.db;
     this.group_id = options.groupId;
@@ -96,6 +112,7 @@ export class MongoBucketBatch
     this.sync_rules = options.syncRules;
     this.storeCurrentData = options.storeCurrentData;
     this.skipExistingRows = options.skipExistingRows;
+    this.markRecordUnavailable = options.markRecordUnavailable;
     this.batch = new OperationBatch();

     this.persisted_op = options.keepaliveOp ?? null;
@@ -112,12 +129,12 @@ export class MongoBucketBatch
     return this.last_checkpoint_lsn;
   }

-  async flush(): Promise<storage.FlushedResult | null> {
+  async flush(options?: storage.BucketBatchCommitOptions): Promise<storage.FlushedResult | null> {
     let result: storage.FlushedResult | null = null;
     // One flush may be split over multiple transactions.
     // Each flushInner() is one transaction.
     while (this.batch != null) {
-      let r = await this.flushInner();
+      let r = await this.flushInner(options);
       if (r) {
         result = r;
       }
@@ -127,7 +144,7 @@ export class MongoBucketBatch
     return result;
   }

-  private async flushInner(): Promise<storage.FlushedResult | null> {
+  private async flushInner(options?: storage.BucketBatchCommitOptions): Promise<storage.FlushedResult | null> {
     const batch = this.batch;
     if (batch == null) {
       return null;
@@ -137,7 +154,7 @@ export class MongoBucketBatch
     let resumeBatch: OperationBatch | null = null;

     await this.withReplicationTransaction(`Flushing ${batch.length} ops`, async (session, opSeq) => {
-      resumeBatch = await this.replicateBatch(session, batch, opSeq);
+      resumeBatch = await this.replicateBatch(session, batch, opSeq, options);

       last_op = opSeq.last();
     });
@@ -157,7 +174,8 @@ export class MongoBucketBatch
   private async replicateBatch(
     session: mongo.ClientSession,
     batch: OperationBatch,
-    op_seq: MongoIdSequence
+    op_seq: MongoIdSequence,
+    options?: storage.BucketBatchCommitOptions
   ): Promise<OperationBatch | null> {
     let sizes: Map<string, number> | undefined = undefined;
     if (this.storeCurrentData && !this.skipExistingRows) {
@@ -231,7 +249,9 @@ export class MongoBucketBatch
       current_data_lookup.set(cacheKey(doc._id.t, doc._id.k), doc);
     }

-    let persistedBatch: PersistedBatch | null = new PersistedBatch(this.group_id, transactionSize);
+    let persistedBatch: PersistedBatch | null = new PersistedBatch(this.group_id, transactionSize, {
+      logger: this.logger
+    });

     for (let op of b) {
       if (resumeBatch) {
@@ -253,7 +273,7 @@ export class MongoBucketBatch
         if (persistedBatch!.shouldFlushTransaction()) {
           // Transaction is getting big.
           // Flush, and resume in a new transaction.
-          await persistedBatch!.flush(this.db, this.session);
+          await persistedBatch!.flush(this.db, this.session, options);
           persistedBatch = null;
           // Computing our current progress is a little tricky here, since
           // we're stopping in the middle of a batch.
@@ -264,7 +284,7 @@ export class MongoBucketBatch

     if (persistedBatch) {
       transactionSize = persistedBatch.currentSize;
-      await persistedBatch.flush(this.db, this.session);
+      await persistedBatch.flush(this.db, this.session, options);
     }
   }

@@ -310,11 +330,18 @@ export class MongoBucketBatch
       // Not an error if we re-apply a transaction
       existing_buckets = [];
       existing_lookups = [];
-      // Log to help with debugging if there was a consistency issue
       if (this.storeCurrentData) {
-        logger.warn(
-          `Cannot find previous record for update on ${record.sourceTable.qualifiedName}: ${beforeId} / ${record.before?.id}`
-        );
+        if (this.markRecordUnavailable != null) {
+          // This will trigger a "resnapshot" of the record.
+          // This is not relevant if storeCurrentData is false, since we'll get the full row
+          // directly in the replication stream.
+          this.markRecordUnavailable(record);
+        } else {
+          // Log to help with debugging if there was a consistency issue
+          this.logger.warn(
+            `Cannot find previous record for update on ${record.sourceTable.qualifiedName}: ${beforeId} / ${record.before?.id}`
+          );
+        }
       }
     } else {
       existing_buckets = result.buckets;
@@ -331,8 +358,8 @@ export class MongoBucketBatch
       existing_buckets = [];
       existing_lookups = [];
       // Log to help with debugging if there was a consistency issue
-      if (this.storeCurrentData) {
-        logger.warn(
+      if (this.storeCurrentData && this.markRecordUnavailable == null) {
+        this.logger.warn(
           `Cannot find previous record for delete on ${record.sourceTable.qualifiedName}: ${beforeId} / ${record.before?.id}`
         );
       }
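The two hunks above change what happens when the stored `current_data` for a row is missing while `storeCurrentData` is enabled: for updates, a supplied `markRecordUnavailable` callback is invoked so the row can be re-snapshotted from the source, and for deletes the consistency warning is simply suppressed when the callback is present. A rough sketch of the callback contract, with the queue being a hypothetical caller-side detail:

```ts
import { BucketStorageMarkRecordUnavailable } from '@powersync/service-core';

// Hypothetical holding area for rows that need to be re-read from the source database.
const resnapshotQueue: Parameters<BucketStorageMarkRecordUnavailable>[0][] = [];

const markRecordUnavailable: BucketStorageMarkRecordUnavailable = (record) => {
  // Invoked by MongoBucketBatch when the previous row version is missing;
  // the replicator decides how and when to re-snapshot the record.
  resnapshotQueue.push(record);
};
```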
@@ -429,7 +456,7 @@ export class MongoBucketBatch
         }
       }
     );
-    logger.error(
+    this.logger.error(
       `Failed to evaluate data query on ${record.sourceTable.qualifiedName}.${record.after?.id}: ${error.error}`
     );
   }
@@ -469,7 +496,7 @@ export class MongoBucketBatch
         }
       }
     );
-    logger.error(
+    this.logger.error(
      `Failed to evaluate parameter query on ${record.sourceTable.qualifiedName}.${after.id}: ${error.error}`
     );
   }
@@ -523,7 +550,7 @@ export class MongoBucketBatch
       if (e instanceof mongo.MongoError && e.hasErrorLabel('TransientTransactionError')) {
         // Likely write conflict caused by concurrent write stream replicating
       } else {
-        logger.warn('Transaction error', e as Error);
+        this.logger.warn('Transaction error', e as Error);
       }
       await timers.setTimeout(Math.random() * 50);
       throw e;
@@ -548,7 +575,7 @@ export class MongoBucketBatch
     await this.withTransaction(async () => {
       flushTry += 1;
       if (flushTry % 10 == 0) {
-        logger.info(`${this.slot_name} ${description} - try ${flushTry}`);
+        this.logger.info(`${description} - try ${flushTry}`);
       }
       if (flushTry > 20 && Date.now() > lastTry) {
         throw new ServiceError(ErrorCode.PSYNC_S1402, 'Max transaction tries exceeded');
@@ -613,17 +640,18 @@ export class MongoBucketBatch
   async commit(lsn: string, options?: storage.BucketBatchCommitOptions): Promise<boolean> {
     const { createEmptyCheckpoints } = { ...storage.DEFAULT_BUCKET_BATCH_COMMIT_OPTIONS, ...options };

-    await this.flush();
+    await this.flush(options);

     if (this.last_checkpoint_lsn != null && lsn < this.last_checkpoint_lsn) {
       // When re-applying transactions, don't create a new checkpoint until
       // we are past the last transaction.
-      logger.info(`Re-applied transaction ${lsn} - skipping checkpoint`);
+      this.logger.info(`Re-applied transaction ${lsn} - skipping checkpoint`);
+      // Cannot create a checkpoint yet - return false
       return false;
     }
     if (lsn < this.no_checkpoint_before_lsn) {
       if (Date.now() - this.lastWaitingLogThottled > 5_000) {
-        logger.info(
+        this.logger.info(
           `Waiting until ${this.no_checkpoint_before_lsn} before creating checkpoint, currently at ${lsn}. Persisted op: ${this.persisted_op}`
         );
         this.lastWaitingLogThottled = Date.now();
@@ -647,11 +675,13 @@ export class MongoBucketBatch
         { session: this.session }
       );

+      // Cannot create a checkpoint yet - return false
       return false;
     }

     if (!createEmptyCheckpoints && this.persisted_op == null) {
-      return false;
+      // Nothing to commit - also return true
+      return true;
     }

     const now = new Date();
@@ -673,7 +703,8 @@ export class MongoBucketBatch
         _id: this.group_id
       },
       {
-        $set: update
+        $set: update,
+        $unset: { snapshot_lsn: 1 }
       },
       { session: this.session }
     );
@@ -695,7 +726,7 @@ export class MongoBucketBatch
     if (this.persisted_op != null) {
       // The commit may have been skipped due to "no_checkpoint_before_lsn".
       // Apply it now if relevant
-      logger.info(`Commit due to keepalive at ${lsn} / ${this.persisted_op}`);
+      this.logger.info(`Commit due to keepalive at ${lsn} / ${this.persisted_op}`);
       return await this.commit(lsn);
     }

@@ -709,7 +740,8 @@ export class MongoBucketBatch
           snapshot_done: true,
           last_fatal_error: null,
           last_keepalive_ts: new Date()
-        }
+        },
+        $unset: { snapshot_lsn: 1 }
       },
       { session: this.session }
     );
@@ -718,6 +750,22 @@ export class MongoBucketBatch
     return true;
   }

+  async setSnapshotLsn(lsn: string): Promise<void> {
+    const update: Partial<SyncRuleDocument> = {
+      snapshot_lsn: lsn
+    };
+
+    await this.db.sync_rules.updateOne(
+      {
+        _id: this.group_id
+      },
+      {
+        $set: update
+      },
+      { session: this.session }
+    );
+  }
+
   async save(record: storage.SaveOptions): Promise<storage.FlushedResult | null> {
     const { after, before, sourceTable, tag } = record;
     for (const event of this.getTableEvents(sourceTable)) {
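The new `setSnapshotLsn` method stores the LSN at which the initial snapshot started in the sync rules document (`snapshot_lsn`); `commit` and `keepalive` unset it again once a consistent checkpoint exists. A hedged sketch of how a replicator might use this to resume an interrupted snapshot after a restart; `getCurrentSourceLsn`, `resumeSnapshotFrom` and `runInitialSnapshot` are hypothetical connector helpers, and the status accessor is assumed to be the query extended with `snapshot_lsn` later in this diff:

```ts
// Sketch only: resuming an initial snapshot instead of restarting it.
const status = await bucketStorage.getStatus(); // assumed status accessor

if (!status.snapshot_done) {
  if (status.snapshot_lsn != null) {
    // A snapshot was already in progress before the restart - resume it.
    await resumeSnapshotFrom(status.snapshot_lsn);
  } else {
    // First attempt: record where the snapshot starts before copying any rows.
    const lsn = await getCurrentSourceLsn();
    await batch.setSnapshotLsn(lsn);
    await runInitialSnapshot();
  }
}
```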
@@ -742,7 +790,7 @@ export class MongoBucketBatch
       return null;
     }

-    logger.debug(`Saving ${record.tag}:${record.before?.id}/${record.after?.id}`);
+    this.logger.debug(`Saving ${record.tag}:${record.before?.id}/${record.after?.id}`);

     this.batch ??= new OperationBatch();
     this.batch.push(new RecordOperation(record));
@@ -813,7 +861,7 @@ export class MongoBucketBatch
       session: session
     });
     const batch = await cursor.toArray();
-    const persistedBatch = new PersistedBatch(this.group_id, 0);
+    const persistedBatch = new PersistedBatch(this.group_id, 0, { logger: this.logger });

     for (let value of batch) {
       persistedBatch.saveBucketData({
@@ -843,6 +891,37 @@ export class MongoBucketBatch
     return last_op!;
   }

+  async updateTableProgress(
+    table: storage.SourceTable,
+    progress: Partial<storage.TableSnapshotStatus>
+  ): Promise<storage.SourceTable> {
+    const copy = table.clone();
+    const snapshotStatus = {
+      totalEstimatedCount: progress.totalEstimatedCount ?? copy.snapshotStatus?.totalEstimatedCount ?? 0,
+      replicatedCount: progress.replicatedCount ?? copy.snapshotStatus?.replicatedCount ?? 0,
+      lastKey: progress.lastKey ?? copy.snapshotStatus?.lastKey ?? null
+    };
+    copy.snapshotStatus = snapshotStatus;
+
+    await this.withTransaction(async () => {
+      await this.db.source_tables.updateOne(
+        { _id: table.id },
+        {
+          $set: {
+            snapshot_status: {
+              last_key: snapshotStatus.lastKey == null ? null : new bson.Binary(snapshotStatus.lastKey),
+              total_estimated_count: snapshotStatus.totalEstimatedCount,
+              replicated_count: snapshotStatus.replicatedCount
+            }
+          }
+        },
+        { session: this.session }
+      );
+    });
+
+    return copy;
+  }
+
   async markSnapshotDone(tables: storage.SourceTable[], no_checkpoint_before_lsn: string) {
     const session = this.session;
     const ids = tables.map((table) => table.id);
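`updateTableProgress` persists per-table snapshot progress (estimated total, replicated row count, and the last primary key processed) under `snapshot_status` in `source_tables`, and returns an updated `SourceTable` clone carrying the same values in memory. A sketch of a snapshot loop reporting progress per chunk; `readChunkAfter` and `encodeKey` are hypothetical connector helpers:

```ts
// Illustrative only: report progress after each chunk so an interrupted
// snapshot can later resume from snapshotStatus.lastKey.
let table = sourceTable; // storage.SourceTable, e.g. from resolveTable
let replicated = table.snapshotStatus?.replicatedCount ?? 0;

for await (const chunk of readChunkAfter(table, table.snapshotStatus?.lastKey ?? null)) {
  // ... save the chunk's rows into the batch here ...
  replicated += chunk.rows.length;
  table = await batch.updateTableProgress(table, {
    replicatedCount: replicated,
    lastKey: encodeKey(chunk.lastKey) // Uint8Array; stored as bson.Binary in snapshot_status.last_key
  });
}
```

Keeping the returned copy matters: later calls merge new values with the copy's existing `snapshotStatus`, and `markSnapshotDone` (below) clears `snapshot_status` once the table is fully replicated.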
@@ -853,6 +932,9 @@ export class MongoBucketBatch
       {
         $set: {
           snapshot_done: true
+        },
+        $unset: {
+          snapshot_status: 1
         }
       },
       { session }
@@ -876,17 +958,8 @@ export class MongoBucketBatch
       }
     });
     return tables.map((table) => {
-      const copy = new storage.SourceTable(
-        table.id,
-        table.connectionTag,
-        table.objectId,
-        table.schema,
-        table.table,
-        table.replicaIdColumns,
-        table.snapshotComplete
-      );
-      copy.syncData = table.syncData;
-      copy.syncParameters = table.syncParameters;
+      const copy = table.clone();
+      copy.snapshotComplete = true;
       return copy;
     });
   }
@@ -895,7 +968,7 @@ export class MongoBucketBatch
    * Gets relevant {@link SqlEventDescriptor}s for the given {@link SourceTable}
    */
   protected getTableEvents(table: storage.SourceTable): SqlEventDescriptor[] {
-    return this.sync_rules.event_descriptors.filter((evt) =>
+    return this.sync_rules.eventDescriptors.filter((evt) =>
       [...evt.getSourceTables()].some((sourceTable) => sourceTable.matches(table))
     );
   }
@@ -15,6 +15,7 @@ export class MongoPersistedSyncRulesContent implements storage.PersistedSyncRule
   public readonly last_fatal_error: string | null;
   public readonly last_keepalive_ts: Date | null;
   public readonly last_checkpoint_ts: Date | null;
+  public readonly active: boolean;

   public current_lock: MongoSyncRulesLock | null = null;

@@ -30,6 +31,7 @@ export class MongoPersistedSyncRulesContent implements storage.PersistedSyncRule
     this.last_fatal_error = doc.last_fatal_error;
     this.last_checkpoint_ts = doc.last_checkpoint_ts;
     this.last_keepalive_ts = doc.last_keepalive_ts;
+    this.active = doc.state == 'ACTIVE';
   }

   parsed(options: storage.ParseSyncRulesOptions) {
@@ -116,9 +116,15 @@ export class MongoSyncBucketStorage
     const doc = await this.db.sync_rules.findOne(
       { _id: this.group_id },
       {
-        projection: { last_checkpoint: 1, last_checkpoint_lsn: 1 }
+        projection: { last_checkpoint: 1, last_checkpoint_lsn: 1, snapshot_done: 1 }
       }
     );
+    if (!doc?.snapshot_done) {
+      return {
+        checkpoint: 0n,
+        lsn: null
+      };
+    }
     return {
       checkpoint: doc?.last_checkpoint ?? 0n,
       lsn: doc?.last_checkpoint_lsn ?? null
@@ -138,6 +144,7 @@ export class MongoSyncBucketStorage
     const checkpoint_lsn = doc?.last_checkpoint_lsn ?? null;

     await using batch = new MongoBucketBatch({
+      logger: options.logger,
       db: this.db,
       syncRules: this.sync_rules.parsed(options).sync_rules,
       groupId: this.group_id,
@@ -146,7 +153,8 @@ export class MongoSyncBucketStorage
       noCheckpointBeforeLsn: doc?.no_checkpoint_before ?? options.zeroLSN,
       keepaliveOp: doc?.keepalive_op ? BigInt(doc.keepalive_op) : null,
       storeCurrentData: options.storeCurrentData,
-      skipExistingRows: options.skipExistingRows ?? false
+      skipExistingRows: options.skipExistingRows ?? false,
+      markRecordUnavailable: options.markRecordUnavailable
     });
     this.iterateListeners((cb) => cb.batchStarted?.(batch));

@@ -193,7 +201,8 @@ export class MongoSyncBucketStorage
         table_name: table,
         replica_id_columns: null,
         replica_id_columns2: columns,
-        snapshot_done: false
+        snapshot_done: false,
+        snapshot_status: undefined
       };

       await col.insertOne(doc, { session });
@@ -210,6 +219,14 @@ export class MongoSyncBucketStorage
       sourceTable.syncEvent = options.sync_rules.tableTriggersEvent(sourceTable);
       sourceTable.syncData = options.sync_rules.tableSyncsData(sourceTable);
       sourceTable.syncParameters = options.sync_rules.tableSyncsParameters(sourceTable);
+      sourceTable.snapshotStatus =
+        doc.snapshot_status == null
+          ? undefined
+          : {
+              lastKey: doc.snapshot_status.last_key?.buffer ?? null,
+              totalEstimatedCount: doc.snapshot_status.total_estimated_count,
+              replicatedCount: doc.snapshot_status.replicated_count
+            };

       let dropTables: storage.SourceTable[] = [];
       // Detect tables that are either renamed, or have different replica_id_columns
@@ -521,7 +538,8 @@ export class MongoSyncBucketStorage
         projection: {
           snapshot_done: 1,
           last_checkpoint_lsn: 1,
-          state: 1
+          state: 1,
+          snapshot_lsn: 1
         }
       }
     );
@@ -531,6 +549,7 @@ export class MongoSyncBucketStorage

     return {
       snapshot_done: doc.snapshot_done,
+      snapshot_lsn: doc.snapshot_lsn ?? null,
       active: doc.state == 'ACTIVE',
       checkpoint_lsn: doc.last_checkpoint_lsn
     };
@@ -572,6 +591,9 @@ export class MongoSyncBucketStorage
           last_checkpoint_lsn: null,
           last_checkpoint: null,
           no_checkpoint_before: null
+        },
+        $unset: {
+          snapshot_lsn: 1
         }
       },
       { maxTimeMS: lib_mongo.db.MONGO_CLEAR_OPERATION_TIMEOUT_MS }
@@ -3,7 +3,7 @@ import { JSONBig } from '@powersync/service-jsonbig';
 import { EvaluatedParameters, EvaluatedRow } from '@powersync/service-sync-rules';
 import * as bson from 'bson';

-import { logger } from '@powersync/lib-services-framework';
+import { Logger, logger as defaultLogger } from '@powersync/lib-services-framework';
 import { InternalOpId, storage, utils } from '@powersync/service-core';
 import { currentBucketKey, MAX_ROW_SIZE } from './MongoBucketBatch.js';
 import { MongoIdSequence } from './MongoIdSequence.js';
@@ -46,6 +46,7 @@ const MAX_TRANSACTION_DOC_COUNT = 2_000;
  * multiple transactions.
  */
 export class PersistedBatch {
+  logger: Logger;
   bucketData: mongo.AnyBulkWriteOperation<BucketDataDocument>[] = [];
   bucketParameters: mongo.AnyBulkWriteOperation<BucketParameterDocument>[] = [];
   currentData: mongo.AnyBulkWriteOperation<CurrentDataDocument>[] = [];
@@ -63,9 +64,11 @@ export class PersistedBatch {

   constructor(
     private group_id: number,
-    writtenSize: number
+    writtenSize: number,
+    options?: { logger?: Logger }
   ) {
     this.currentSize = writtenSize;
+    this.logger = options?.logger ?? defaultLogger;
   }

   private incrementBucket(bucket: string, op_id: InternalOpId) {
@@ -107,7 +110,7 @@ export class PersistedBatch {
         // the BSON size is small enough, but the JSON size is too large.
         // In these cases, we can't store the data, so we skip it, or generate a REMOVE operation if the row
         // was synced previously.
-        logger.error(`powersync_${this.group_id} Row ${key} too large: ${recordData.length} bytes. Removing.`);
+        this.logger.error(`Row ${key} too large: ${recordData.length} bytes. Removing.`);
         continue;
       }

@@ -206,7 +209,7 @@ export class PersistedBatch {
             k: sourceKey
           },
           lookup: binLookup,
-          bucket_parameters: result.bucket_parameters
+          bucket_parameters: result.bucketParameters
         }
       }
     });
@@ -270,9 +273,11 @@ export class PersistedBatch {
     );
   }

-  async flush(db: PowerSyncMongo, session: mongo.ClientSession) {
+  async flush(db: PowerSyncMongo, session: mongo.ClientSession, options?: storage.BucketBatchCommitOptions) {
     const startAt = performance.now();
+    let flushedSomething = false;
     if (this.bucketData.length > 0) {
+      flushedSomething = true;
       await db.bucket_data.bulkWrite(this.bucketData, {
         session,
         // inserts only - order doesn't matter
@@ -280,6 +285,7 @@ export class PersistedBatch {
       });
     }
     if (this.bucketParameters.length > 0) {
+      flushedSomething = true;
       await db.bucket_parameters.bulkWrite(this.bucketParameters, {
         session,
         // inserts only - order doesn't matter
@@ -287,6 +293,7 @@ export class PersistedBatch {
       });
     }
     if (this.currentData.length > 0) {
+      flushedSomething = true;
       await db.current_data.bulkWrite(this.currentData, {
         session,
         // may update and delete data within the same batch - order matters
@@ -295,6 +302,7 @@ export class PersistedBatch {
     }

     if (this.bucketStates.size > 0) {
+      flushedSomething = true;
       await db.bucket_state.bulkWrite(this.getBucketStateUpdates(), {
         session,
         // Per-bucket operation - order doesn't matter
@@ -302,12 +310,43 @@ export class PersistedBatch {
       });
     }

-    const duration = performance.now() - startAt;
-    logger.info(
-      `powersync_${this.group_id} Flushed ${this.bucketData.length} + ${this.bucketParameters.length} + ${
-        this.currentData.length
-      } updates, ${Math.round(this.currentSize / 1024)}kb in ${duration.toFixed(0)}ms. Last op_id: ${this.debugLastOpId}`
-    );
+    if (flushedSomething) {
+      const duration = Math.round(performance.now() - startAt);
+      if (options?.oldestUncommittedChange != null) {
+        const replicationLag = Math.round((Date.now() - options.oldestUncommittedChange.getTime()) / 1000);
+
+        this.logger.info(
+          `Flushed ${this.bucketData.length} + ${this.bucketParameters.length} + ${
+            this.currentData.length
+          } updates, ${Math.round(this.currentSize / 1024)}kb in ${duration}ms. Last op_id: ${this.debugLastOpId}. Replication lag: ${replicationLag}s`,
+          {
+            flushed: {
+              duration: duration,
+              size: this.currentSize,
+              bucket_data_count: this.bucketData.length,
+              parameter_data_count: this.bucketParameters.length,
+              current_data_count: this.currentData.length,
+              replication_lag_seconds: replicationLag
+            }
+          }
+        );
+      } else {
+        this.logger.info(
+          `Flushed ${this.bucketData.length} + ${this.bucketParameters.length} + ${
+            this.currentData.length
+          } updates, ${Math.round(this.currentSize / 1024)}kb in ${duration}ms. Last op_id: ${this.debugLastOpId}`,
+          {
+            flushed: {
+              duration: duration,
+              size: this.currentSize,
+              bucket_data_count: this.bucketData.length,
+              parameter_data_count: this.bucketParameters.length,
+              current_data_count: this.currentData.length
+            }
+          }
+        );
+      }
+    }

     this.bucketData = [];
     this.bucketParameters = [];
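`PersistedBatch.flush` now only logs when something was actually written, and when the commit options carry `oldestUncommittedChange` it also reports replication lag as the age of that timestamp. A small sketch of how the value would be threaded through `commit`; `firstChangeTimestamp` is an assumed caller-tracked `Date`:

```ts
// Sketch only: passing the oldest uncommitted change so flush logs replication lag.
await batch.commit(currentLsn, {
  createEmptyCheckpoints: true,
  oldestUncommittedChange: firstChangeTimestamp
});
// The logged lag is then roughly:
//   Math.round((Date.now() - firstChangeTimestamp.getTime()) / 1000) seconds
```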
@@ -1,4 +1,4 @@
-import { storage } from '@powersync/service-core';
+import { storage, TableSnapshotStatus } from '@powersync/service-core';
 import { SqliteJsonValue } from '@powersync/service-sync-rules';
 import * as bson from 'bson';

@@ -73,6 +73,13 @@ export interface SourceTableDocument {
   replica_id_columns: string[] | null;
   replica_id_columns2: { name: string; type_oid?: number; type?: string }[] | undefined;
   snapshot_done: boolean | undefined;
+  snapshot_status: SourceTableDocumentSnapshotStatus | undefined;
+}
+
+export interface SourceTableDocumentSnapshotStatus {
+  total_estimated_count: number;
+  replicated_count: number;
+  last_key: bson.Binary | null;
 }

 /**
@@ -110,6 +117,13 @@ export interface SyncRuleDocument {
    */
   snapshot_done: boolean;

+  /**
+   * If snapshot_done = false, this may be the lsn at which we started the snapshot.
+   *
+   * This can be used for resuming the snapshot after a restart.
+   */
+  snapshot_lsn: string | undefined;
+
   /**
    * The last consistent checkpoint.
    *