@powersync/service-module-postgres-storage 0.7.4 → 0.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/CHANGELOG.md +37 -0
  2. package/dist/.tsbuildinfo +1 -1
  3. package/dist/@types/migrations/scripts/1749024804042-snapshot-progress.d.ts +3 -0
  4. package/dist/@types/storage/PostgresSyncRulesStorage.d.ts +1 -2
  5. package/dist/@types/storage/PostgresTestStorageFactoryGenerator.d.ts +5 -1
  6. package/dist/@types/storage/batch/PostgresBucketBatch.d.ts +8 -2
  7. package/dist/@types/storage/checkpoints/PostgresWriteCheckpointAPI.d.ts +2 -3
  8. package/dist/@types/storage/sync-rules/PostgresPersistedSyncRulesContent.d.ts +1 -0
  9. package/dist/@types/types/models/SourceTable.d.ts +3 -0
  10. package/dist/@types/types/models/SyncRules.d.ts +4 -0
  11. package/dist/migrations/scripts/1684951997326-init.js.map +1 -1
  12. package/dist/migrations/scripts/1749024804042-snapshot-progress.js +110 -0
  13. package/dist/migrations/scripts/1749024804042-snapshot-progress.js.map +1 -0
  14. package/dist/storage/PostgresSyncRulesStorage.js +17 -8
  15. package/dist/storage/PostgresSyncRulesStorage.js.map +1 -1
  16. package/dist/storage/PostgresTestStorageFactoryGenerator.js +48 -37
  17. package/dist/storage/PostgresTestStorageFactoryGenerator.js.map +1 -1
  18. package/dist/storage/batch/PostgresBucketBatch.js +62 -13
  19. package/dist/storage/batch/PostgresBucketBatch.js.map +1 -1
  20. package/dist/storage/batch/PostgresPersistedBatch.js +1 -1
  21. package/dist/storage/batch/PostgresPersistedBatch.js.map +1 -1
  22. package/dist/storage/checkpoints/PostgresWriteCheckpointAPI.js +1 -5
  23. package/dist/storage/checkpoints/PostgresWriteCheckpointAPI.js.map +1 -1
  24. package/dist/storage/sync-rules/PostgresPersistedSyncRulesContent.js +2 -0
  25. package/dist/storage/sync-rules/PostgresPersistedSyncRulesContent.js.map +1 -1
  26. package/dist/types/models/SourceTable.js +5 -2
  27. package/dist/types/models/SourceTable.js.map +1 -1
  28. package/dist/types/models/SyncRules.js +4 -0
  29. package/dist/types/models/SyncRules.js.map +1 -1
  30. package/package.json +8 -8
  31. package/src/migrations/scripts/1684951997326-init.ts +0 -1
  32. package/src/migrations/scripts/1749024804042-snapshot-progress.ts +43 -0
  33. package/src/storage/PostgresSyncRulesStorage.ts +17 -11
  34. package/src/storage/PostgresTestStorageFactoryGenerator.ts +48 -36
  35. package/src/storage/batch/PostgresBucketBatch.ts +76 -16
  36. package/src/storage/batch/PostgresPersistedBatch.ts +1 -1
  37. package/src/storage/checkpoints/PostgresWriteCheckpointAPI.ts +5 -10
  38. package/src/storage/sync-rules/PostgresPersistedSyncRulesContent.ts +2 -0
  39. package/src/types/models/SourceTable.ts +5 -2
  40. package/src/types/models/SyncRules.ts +4 -0
  41. package/test/src/__snapshots__/storage_sync.test.ts.snap +147 -0
  42. package/test/src/migrations.test.ts +10 -2
  43. package/test/src/util.ts +7 -2
package/src/storage/PostgresSyncRulesStorage.ts
@@ -107,12 +107,6 @@ export class PostgresSyncRulesStorage
     return new PostgresCompactor(this.db, this.group_id, options).compact();
   }
 
-  batchCreateCustomWriteCheckpoints(checkpoints: storage.BatchedCustomWriteCheckpointOptions[]): Promise<void> {
-    return this.writeCheckpointAPI.batchCreateCustomWriteCheckpoints(
-      checkpoints.map((c) => ({ ...c, sync_rules_id: this.group_id }))
-    );
-  }
-
   lastWriteCheckpoint(filters: storage.SyncStorageLastWriteCheckpointFilters): Promise<bigint | null> {
     return this.writeCheckpointAPI.lastWriteCheckpoint({
       ...filters,
@@ -233,6 +227,13 @@ export class PostgresSyncRulesStorage
       replicationColumns,
       sourceTableRow!.snapshot_done ?? true
     );
+    if (!sourceTable.snapshotComplete) {
+      sourceTable.snapshotStatus = {
+        totalEstimatedCount: Number(sourceTableRow!.snapshot_total_estimated_count ?? -1n),
+        replicatedCount: Number(sourceTableRow!.snapshot_replicated_count ?? 0n),
+        lastKey: sourceTableRow!.snapshot_last_key
+      };
+    }
     sourceTable.syncEvent = options.sync_rules.tableTriggersEvent(sourceTable);
     sourceTable.syncData = options.sync_rules.tableSyncsData(sourceTable);
     sourceTable.syncParameters = options.sync_rules.tableSyncsParameters(sourceTable);
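The `snapshotStatus` restored here tracks the progress of an in-progress table snapshot. A sketch of its shape, reconstructed from the fields used in this hunk (the authoritative type is `storage.TableSnapshotStatus` in `@powersync/service-core`, not shown in this diff):

```ts
// Reconstructed shape; not the verbatim service-core definition.
interface TableSnapshotStatus {
  totalEstimatedCount: number; // -1 when no estimate has been recorded
  replicatedCount: number; // rows copied so far during the initial snapshot
  lastKey: Buffer | null; // key cursor for resuming an interrupted snapshot
}
```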
@@ -321,6 +322,7 @@ export class PostgresSyncRulesStorage
     const checkpoint_lsn = syncRules?.last_checkpoint_lsn ?? null;
 
     const batch = new PostgresBucketBatch({
+      logger: options.logger ?? framework.logger,
      db: this.db,
      sync_rules: this.sync_rules.parsed(options).sync_rules,
      group_id: this.group_id,
@@ -330,7 +332,8 @@ export class PostgresSyncRulesStorage
       no_checkpoint_before_lsn: syncRules?.no_checkpoint_before ?? options.zeroLSN,
       store_current_data: options.storeCurrentData,
       skip_existing_rows: options.skipExistingRows ?? false,
-      batch_limits: this.options.batchLimits
+      batch_limits: this.options.batchLimits,
+      markRecordUnavailable: options.markRecordUnavailable
     });
     this.iterateListeners((cb) => cb.batchStarted?.(batch));
 
@@ -564,7 +567,7 @@ export class PostgresSyncRulesStorage
 
   async terminate(options?: storage.TerminateOptions) {
     if (!options || options?.clearStorage) {
-      await this.clear();
+      await this.clear(options);
     }
     await this.db.sql`
       UPDATE sync_rules
@@ -580,6 +583,7 @@ export class PostgresSyncRulesStorage
     const syncRulesRow = await this.db.sql`
       SELECT
         snapshot_done,
+        snapshot_lsn,
         last_checkpoint_lsn,
         state
       FROM
@@ -587,7 +591,7 @@ export class PostgresSyncRulesStorage
       WHERE
         id = ${{ type: 'int4', value: this.group_id }}
     `
-      .decoded(pick(models.SyncRules, ['snapshot_done', 'last_checkpoint_lsn', 'state']))
+      .decoded(pick(models.SyncRules, ['snapshot_done', 'last_checkpoint_lsn', 'state', 'snapshot_lsn']))
       .first();
 
     if (syncRulesRow == null) {
@@ -597,11 +601,13 @@ export class PostgresSyncRulesStorage
     return {
       snapshot_done: syncRulesRow.snapshot_done,
       active: syncRulesRow.state == storage.SyncRuleState.ACTIVE,
-      checkpoint_lsn: syncRulesRow.last_checkpoint_lsn ?? null
+      checkpoint_lsn: syncRulesRow.last_checkpoint_lsn ?? null,
+      snapshot_lsn: syncRulesRow.snapshot_lsn ?? null
     };
   }
 
-  async clear(): Promise<void> {
+  async clear(options?: storage.ClearStorageOptions): Promise<void> {
+    // TODO: Cleanly abort the cleanup when the provided signal is aborted.
     await this.db.sql`
       UPDATE sync_rules
       SET
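`terminate` now forwards its options to `clear`, which accepts a `storage.ClearStorageOptions`. A minimal usage sketch, assuming the options carry an abort signal (suggested by the TODO above, but not shown in this diff):

```ts
// Hypothetical caller; syncRulesStorage is a storage.SyncRulesBucketStorage.
const controller = new AbortController();

// Clears bucket data before marking the sync rules as terminated.
await syncRulesStorage.terminate({
  clearStorage: true,
  signal: controller.signal // assumption: the signal referenced by the TODO
});
```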
package/src/storage/PostgresTestStorageFactoryGenerator.ts
@@ -12,50 +12,62 @@ export type PostgresTestStorageOptions = {
   migrationAgent?: (config: PostgresStorageConfigDecoded) => PostgresMigrationAgent;
 };
 
-export const PostgresTestStorageFactoryGenerator = (factoryOptions: PostgresTestStorageOptions) => {
-  return async (options?: TestStorageOptions) => {
-    try {
-      const migrationManager: PowerSyncMigrationManager = new framework.MigrationManager();
-
-      const BASE_CONFIG = {
-        type: 'postgresql' as const,
-        uri: factoryOptions.url,
-        sslmode: 'disable' as const
-      };
-
-      const TEST_CONNECTION_OPTIONS = normalizePostgresStorageConfig(BASE_CONFIG);
-
-      await using migrationAgent = factoryOptions.migrationAgent
-        ? factoryOptions.migrationAgent(BASE_CONFIG)
-        : new PostgresMigrationAgent(BASE_CONFIG);
-      migrationManager.registerMigrationAgent(migrationAgent);
-
-      const mockServiceContext = { configuration: { storage: BASE_CONFIG } } as unknown as ServiceContext;
-
-      if (!options?.doNotClear) {
-        await migrationManager.migrate({
-          direction: framework.migrations.Direction.Down,
-          migrationContext: {
-            service_context: mockServiceContext
-          }
-        });
+export const postgresTestSetup = (factoryOptions: PostgresTestStorageOptions) => {
+  const BASE_CONFIG = {
+    type: 'postgresql' as const,
+    uri: factoryOptions.url,
+    sslmode: 'disable' as const
+  };
+
+  const TEST_CONNECTION_OPTIONS = normalizePostgresStorageConfig(BASE_CONFIG);
+
+  const migrate = async (direction: framework.migrations.Direction) => {
+    await using migrationManager: PowerSyncMigrationManager = new framework.MigrationManager();
+    await using migrationAgent = factoryOptions.migrationAgent
+      ? factoryOptions.migrationAgent(BASE_CONFIG)
+      : new PostgresMigrationAgent(BASE_CONFIG);
+    migrationManager.registerMigrationAgent(migrationAgent);
+
+    const mockServiceContext = { configuration: { storage: BASE_CONFIG } } as unknown as ServiceContext;
+
+    await migrationManager.migrate({
+      direction: framework.migrations.Direction.Down,
+      migrationContext: {
+        service_context: mockServiceContext
       }
+    });
 
+    if (direction == framework.migrations.Direction.Up) {
       await migrationManager.migrate({
         direction: framework.migrations.Direction.Up,
         migrationContext: {
           service_context: mockServiceContext
         }
       });
-
-      return new PostgresBucketStorageFactory({
-        config: TEST_CONNECTION_OPTIONS,
-        slot_name_prefix: 'test_'
-      });
-    } catch (ex) {
-      // Vitest does not display these errors nicely when using the `await using` syntx
-      console.error(ex, ex.cause);
-      throw ex;
     }
   };
+
+  return {
+    factory: async (options?: TestStorageOptions) => {
+      try {
+        if (!options?.doNotClear) {
+          await migrate(framework.migrations.Direction.Up);
+        }
+
+        return new PostgresBucketStorageFactory({
+          config: TEST_CONNECTION_OPTIONS,
+          slot_name_prefix: 'test_'
+        });
+      } catch (ex) {
+        // Vitest does not display these errors nicely when using the `await using` syntx
+        console.error(ex, ex.cause);
+        throw ex;
+      }
+    },
+    migrate
+  };
+};
+
+export const PostgresTestStorageFactoryGenerator = (factoryOptions: PostgresTestStorageOptions) => {
+  return postgresTestSetup(factoryOptions).factory;
 };
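The refactor above splits migration handling out of the factory so tests can run migrations in either direction on their own. A sketch of how a test suite might use the new helper; the connection URL is a placeholder:

```ts
import * as framework from '@powersync/lib-services-framework';

// Placeholder URL; point this at a disposable test database.
const setup = postgresTestSetup({ url: 'postgres://localhost:5432/powersync_test' });

// Tear the schema down explicitly, e.g. from an afterAll hook:
await setup.migrate(framework.migrations.Direction.Down);

// Or obtain a storage factory against a freshly migrated database, as before:
const factory = await setup.factory();
```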
package/src/storage/batch/PostgresBucketBatch.ts
@@ -4,12 +4,12 @@ import {
   container,
   ErrorCode,
   errors,
-  logger,
+  Logger,
   ReplicationAssertionError,
   ServiceAssertionError,
   ServiceError
 } from '@powersync/lib-services-framework';
-import { InternalOpId, storage, utils } from '@powersync/service-core';
+import { BucketStorageMarkRecordUnavailable, InternalOpId, storage, utils } from '@powersync/service-core';
 import * as sync_rules from '@powersync/service-sync-rules';
 import * as timers from 'timers/promises';
 import * as t from 'ts-codec';
@@ -22,6 +22,7 @@ import { cacheKey, encodedCacheKey, OperationBatch, RecordOperation } from './Op
 import { PostgresPersistedBatch } from './PostgresPersistedBatch.js';
 
 export interface PostgresBucketBatchOptions {
+  logger: Logger;
   db: lib_postgres.DatabaseClient;
   sync_rules: sync_rules.SqlSyncRules;
   group_id: number;
@@ -35,6 +36,8 @@ export interface PostgresBucketBatchOptions {
    */
   skip_existing_rows: boolean;
   batch_limits: RequiredOperationBatchLimits;
+
+  markRecordUnavailable: BucketStorageMarkRecordUnavailable | undefined;
 }
 
 /**
@@ -54,6 +57,8 @@ export class PostgresBucketBatch
   extends BaseObserver<storage.BucketBatchStorageListener>
   implements storage.BucketStorageBatch
 {
+  private logger: Logger;
+
   public last_flushed_op: InternalOpId | null = null;
 
   protected db: lib_postgres.DatabaseClient;
@@ -67,15 +72,18 @@ export class PostgresBucketBatch
   protected readonly sync_rules: sync_rules.SqlSyncRules;
   protected batch: OperationBatch | null;
   private lastWaitingLogThrottled = 0;
+  private markRecordUnavailable: BucketStorageMarkRecordUnavailable | undefined;
 
   constructor(protected options: PostgresBucketBatchOptions) {
     super();
+    this.logger = options.logger;
     this.db = options.db;
     this.group_id = options.group_id;
     this.last_checkpoint_lsn = options.last_checkpoint_lsn;
     this.no_checkpoint_before_lsn = options.no_checkpoint_before_lsn;
     this.write_checkpoint_batch = [];
     this.sync_rules = options.sync_rules;
+    this.markRecordUnavailable = options.markRecordUnavailable;
     this.batch = null;
     this.persisted_op = null;
     if (options.keep_alive_op) {
@@ -115,7 +123,7 @@ export class PostgresBucketBatch
       return null;
     }
 
-    logger.debug(`Saving ${record.tag}:${record.before?.id}/${record.after?.id}`);
+    this.logger.debug(`Saving ${record.tag}:${record.before?.id}/${record.after?.id}`);
 
     this.batch ??= new OperationBatch(this.options.batch_limits);
     this.batch.push(new RecordOperation(record));
@@ -279,13 +287,14 @@ export class PostgresBucketBatch
     if (this.last_checkpoint_lsn != null && lsn < this.last_checkpoint_lsn) {
       // When re-applying transactions, don't create a new checkpoint until
       // we are past the last transaction.
-      logger.info(`Re-applied transaction ${lsn} - skipping checkpoint`);
+      this.logger.info(`Re-applied transaction ${lsn} - skipping checkpoint`);
+      // Cannot create a checkpoint yet - return false
       return false;
     }
 
     if (lsn < this.no_checkpoint_before_lsn) {
       if (Date.now() - this.lastWaitingLogThrottled > 5_000) {
-        logger.info(
+        this.logger.info(
           `Waiting until ${this.no_checkpoint_before_lsn} before creating checkpoint, currently at ${lsn}. Persisted op: ${this.persisted_op}`
         );
         this.lastWaitingLogThrottled = Date.now();
@@ -305,12 +314,14 @@ export class PostgresBucketBatch
           id = ${{ type: 'int4', value: this.group_id }}
       `.execute();
 
+      // Cannot create a checkpoint yet - return false
       return false;
     }
 
     // Don't create a checkpoint if there were no changes
     if (!createEmptyCheckpoints && this.persisted_op == null) {
-      return false;
+      // Nothing to commit - return true
+      return true;
     }
 
     const now = new Date().toISOString();
@@ -333,6 +344,7 @@ export class PostgresBucketBatch
         keepalive_op = ${{ type: 'int8', value: update.keepalive_op }},
         last_fatal_error = ${{ type: 'varchar', value: update.last_fatal_error }},
         snapshot_done = ${{ type: 'bool', value: update.snapshot_done }},
+        snapshot_lsn = NULL,
         last_keepalive_ts = ${{ type: 1184, value: update.last_keepalive_ts }},
         last_checkpoint = COALESCE(
           ${{ type: 'int8', value: update.last_checkpoint }},
@@ -371,7 +383,7 @@ export class PostgresBucketBatch
     if (this.persisted_op != null) {
       // The commit may have been skipped due to "no_checkpoint_before_lsn".
       // Apply it now if relevant
-      logger.info(`Commit due to keepalive at ${lsn} / ${this.persisted_op}`);
+      this.logger.info(`Commit due to keepalive at ${lsn} / ${this.persisted_op}`);
       return await this.commit(lsn);
     }
 
@@ -379,6 +391,7 @@ export class PostgresBucketBatch
       UPDATE sync_rules
       SET
         snapshot_done = ${{ type: 'bool', value: true }},
+        snapshot_lsn = NULL,
         last_checkpoint_lsn = ${{ type: 'varchar', value: lsn }},
         last_fatal_error = ${{ type: 'varchar', value: null }},
         last_keepalive_ts = ${{ type: 1184, value: new Date().toISOString() }}
@@ -399,6 +412,16 @@ export class PostgresBucketBatch
     return true;
   }
 
+  async setSnapshotLsn(lsn: string): Promise<void> {
+    await this.db.sql`
+      UPDATE sync_rules
+      SET
+        snapshot_lsn = ${{ type: 'varchar', value: lsn }}
+      WHERE
+        id = ${{ type: 'int4', value: this.group_id }}
+    `.execute();
+  }
+
   async markSnapshotDone(
     tables: storage.SourceTable[],
     no_checkpoint_before_lsn: string
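`setSnapshotLsn` persists the new `snapshot_lsn` column; `commit` and the keepalive path above reset it to `NULL` once a consistent checkpoint exists. A hedged sketch of the intended call from a replication stream, where `batch` is an open bucket batch and `currentLsn` a placeholder:

```ts
// Hypothetical usage: record the LSN the in-progress snapshot is consistent
// with, so a restarted replication job can resume rather than start over.
await batch.setSnapshotLsn(currentLsn);
```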
@@ -409,7 +432,10 @@ export class PostgresBucketBatch
       await db.sql`
         UPDATE source_tables
         SET
-          snapshot_done = ${{ type: 'bool', value: true }}
+          snapshot_done = ${{ type: 'bool', value: true }},
+          snapshot_total_estimated_count = NULL,
+          snapshot_replicated_count = NULL,
+          snapshot_last_key = NULL
         WHERE
           id IN (
             SELECT
@@ -448,6 +474,31 @@ export class PostgresBucketBatch
     });
   }
 
+  async updateTableProgress(
+    table: storage.SourceTable,
+    progress: Partial<storage.TableSnapshotStatus>
+  ): Promise<storage.SourceTable> {
+    const copy = table.clone();
+    const snapshotStatus = {
+      totalEstimatedCount: progress.totalEstimatedCount ?? copy.snapshotStatus?.totalEstimatedCount ?? 0,
+      replicatedCount: progress.replicatedCount ?? copy.snapshotStatus?.replicatedCount ?? 0,
+      lastKey: progress.lastKey ?? copy.snapshotStatus?.lastKey ?? null
+    };
+    copy.snapshotStatus = snapshotStatus;
+
+    await this.db.sql`
+      UPDATE source_tables
+      SET
+        snapshot_total_estimated_count = ${{ type: 'int4', value: snapshotStatus.totalEstimatedCount }},
+        snapshot_replicated_count = ${{ type: 'int4', value: snapshotStatus.replicatedCount }},
+        snapshot_last_key = ${{ type: 'bytea', value: snapshotStatus.lastKey }}
+      WHERE
+        id = ${{ type: 'varchar', value: table.id }}
+    `.execute();
+
+    return copy;
+  }
+
   addCustomWriteCheckpoint(checkpoint: storage.BatchedCustomWriteCheckpointOptions): void {
     this.write_checkpoint_batch.push({
       ...checkpoint,
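Note that `updateTableProgress` returns an updated copy of the table rather than mutating it, so callers must carry the returned value forward. A sketch of a snapshot loop using it; `batch`, `readNextChunk`, and `lastKeyOf` are placeholders:

```ts
// Hypothetical snapshot loop; only updateTableProgress is from this diff.
let currentTable = table;
while (true) {
  const chunk = await readNextChunk(currentTable);
  if (chunk.length == 0) break;
  // ... persist the chunk through the batch ...
  currentTable = await batch.updateTableProgress(currentTable, {
    replicatedCount: (currentTable.snapshotStatus?.replicatedCount ?? 0) + chunk.length,
    lastKey: lastKeyOf(chunk)
  });
}
```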
@@ -657,10 +708,19 @@ export class PostgresBucketBatch
       existingBuckets = [];
       existingLookups = [];
       // Log to help with debugging if there was a consistency issue
+
       if (this.options.store_current_data) {
-        logger.warn(
-          `Cannot find previous record for update on ${record.sourceTable.qualifiedName}: ${beforeId} / ${record.before?.id}`
-        );
+        if (this.markRecordUnavailable != null) {
+          // This will trigger a "resnapshot" of the record.
+          // This is not relevant if storeCurrentData is false, since we'll get the full row
+          // directly in the replication stream.
+          this.markRecordUnavailable(record);
+        } else {
+          // Log to help with debugging if there was a consistency issue
+          this.logger.warn(
+            `Cannot find previous record for update on ${record.sourceTable.qualifiedName}: ${beforeId} / ${record.before?.id}`
+          );
+        }
       }
     } else {
       existingBuckets = result.buckets;
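The `markRecordUnavailable` callback is supplied through the `startBatch` options (see the `PostgresSyncRulesStorage` hunks above). When `storeCurrentData` is enabled and the previous version of a row is missing, the batch now reports the record for a resnapshot instead of only logging a warning. A hedged sketch of the wiring from a replication stream; `bucketStorage` and `requestResnapshot` are placeholders:

```ts
// Hypothetical wiring; only the markRecordUnavailable option is from this diff.
await bucketStorage.startBatch(
  {
    zeroLSN: '0/0',
    defaultSchema: 'public',
    storeCurrentData: true,
    markRecordUnavailable: (record) => {
      // Queue the row to be read again ("resnapshotted") from the source.
      requestResnapshot(record);
    }
  },
  async (batch) => {
    // ... apply replicated changes, then batch.commit(lsn) ...
  }
);
```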
@@ -677,8 +737,8 @@ export class PostgresBucketBatch
       existingBuckets = [];
       existingLookups = [];
       // Log to help with debugging if there was a consistency issue
-      if (this.options.store_current_data) {
-        logger.warn(
+      if (this.options.store_current_data && this.markRecordUnavailable == null) {
+        this.logger.warn(
           `Cannot find previous record for delete on ${record.sourceTable.qualifiedName}: ${beforeId} / ${record.before?.id}`
         );
       }
@@ -771,7 +831,7 @@ export class PostgresBucketBatch
           }
         }
       );
-      logger.error(
+      this.logger.error(
         `Failed to evaluate data query on ${record.sourceTable.qualifiedName}.${record.after?.id}: ${error.error}`
       );
     }
@@ -811,7 +871,7 @@ export class PostgresBucketBatch
           }
         }
       );
-      logger.error(
+      this.logger.error(
         `Failed to evaluate parameter query on ${record.sourceTable.qualifiedName}.${after.id}: ${error.error}`
       );
     }
@@ -861,7 +921,7 @@ export class PostgresBucketBatch
    * TODO maybe share this with an abstract class
    */
   protected getTableEvents(table: storage.SourceTable): sync_rules.SqlEventDescriptor[] {
-    return this.sync_rules.event_descriptors.filter((evt) =>
+    return this.sync_rules.eventDescriptors.filter((evt) =>
       [...evt.getSourceTables()].some((sourceTable) => sourceTable.matches(table))
     );
   }
package/src/storage/batch/PostgresPersistedBatch.ts
@@ -152,7 +152,7 @@ export class PostgresPersistedBatch {
     const base64 = binLookup.toString('base64');
     remaining_lookups.delete(base64);
     const hexLookup = binLookup.toString('hex');
-    const serializedBucketParameters = JSONBig.stringify(result.bucket_parameters);
+    const serializedBucketParameters = JSONBig.stringify(result.bucketParameters);
     this.parameterDataInserts.push({
       group_id: this.group_id,
       source_table: table.id,
package/src/storage/checkpoints/PostgresWriteCheckpointAPI.ts
@@ -1,7 +1,6 @@
 import * as lib_postgres from '@powersync/lib-service-postgres';
 import * as framework from '@powersync/lib-services-framework';
-import { storage, sync } from '@powersync/service-core';
-import { JSONBig, JsonContainer } from '@powersync/service-jsonbig';
+import { InternalOpId, storage } from '@powersync/service-core';
 import { models } from '../../types/types.js';
 
 export type PostgresCheckpointAPIOptions = {
@@ -26,7 +25,10 @@ export class PostgresWriteCheckpointAPI implements storage.WriteCheckpointAPI {
     this._mode = mode;
   }
 
-  async batchCreateCustomWriteCheckpoints(checkpoints: storage.CustomWriteCheckpointOptions[]): Promise<void> {
+  async batchCreateCustomWriteCheckpoints(
+    checkpoints: storage.CustomWriteCheckpointOptions[],
+    op_id: InternalOpId
+  ): Promise<void> {
     return batchCreateCustomWriteCheckpoints(this.db, checkpoints);
   }
 
@@ -58,13 +60,6 @@ export class PostgresWriteCheckpointAPI implements storage.WriteCheckpointAPI {
     return row!.write_checkpoint;
   }
 
-  watchUserWriteCheckpoint(
-    options: storage.WatchUserWriteCheckpointOptions
-  ): AsyncIterable<storage.WriteCheckpointResult> {
-    // Not used for Postgres currently
-    throw new Error('Method not implemented.');
-  }
-
   async lastWriteCheckpoint(filters: storage.LastWriteCheckpointFilters): Promise<bigint | null> {
     switch (this.writeCheckpointMode) {
       case storage.WriteCheckpointMode.CUSTOM:
package/src/storage/sync-rules/PostgresPersistedSyncRulesContent.ts
@@ -14,6 +14,7 @@ export class PostgresPersistedSyncRulesContent implements storage.PersistedSyncR
   public readonly last_fatal_error: string | null;
   public readonly last_keepalive_ts: Date | null;
   public readonly last_checkpoint_ts: Date | null;
+  public readonly active: boolean;
   current_lock: storage.ReplicationLock | null = null;
 
   constructor(
@@ -27,6 +28,7 @@ export class PostgresPersistedSyncRulesContent implements storage.PersistedSyncR
     this.last_fatal_error = row.last_fatal_error;
     this.last_checkpoint_ts = row.last_checkpoint_ts ? new Date(row.last_checkpoint_ts) : null;
     this.last_keepalive_ts = row.last_keepalive_ts ? new Date(row.last_keepalive_ts) : null;
+    this.active = row.state == 'ACTIVE';
   }
 
   parsed(options: storage.ParseSyncRulesOptions): storage.PersistedSyncRules {
package/src/types/models/SourceTable.ts
@@ -1,5 +1,5 @@
 import * as t from 'ts-codec';
-import { bigint, jsonb, jsonb_raw, pgwire_number } from '../codecs.js';
+import { bigint, hexBuffer, jsonb, jsonb_raw, pgwire_number } from '../codecs.js';
 
 export type StoredRelationId = {
   object_id: string | number | undefined;
@@ -25,7 +25,10 @@ export const SourceTable = t.object({
   schema_name: t.string,
   table_name: t.string,
   replica_id_columns: t.Null.or(jsonb(t.array(ColumnDescriptor))),
-  snapshot_done: t.boolean
+  snapshot_done: t.boolean,
+  snapshot_total_estimated_count: t.Null.or(bigint),
+  snapshot_replicated_count: t.Null.or(bigint),
+  snapshot_last_key: t.Null.or(hexBuffer)
 });
 
 export type SourceTable = t.Encoded<typeof SourceTable>;
package/src/types/models/SyncRules.ts
@@ -11,6 +11,10 @@ export const SyncRules = t.object({
    * Can only be false if state == PROCESSING.
    */
   snapshot_done: t.boolean,
+  /**
+   * May be set if snapshot_done = false, if the replication stream requires it.
+   */
+  snapshot_lsn: t.Null.or(t.string),
   /**
    * The last consistent checkpoint.
    *
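These model additions, together with the `SourceTable` codec changes above, line up with the new `1749024804042-snapshot-progress` migration listed in the file summary. A plausible reconstruction of the DDL it applies; the exact column types are assumptions based on the codecs and pgwire types used in this diff, not the verbatim script:

```ts
// Sketch only; see package/src/migrations/scripts/1749024804042-snapshot-progress.ts
// for the real migration. Assumed types: text, int8, bytea.
await db.sql`ALTER TABLE sync_rules ADD COLUMN snapshot_lsn TEXT`.execute();
await db.sql`ALTER TABLE source_tables ADD COLUMN snapshot_total_estimated_count INT8`.execute();
await db.sql`ALTER TABLE source_tables ADD COLUMN snapshot_replicated_count INT8`.execute();
await db.sql`ALTER TABLE source_tables ADD COLUMN snapshot_last_key BYTEA`.execute();
```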
package/test/src/__snapshots__/storage_sync.test.ts.snap
@@ -309,6 +309,153 @@ exports[`sync - postgres > sync global data 1`] = `
   ]
 `;
 
+exports[`sync - postgres > sync interrupts low-priority buckets on new checkpoints (2) 1`] = `
+[
+  {
+    "checkpoint": {
+      "buckets": [
+        {
+          "bucket": "b0a[]",
+          "checksum": -659831575,
+          "count": 2000,
+          "priority": 2,
+        },
+        {
+          "bucket": "b0b[]",
+          "checksum": -659831575,
+          "count": 2000,
+          "priority": 2,
+        },
+        {
+          "bucket": "b1[]",
+          "checksum": -1096116670,
+          "count": 1,
+          "priority": 1,
+        },
+      ],
+      "last_op_id": "4001",
+      "write_checkpoint": undefined,
+    },
+  },
+  {
+    "data": {
+      "after": "0",
+      "bucket": "b1[]",
+      "data": undefined,
+      "has_more": false,
+      "next_after": "1",
+    },
+  },
+  {
+    "partial_checkpoint_complete": {
+      "last_op_id": "4001",
+      "priority": 1,
+    },
+  },
+  {
+    "data": {
+      "after": "0",
+      "bucket": "b0a[]",
+      "data": undefined,
+      "has_more": true,
+      "next_after": "2000",
+    },
+  },
+  {
+    "data": {
+      "after": "2000",
+      "bucket": "b0a[]",
+      "data": undefined,
+      "has_more": true,
+      "next_after": "4000",
+    },
+  },
+  {
+    "checkpoint_diff": {
+      "last_op_id": "4004",
+      "removed_buckets": [],
+      "updated_buckets": [
+        {
+          "bucket": "b0a[]",
+          "checksum": 883076828,
+          "count": 2001,
+          "priority": 2,
+        },
+        {
+          "bucket": "b0b[]",
+          "checksum": 883076828,
+          "count": 2001,
+          "priority": 2,
+        },
+        {
+          "bucket": "b1[]",
+          "checksum": 1841937527,
+          "count": 2,
+          "priority": 1,
+        },
+      ],
+      "write_checkpoint": undefined,
+    },
+  },
+  {
+    "data": {
+      "after": "1",
+      "bucket": "b1[]",
+      "data": undefined,
+      "has_more": false,
+      "next_after": "4002",
+    },
+  },
+  {
+    "partial_checkpoint_complete": {
+      "last_op_id": "4004",
+      "priority": 1,
+    },
+  },
+  {
+    "data": {
+      "after": "4000",
+      "bucket": "b0a[]",
+      "data": undefined,
+      "has_more": false,
+      "next_after": "4003",
+    },
+  },
+  {
+    "data": {
+      "after": "0",
+      "bucket": "b0b[]",
+      "data": undefined,
+      "has_more": true,
+      "next_after": "1999",
+    },
+  },
+  {
+    "data": {
+      "after": "1999",
+      "bucket": "b0b[]",
+      "data": undefined,
+      "has_more": true,
+      "next_after": "3999",
+    },
+  },
+  {
+    "data": {
+      "after": "3999",
+      "bucket": "b0b[]",
+      "data": undefined,
+      "has_more": false,
+      "next_after": "4004",
+    },
+  },
+  {
+    "checkpoint_complete": {
+      "last_op_id": "4004",
+    },
+  },
+]
+`;
+
 exports[`sync - postgres > sync legacy non-raw data 1`] = `
 [
   {