@powersync/service-module-postgres-storage 0.0.0-dev-20250507151436 → 0.0.0-dev-20250611110033

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/CHANGELOG.md +49 -7
  2. package/dist/.tsbuildinfo +1 -1
  3. package/dist/@types/migrations/scripts/1749024804042-snapshot-progress.d.ts +3 -0
  4. package/dist/@types/storage/PostgresTestStorageFactoryGenerator.d.ts +5 -1
  5. package/dist/@types/storage/batch/PostgresBucketBatch.d.ts +9 -2
  6. package/dist/@types/storage/sync-rules/PostgresPersistedSyncRulesContent.d.ts +1 -0
  7. package/dist/@types/types/models/SourceTable.d.ts +3 -0
  8. package/dist/@types/types/models/SyncRules.d.ts +4 -0
  9. package/dist/@types/types/types.d.ts +2 -2
  10. package/dist/migrations/scripts/1684951997326-init.js.map +1 -1
  11. package/dist/migrations/scripts/1749024804042-snapshot-progress.js +110 -0
  12. package/dist/migrations/scripts/1749024804042-snapshot-progress.js.map +1 -0
  13. package/dist/storage/PostgresSyncRulesStorage.js +15 -4
  14. package/dist/storage/PostgresSyncRulesStorage.js.map +1 -1
  15. package/dist/storage/PostgresTestStorageFactoryGenerator.js +48 -37
  16. package/dist/storage/PostgresTestStorageFactoryGenerator.js.map +1 -1
  17. package/dist/storage/batch/PostgresBucketBatch.js +80 -27
  18. package/dist/storage/batch/PostgresBucketBatch.js.map +1 -1
  19. package/dist/storage/batch/PostgresPersistedBatch.js +1 -1
  20. package/dist/storage/batch/PostgresPersistedBatch.js.map +1 -1
  21. package/dist/storage/sync-rules/PostgresPersistedSyncRulesContent.js +2 -0
  22. package/dist/storage/sync-rules/PostgresPersistedSyncRulesContent.js.map +1 -1
  23. package/dist/types/models/SourceTable.js +5 -2
  24. package/dist/types/models/SourceTable.js.map +1 -1
  25. package/dist/types/models/SyncRules.js +4 -0
  26. package/dist/types/models/SyncRules.js.map +1 -1
  27. package/package.json +9 -10
  28. package/src/migrations/scripts/1684951997326-init.ts +0 -1
  29. package/src/migrations/scripts/1749024804042-snapshot-progress.ts +43 -0
  30. package/src/storage/PostgresSyncRulesStorage.ts +15 -4
  31. package/src/storage/PostgresTestStorageFactoryGenerator.ts +48 -36
  32. package/src/storage/batch/PostgresBucketBatch.ts +95 -30
  33. package/src/storage/batch/PostgresPersistedBatch.ts +1 -1
  34. package/src/storage/sync-rules/PostgresPersistedSyncRulesContent.ts +2 -0
  35. package/src/types/models/SourceTable.ts +5 -2
  36. package/src/types/models/SyncRules.ts +4 -0
  37. package/test/src/__snapshots__/storage_sync.test.ts.snap +147 -0
  38. package/test/src/migrations.test.ts +10 -2
  39. package/test/src/util.ts +7 -2
package/src/storage/PostgresTestStorageFactoryGenerator.ts CHANGED
@@ -12,50 +12,62 @@ export type PostgresTestStorageOptions = {
   migrationAgent?: (config: PostgresStorageConfigDecoded) => PostgresMigrationAgent;
 };
 
-export const PostgresTestStorageFactoryGenerator = (factoryOptions: PostgresTestStorageOptions) => {
-  return async (options?: TestStorageOptions) => {
-    try {
-      const migrationManager: PowerSyncMigrationManager = new framework.MigrationManager();
-
-      const BASE_CONFIG = {
-        type: 'postgresql' as const,
-        uri: factoryOptions.url,
-        sslmode: 'disable' as const
-      };
-
-      const TEST_CONNECTION_OPTIONS = normalizePostgresStorageConfig(BASE_CONFIG);
-
-      await using migrationAgent = factoryOptions.migrationAgent
-        ? factoryOptions.migrationAgent(BASE_CONFIG)
-        : new PostgresMigrationAgent(BASE_CONFIG);
-      migrationManager.registerMigrationAgent(migrationAgent);
-
-      const mockServiceContext = { configuration: { storage: BASE_CONFIG } } as unknown as ServiceContext;
-
-      if (!options?.doNotClear) {
-        await migrationManager.migrate({
-          direction: framework.migrations.Direction.Down,
-          migrationContext: {
-            service_context: mockServiceContext
-          }
-        });
+export const postgresTestSetup = (factoryOptions: PostgresTestStorageOptions) => {
+  const BASE_CONFIG = {
+    type: 'postgresql' as const,
+    uri: factoryOptions.url,
+    sslmode: 'disable' as const
+  };
+
+  const TEST_CONNECTION_OPTIONS = normalizePostgresStorageConfig(BASE_CONFIG);
+
+  const migrate = async (direction: framework.migrations.Direction) => {
+    await using migrationManager: PowerSyncMigrationManager = new framework.MigrationManager();
+    await using migrationAgent = factoryOptions.migrationAgent
+      ? factoryOptions.migrationAgent(BASE_CONFIG)
+      : new PostgresMigrationAgent(BASE_CONFIG);
+    migrationManager.registerMigrationAgent(migrationAgent);
+
+    const mockServiceContext = { configuration: { storage: BASE_CONFIG } } as unknown as ServiceContext;
+
+    await migrationManager.migrate({
+      direction: framework.migrations.Direction.Down,
+      migrationContext: {
+        service_context: mockServiceContext
       }
+    });
 
+    if (direction == framework.migrations.Direction.Up) {
       await migrationManager.migrate({
         direction: framework.migrations.Direction.Up,
         migrationContext: {
           service_context: mockServiceContext
         }
       });
-
-      return new PostgresBucketStorageFactory({
-        config: TEST_CONNECTION_OPTIONS,
-        slot_name_prefix: 'test_'
-      });
-    } catch (ex) {
-      // Vitest does not display these errors nicely when using the `await using` syntx
-      console.error(ex, ex.cause);
-      throw ex;
     }
   };
+
+  return {
+    factory: async (options?: TestStorageOptions) => {
+      try {
+        if (!options?.doNotClear) {
+          await migrate(framework.migrations.Direction.Up);
+        }
+
+        return new PostgresBucketStorageFactory({
+          config: TEST_CONNECTION_OPTIONS,
+          slot_name_prefix: 'test_'
+        });
+      } catch (ex) {
+        // Vitest does not display these errors nicely when using the `await using` syntx
+        console.error(ex, ex.cause);
+        throw ex;
+      }
+    },
+    migrate
+  };
+};
+
+export const PostgresTestStorageFactoryGenerator = (factoryOptions: PostgresTestStorageOptions) => {
+  return postgresTestSetup(factoryOptions).factory;
 };
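
This refactor splits migration handling out of the storage factory: postgresTestSetup now returns both a factory and a standalone migrate function, while PostgresTestStorageFactoryGenerator remains a thin wrapper for existing callers. A minimal sketch of how a test could use the new setup object (the connection string is illustrative, and the relative import path assumes the caller sits inside this package's test tree):

    // Sketch only - assumes a reachable Postgres instance for storage tests.
    import * as framework from '@powersync/lib-services-framework';
    import { postgresTestSetup } from '../../src/storage/PostgresTestStorageFactoryGenerator.js';

    const setup = postgresTestSetup({ url: 'postgresql://localhost:5432/powersync_test' });

    // Reset the storage schema without constructing a factory:
    await setup.migrate(framework.migrations.Direction.Down);

    // Or construct a factory; unless doNotClear is set, this first runs the
    // Down migrations and then the Up migrations for a clean slate:
    const storageFactory = await setup.factory();
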
package/src/storage/batch/PostgresBucketBatch.ts CHANGED
@@ -4,12 +4,12 @@ import {
   container,
   ErrorCode,
   errors,
-  logger,
+  Logger,
   ReplicationAssertionError,
   ServiceAssertionError,
   ServiceError
 } from '@powersync/lib-services-framework';
-import { InternalOpId, storage, utils } from '@powersync/service-core';
+import { BucketStorageMarkRecordUnavailable, InternalOpId, storage, utils } from '@powersync/service-core';
 import * as sync_rules from '@powersync/service-sync-rules';
 import * as timers from 'timers/promises';
 import * as t from 'ts-codec';
@@ -22,6 +22,7 @@ import { cacheKey, encodedCacheKey, OperationBatch, RecordOperation } from './Op
 import { PostgresPersistedBatch } from './PostgresPersistedBatch.js';
 
 export interface PostgresBucketBatchOptions {
+  logger: Logger;
   db: lib_postgres.DatabaseClient;
   sync_rules: sync_rules.SqlSyncRules;
   group_id: number;
@@ -35,6 +36,8 @@
    */
   skip_existing_rows: boolean;
   batch_limits: RequiredOperationBatchLimits;
+
+  markRecordUnavailable: BucketStorageMarkRecordUnavailable | undefined;
 }
 
 /**
@@ -54,6 +57,8 @@ export class PostgresBucketBatch
   extends BaseObserver<storage.BucketBatchStorageListener>
   implements storage.BucketStorageBatch
 {
+  private logger: Logger;
+
   public last_flushed_op: InternalOpId | null = null;
 
   protected db: lib_postgres.DatabaseClient;
@@ -67,15 +72,18 @@
   protected readonly sync_rules: sync_rules.SqlSyncRules;
   protected batch: OperationBatch | null;
   private lastWaitingLogThrottled = 0;
+  private markRecordUnavailable: BucketStorageMarkRecordUnavailable | undefined;
 
   constructor(protected options: PostgresBucketBatchOptions) {
     super();
+    this.logger = options.logger;
    this.db = options.db;
     this.group_id = options.group_id;
     this.last_checkpoint_lsn = options.last_checkpoint_lsn;
     this.no_checkpoint_before_lsn = options.no_checkpoint_before_lsn;
     this.write_checkpoint_batch = [];
     this.sync_rules = options.sync_rules;
+    this.markRecordUnavailable = options.markRecordUnavailable;
     this.batch = null;
     this.persisted_op = null;
     if (options.keep_alive_op) {
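
These hunks thread two new options through PostgresBucketBatchOptions into the batch: a per-batch Logger (replacing the module-level logger) and an optional markRecordUnavailable callback used further down in the replication path. A rough sketch of the wiring from a caller's side; baseOptions and the callback body are hypothetical, and the real wiring presumably lives in PostgresSyncRulesStorage, which also changed in this release:

    // Sketch: supplying the two new options (values are illustrative).
    const batch = new PostgresBucketBatch({
      ...baseOptions, // db, sync_rules, group_id, batch_limits, etc. (hypothetical)
      logger: replicationLogger, // a Logger scoped to this replication stream
      markRecordUnavailable: (record) => {
        // Hypothetical callback: queue the record for a re-snapshot instead
        // of only logging a consistency warning.
        resnapshotQueue.push(record);
      }
    });
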
@@ -115,7 +123,7 @@
       return null;
     }
 
-    logger.debug(`Saving ${record.tag}:${record.before?.id}/${record.after?.id}`);
+    this.logger.debug(`Saving ${record.tag}:${record.before?.id}/${record.after?.id}`);
 
     this.batch ??= new OperationBatch(this.options.batch_limits);
     this.batch.push(new RecordOperation(record));
@@ -208,13 +216,7 @@
       return null;
     }
 
-    const currentSequence = await this.db.sql`
-      SELECT
-        LAST_VALUE AS value
-      FROM
-        op_id_sequence;
-    `.first<{ value: bigint }>();
-    return currentSequence!.value;
+    return this.getLastOpIdSequence(this.db);
   }
 
   async drop(sourceTables: storage.SourceTable[]): Promise<storage.FlushedResult | null> {
@@ -262,13 +264,7 @@
     const lastOp = await this.withReplicationTransaction(async (db) => {
       resumeBatch = await this.replicateBatch(db, batch);
 
-      const sequence = await db.sql`
-        SELECT
-          LAST_VALUE AS value
-        FROM
-          op_id_sequence;
-      `.first<{ value: bigint }>();
-      return sequence!.value;
+      return this.getLastOpIdSequence(db);
     });
 
     // null if done, set if we need another flush
@@ -291,13 +287,14 @@
     if (this.last_checkpoint_lsn != null && lsn < this.last_checkpoint_lsn) {
       // When re-applying transactions, don't create a new checkpoint until
       // we are past the last transaction.
-      logger.info(`Re-applied transaction ${lsn} - skipping checkpoint`);
+      this.logger.info(`Re-applied transaction ${lsn} - skipping checkpoint`);
+      // Cannot create a checkpoint yet - return false
       return false;
     }
 
     if (lsn < this.no_checkpoint_before_lsn) {
       if (Date.now() - this.lastWaitingLogThrottled > 5_000) {
-        logger.info(
+        this.logger.info(
           `Waiting until ${this.no_checkpoint_before_lsn} before creating checkpoint, currently at ${lsn}. Persisted op: ${this.persisted_op}`
         );
         this.lastWaitingLogThrottled = Date.now();
@@ -317,12 +314,14 @@
           id = ${{ type: 'int4', value: this.group_id }}
       `.execute();
 
+      // Cannot create a checkpoint yet - return false
       return false;
     }
 
     // Don't create a checkpoint if there were no changes
     if (!createEmptyCheckpoints && this.persisted_op == null) {
-      return false;
+      // Nothing to commit - return true
+      return true;
     }
 
     const now = new Date().toISOString();
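
Note the semantic change in the last hunk: with no changes and empty checkpoints disabled, commit() now reports success (true, "nothing to commit") rather than false, which is reserved for "cannot create a checkpoint yet". Sketched from a hypothetical caller:

    // Sketch: interpreting commit()'s return value after this change.
    const committed = await batch.commit(currentLsn);
    if (!committed) {
      // Either still before no_checkpoint_before_lsn, or re-applying an
      // old transaction - retry once more of the stream is processed.
    }
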
@@ -345,6 +344,7 @@
         keepalive_op = ${{ type: 'int8', value: update.keepalive_op }},
         last_fatal_error = ${{ type: 'varchar', value: update.last_fatal_error }},
         snapshot_done = ${{ type: 'bool', value: update.snapshot_done }},
+        snapshot_lsn = NULL,
         last_keepalive_ts = ${{ type: 1184, value: update.last_keepalive_ts }},
         last_checkpoint = COALESCE(
           ${{ type: 'int8', value: update.last_checkpoint }},
@@ -383,7 +383,7 @@
     if (this.persisted_op != null) {
       // The commit may have been skipped due to "no_checkpoint_before_lsn".
       // Apply it now if relevant
-      logger.info(`Commit due to keepalive at ${lsn} / ${this.persisted_op}`);
+      this.logger.info(`Commit due to keepalive at ${lsn} / ${this.persisted_op}`);
       return await this.commit(lsn);
     }
 
@@ -391,6 +391,7 @@
       UPDATE sync_rules
       SET
         snapshot_done = ${{ type: 'bool', value: true }},
+        snapshot_lsn = NULL,
         last_checkpoint_lsn = ${{ type: 'varchar', value: lsn }},
         last_fatal_error = ${{ type: 'varchar', value: null }},
         last_keepalive_ts = ${{ type: 1184, value: new Date().toISOString() }}
@@ -411,6 +412,16 @@
     return true;
   }
 
+  async setSnapshotLsn(lsn: string): Promise<void> {
+    await this.db.sql`
+      UPDATE sync_rules
+      SET
+        snapshot_lsn = ${{ type: 'varchar', value: lsn }}
+      WHERE
+        id = ${{ type: 'int4', value: this.group_id }}
+    `.execute();
+  }
+
   async markSnapshotDone(
     tables: storage.SourceTable[],
     no_checkpoint_before_lsn: string
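
The new setSnapshotLsn method persists the snapshot_lsn column added to the SyncRules model below; the commit() and keepalive() hunks above reset it to NULL once a consistent checkpoint is reached. A sketch of the intended flow for a replication stream that must resume an interrupted initial snapshot (names are illustrative):

    // Sketch: record the LSN the snapshot is consistent with, so a restart
    // can resume the snapshot instead of starting over.
    await batch.setSnapshotLsn(snapshotStartLsn); // e.g. from pg_current_wal_lsn()
    // ... copy table rows, reporting progress via updateTableProgress() ...
    // Once the snapshot is committed, snapshot_lsn is cleared server-side.
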
@@ -421,7 +432,10 @@
       await db.sql`
         UPDATE source_tables
         SET
-          snapshot_done = ${{ type: 'bool', value: true }}
+          snapshot_done = ${{ type: 'bool', value: true }},
+          snapshot_total_estimated_count = NULL,
+          snapshot_replicated_count = NULL,
+          snapshot_last_key = NULL
         WHERE
           id IN (
             SELECT
@@ -460,6 +474,31 @@
     });
   }
 
+  async updateTableProgress(
+    table: storage.SourceTable,
+    progress: Partial<storage.TableSnapshotStatus>
+  ): Promise<storage.SourceTable> {
+    const copy = table.clone();
+    const snapshotStatus = {
+      totalEstimatedCount: progress.totalEstimatedCount ?? copy.snapshotStatus?.totalEstimatedCount ?? 0,
+      replicatedCount: progress.replicatedCount ?? copy.snapshotStatus?.replicatedCount ?? 0,
+      lastKey: progress.lastKey ?? copy.snapshotStatus?.lastKey ?? null
+    };
+    copy.snapshotStatus = snapshotStatus;
+
+    await this.db.sql`
+      UPDATE source_tables
+      SET
+        snapshot_total_estimated_count = ${{ type: 'int4', value: snapshotStatus.totalEstimatedCount }},
+        snapshot_replicated_count = ${{ type: 'int4', value: snapshotStatus.replicatedCount }},
+        snapshot_last_key = ${{ type: 'bytea', value: snapshotStatus.lastKey }}
+      WHERE
+        id = ${{ type: 'varchar', value: table.id }}
+    `.execute();
+
+    return copy;
+  }
+
   addCustomWriteCheckpoint(checkpoint: storage.BatchedCustomWriteCheckpointOptions): void {
     this.write_checkpoint_batch.push({
       ...checkpoint,
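
updateTableProgress merges a partial TableSnapshotStatus into a clone of the SourceTable and persists the three new snapshot_* columns, which markSnapshotDone above resets to NULL on completion. A sketch of incremental progress reporting during a table copy; the chunking and key serialization are illustrative:

    // Sketch: report progress after each copied chunk of rows.
    let table = sourceTable;
    table = await batch.updateTableProgress(table, {
      totalEstimatedCount: estimatedRowCount, // e.g. from pg_class.reltuples
      replicatedCount: rowsCopiedSoFar,
      lastKey: lastPrimaryKeyBytes // serialized key of the last copied row
    });
    // Keep using the returned clone: later partial updates fall back to the
    // snapshotStatus it carries.
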
@@ -669,10 +708,19 @@
         existingBuckets = [];
         existingLookups = [];
         // Log to help with debugging if there was a consistency issue
+
         if (this.options.store_current_data) {
-          logger.warn(
-            `Cannot find previous record for update on ${record.sourceTable.qualifiedName}: ${beforeId} / ${record.before?.id}`
-          );
+          if (this.markRecordUnavailable != null) {
+            // This will trigger a "resnapshot" of the record.
+            // This is not relevant if storeCurrentData is false, since we'll get the full row
+            // directly in the replication stream.
+            this.markRecordUnavailable(record);
+          } else {
+            // Log to help with debugging if there was a consistency issue
+            this.logger.warn(
+              `Cannot find previous record for update on ${record.sourceTable.qualifiedName}: ${beforeId} / ${record.before?.id}`
+            );
+          }
         }
       } else {
         existingBuckets = result.buckets;
@@ -689,8 +737,8 @@
         existingBuckets = [];
         existingLookups = [];
         // Log to help with debugging if there was a consistency issue
-        if (this.options.store_current_data) {
-          logger.warn(
+        if (this.options.store_current_data && this.markRecordUnavailable == null) {
+          this.logger.warn(
             `Cannot find previous record for delete on ${record.sourceTable.qualifiedName}: ${beforeId} / ${record.before?.id}`
           );
         }
@@ -783,7 +831,7 @@
           }
         }
       );
-      logger.error(
+      this.logger.error(
         `Failed to evaluate data query on ${record.sourceTable.qualifiedName}.${record.after?.id}: ${error.error}`
       );
     }
@@ -823,7 +871,7 @@
           }
         }
       );
-      logger.error(
+      this.logger.error(
        `Failed to evaluate parameter query on ${record.sourceTable.qualifiedName}.${after.id}: ${error.error}`
       );
     }
@@ -873,7 +921,7 @@
    * TODO maybe share this with an abstract class
    */
   protected getTableEvents(table: storage.SourceTable): sync_rules.SqlEventDescriptor[] {
-    return this.sync_rules.event_descriptors.filter((evt) =>
+    return this.sync_rules.eventDescriptors.filter((evt) =>
       [...evt.getSourceTables()].some((sourceTable) => sourceTable.matches(table))
     );
   }
@@ -895,6 +943,23 @@
       `.execute();
     }
   }
+
+  private async getLastOpIdSequence(db: lib_postgres.AbstractPostgresConnection) {
+    // When no op_id has been generated, last_value = 1 and nextval() will be 1.
+    // To cater for this case, we check is_called, and default to 0 if no value has been generated.
+    const sequence = await db.sql`
+      SELECT
+        (
+          CASE
+            WHEN is_called THEN last_value
+            ELSE 0
+          END
+        ) AS value
+      FROM
+        op_id_sequence;
+    `.first<{ value: bigint }>();
+    return sequence!.value;
+  }
 }
 
 /**
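
The is_called guard fixes a subtle ambiguity in the old SELECT last_value query: a freshly created Postgres sequence reports last_value = 1 with is_called = false before the first nextval() call, so last_value alone cannot distinguish "no op_id generated yet" from "op_id 1 generated". The same logic as a standalone TypeScript sketch:

    // Sketch: the CASE WHEN is_called logic, mirrored in plain TypeScript.
    function lastGeneratedOpId(row: { last_value: bigint; is_called: boolean }): bigint {
      // Before the first nextval(), last_value already equals the start value (1)
      // even though no op_id has actually been handed out yet.
      return row.is_called ? row.last_value : 0n;
    }
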
package/src/storage/batch/PostgresPersistedBatch.ts CHANGED
@@ -152,7 +152,7 @@ export class PostgresPersistedBatch {
     const base64 = binLookup.toString('base64');
     remaining_lookups.delete(base64);
     const hexLookup = binLookup.toString('hex');
-    const serializedBucketParameters = JSONBig.stringify(result.bucket_parameters);
+    const serializedBucketParameters = JSONBig.stringify(result.bucketParameters);
     this.parameterDataInserts.push({
       group_id: this.group_id,
       source_table: table.id,
package/src/storage/sync-rules/PostgresPersistedSyncRulesContent.ts CHANGED
@@ -14,6 +14,7 @@ export class PostgresPersistedSyncRulesContent implements storage.PersistedSyncR
   public readonly last_fatal_error: string | null;
   public readonly last_keepalive_ts: Date | null;
   public readonly last_checkpoint_ts: Date | null;
+  public readonly active: boolean;
   current_lock: storage.ReplicationLock | null = null;
 
   constructor(
@@ -27,6 +28,7 @@
     this.last_fatal_error = row.last_fatal_error;
     this.last_checkpoint_ts = row.last_checkpoint_ts ? new Date(row.last_checkpoint_ts) : null;
     this.last_keepalive_ts = row.last_keepalive_ts ? new Date(row.last_keepalive_ts) : null;
+    this.active = row.state == 'ACTIVE';
   }
 
   parsed(options: storage.ParseSyncRulesOptions): storage.PersistedSyncRules {
package/src/types/models/SourceTable.ts CHANGED
@@ -1,5 +1,5 @@
 import * as t from 'ts-codec';
-import { bigint, jsonb, jsonb_raw, pgwire_number } from '../codecs.js';
+import { bigint, hexBuffer, jsonb, jsonb_raw, pgwire_number } from '../codecs.js';
 
 export type StoredRelationId = {
   object_id: string | number | undefined;
@@ -25,7 +25,10 @@ export const SourceTable = t.object({
   schema_name: t.string,
   table_name: t.string,
   replica_id_columns: t.Null.or(jsonb(t.array(ColumnDescriptor))),
-  snapshot_done: t.boolean
+  snapshot_done: t.boolean,
+  snapshot_total_estimated_count: t.Null.or(bigint),
+  snapshot_replicated_count: t.Null.or(bigint),
+  snapshot_last_key: t.Null.or(hexBuffer)
 });
 
 export type SourceTable = t.Encoded<typeof SourceTable>;
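
The three new nullable columns make per-table snapshot progress observable. A small sketch of deriving a completion percentage from them (the helper and row shape are hypothetical; the field names mirror the codec above):

    // Sketch: completion percentage from the new snapshot_* columns.
    function snapshotProgressPercent(row: {
      snapshot_total_estimated_count: bigint | null;
      snapshot_replicated_count: bigint | null;
    }): number | null {
      if (row.snapshot_total_estimated_count == null || row.snapshot_replicated_count == null) {
        return null; // progress not tracked (e.g. snapshot already done)
      }
      if (row.snapshot_total_estimated_count <= 0n) {
        return 100; // empty table or estimate unavailable
      }
      return Number((row.snapshot_replicated_count * 100n) / row.snapshot_total_estimated_count);
    }
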
package/src/types/models/SyncRules.ts CHANGED
@@ -11,6 +11,10 @@ export const SyncRules = t.object({
    * Can only be false if state == PROCESSING.
    */
   snapshot_done: t.boolean,
+  /**
+   * May be set if snapshot_done = false, if the replication stream requires it.
+   */
+  snapshot_lsn: t.Null.or(t.string),
   /**
    * The last consistent checkpoint.
    *
package/test/src/__snapshots__/storage_sync.test.ts.snap CHANGED
@@ -309,6 +309,153 @@ exports[`sync - postgres > sync global data 1`] = `
   ]
 `;
 
+exports[`sync - postgres > sync interrupts low-priority buckets on new checkpoints (2) 1`] = `
+[
+  {
+    "checkpoint": {
+      "buckets": [
+        {
+          "bucket": "b0a[]",
+          "checksum": -659831575,
+          "count": 2000,
+          "priority": 2,
+        },
+        {
+          "bucket": "b0b[]",
+          "checksum": -659831575,
+          "count": 2000,
+          "priority": 2,
+        },
+        {
+          "bucket": "b1[]",
+          "checksum": -1096116670,
+          "count": 1,
+          "priority": 1,
+        },
+      ],
+      "last_op_id": "4001",
+      "write_checkpoint": undefined,
+    },
+  },
+  {
+    "data": {
+      "after": "0",
+      "bucket": "b1[]",
+      "data": undefined,
+      "has_more": false,
+      "next_after": "1",
+    },
+  },
+  {
+    "partial_checkpoint_complete": {
+      "last_op_id": "4001",
+      "priority": 1,
+    },
+  },
+  {
+    "data": {
+      "after": "0",
+      "bucket": "b0a[]",
+      "data": undefined,
+      "has_more": true,
+      "next_after": "2000",
+    },
+  },
+  {
+    "data": {
+      "after": "2000",
+      "bucket": "b0a[]",
+      "data": undefined,
+      "has_more": true,
+      "next_after": "4000",
+    },
+  },
+  {
+    "checkpoint_diff": {
+      "last_op_id": "4004",
+      "removed_buckets": [],
+      "updated_buckets": [
+        {
+          "bucket": "b0a[]",
+          "checksum": 883076828,
+          "count": 2001,
+          "priority": 2,
+        },
+        {
+          "bucket": "b0b[]",
+          "checksum": 883076828,
+          "count": 2001,
+          "priority": 2,
+        },
+        {
+          "bucket": "b1[]",
+          "checksum": 1841937527,
+          "count": 2,
+          "priority": 1,
+        },
+      ],
+      "write_checkpoint": undefined,
+    },
+  },
+  {
+    "data": {
+      "after": "1",
+      "bucket": "b1[]",
+      "data": undefined,
+      "has_more": false,
+      "next_after": "4002",
+    },
+  },
+  {
+    "partial_checkpoint_complete": {
+      "last_op_id": "4004",
+      "priority": 1,
+    },
+  },
+  {
+    "data": {
+      "after": "4000",
+      "bucket": "b0a[]",
+      "data": undefined,
+      "has_more": false,
+      "next_after": "4003",
+    },
+  },
+  {
+    "data": {
+      "after": "0",
+      "bucket": "b0b[]",
+      "data": undefined,
+      "has_more": true,
+      "next_after": "1999",
+    },
+  },
+  {
+    "data": {
+      "after": "1999",
+      "bucket": "b0b[]",
+      "data": undefined,
+      "has_more": true,
+      "next_after": "3999",
+    },
+  },
+  {
+    "data": {
+      "after": "3999",
+      "bucket": "b0b[]",
+      "data": undefined,
+      "has_more": false,
+      "next_after": "4004",
+    },
+  },
+  {
+    "checkpoint_complete": {
+      "last_op_id": "4004",
+    },
+  },
+]
+`;
+
 exports[`sync - postgres > sync legacy non-raw data 1`] = `
 [
   {
package/test/src/migrations.test.ts CHANGED
@@ -1,15 +1,23 @@
-import { describe, expect, it } from 'vitest';
+import { beforeEach, describe, expect, it } from 'vitest';
 
+import { Direction } from '@powersync/lib-services-framework';
 import { register } from '@powersync/service-core-tests';
 import { PostgresMigrationAgent } from '../../src/migrations/PostgresMigrationAgent.js';
 import { env } from './env.js';
-import { POSTGRES_STORAGE_FACTORY } from './util.js';
+import { POSTGRES_STORAGE_FACTORY, POSTGRES_STORAGE_SETUP } from './util.js';
 
 const MIGRATION_AGENT_FACTORY = () => {
   return new PostgresMigrationAgent({ type: 'postgresql', uri: env.PG_STORAGE_TEST_URL, sslmode: 'disable' });
 };
 
 describe('Migrations', () => {
+  beforeEach(async () => {
+    // The migration tests clear the migration store, without running the down migrations.
+    // This ensures all the down migrations have been run before.
+    const setup = POSTGRES_STORAGE_SETUP;
+    await setup.migrate(Direction.Down);
+  });
+
   register.registerMigrationTests(MIGRATION_AGENT_FACTORY);
 
   it('Should have tables declared', async () => {
package/test/src/util.ts CHANGED
@@ -2,7 +2,10 @@ import path from 'path';
 import { fileURLToPath } from 'url';
 import { normalizePostgresStorageConfig } from '../../src//types/types.js';
 import { PostgresMigrationAgent } from '../../src/migrations/PostgresMigrationAgent.js';
-import { PostgresTestStorageFactoryGenerator } from '../../src/storage/PostgresTestStorageFactoryGenerator.js';
+import {
+  postgresTestSetup,
+  PostgresTestStorageFactoryGenerator
+} from '../../src/storage/PostgresTestStorageFactoryGenerator.js';
 import { env } from './env.js';
 
 const __filename = fileURLToPath(import.meta.url);
@@ -28,7 +31,9 @@ class TestPostgresMigrationAgent extends PostgresMigrationAgent {
   }
 }
 
-export const POSTGRES_STORAGE_FACTORY = PostgresTestStorageFactoryGenerator({
+export const POSTGRES_STORAGE_SETUP = postgresTestSetup({
   url: env.PG_STORAGE_TEST_URL,
   migrationAgent: (config) => new TestPostgresMigrationAgent(config)
 });
+
+export const POSTGRES_STORAGE_FACTORY = POSTGRES_STORAGE_SETUP.factory;