@powersync/service-module-postgres-storage 0.0.0-dev-20250214100224 → 0.0.0-dev-20250227082606

This diff shows the changes between two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in those registries.
@@ -1,6 +1,15 @@
  import * as lib_postgres from '@powersync/lib-service-postgres';
- import { DisposableObserver, ReplicationAssertionError } from '@powersync/lib-services-framework';
- import { storage, utils } from '@powersync/service-core';
+ import { ReplicationAssertionError } from '@powersync/lib-services-framework';
+ import {
+   BroadcastIterable,
+   CHECKPOINT_INVALIDATE_ALL,
+   CheckpointChanges,
+   GetCheckpointChangesOptions,
+   LastValueSink,
+   storage,
+   utils,
+   WatchWriteCheckpointOptions
+ } from '@powersync/service-core';
  import { JSONBig } from '@powersync/service-jsonbig';
  import * as sync_rules from '@powersync/service-sync-rules';
  import * as uuid from 'uuid';
@@ -8,14 +17,18 @@ import { BIGINT_MAX } from '../types/codecs.js';
  import { models, RequiredOperationBatchLimits } from '../types/types.js';
  import { replicaIdToSubkey } from '../utils/bson.js';
  import { mapOpEntry } from '../utils/bucket-data.js';
+ import * as timers from 'timers/promises';

+ import * as framework from '@powersync/lib-services-framework';
  import { StatementParam } from '@powersync/service-jpgwire';
- import { StoredRelationId } from '../types/models/SourceTable.js';
+ import { SourceTableDecoded, StoredRelationId } from '../types/models/SourceTable.js';
  import { pick } from '../utils/ts-codec.js';
  import { PostgresBucketBatch } from './batch/PostgresBucketBatch.js';
  import { PostgresWriteCheckpointAPI } from './checkpoints/PostgresWriteCheckpointAPI.js';
  import { PostgresBucketStorageFactory } from './PostgresBucketStorageFactory.js';
  import { PostgresCompactor } from './PostgresCompactor.js';
+ import { wrapWithAbort } from 'ix/asynciterable/operators/withabort.js';
+ import { Decoded } from 'ts-codec';

  export type PostgresSyncRulesStorageOptions = {
    factory: PostgresBucketStorageFactory;
@@ -26,7 +39,7 @@ export type PostgresSyncRulesStorageOptions = {
  };

  export class PostgresSyncRulesStorage
-   extends DisposableObserver<storage.SyncRulesBucketStorageListener>
+   extends framework.BaseObserver<storage.SyncRulesBucketStorageListener>
    implements storage.SyncRulesBucketStorage
  {
    public readonly group_id: number;
@@ -34,6 +47,8 @@ export class PostgresSyncRulesStorage
    public readonly slot_name: string;
    public readonly factory: PostgresBucketStorageFactory;

+   private sharedIterator = new BroadcastIterable((signal) => this.watchActiveCheckpoint(signal));
+
    protected db: lib_postgres.DatabaseClient;
    protected writeCheckpointAPI: PostgresWriteCheckpointAPI;

@@ -151,21 +166,39 @@ export class PostgresSyncRulesStorage
        type_oid: typeof column.typeId !== 'undefined' ? Number(column.typeId) : column.typeId
      }));
      return this.db.transaction(async (db) => {
-       let sourceTableRow = await db.sql`
-         SELECT
-           *
-         FROM
-           source_tables
-         WHERE
-           group_id = ${{ type: 'int4', value: group_id }}
-           AND connection_id = ${{ type: 'int4', value: connection_id }}
-           AND relation_id = ${{ type: 'jsonb', value: { object_id: objectId } satisfies StoredRelationId }}
-           AND schema_name = ${{ type: 'varchar', value: schema }}
-           AND table_name = ${{ type: 'varchar', value: table }}
-           AND replica_id_columns = ${{ type: 'jsonb', value: columns }}
-       `
-         .decoded(models.SourceTable)
-         .first();
+       let sourceTableRow: SourceTableDecoded | null;
+       if (objectId != null) {
+         sourceTableRow = await db.sql`
+           SELECT
+             *
+           FROM
+             source_tables
+           WHERE
+             group_id = ${{ type: 'int4', value: group_id }}
+             AND connection_id = ${{ type: 'int4', value: connection_id }}
+             AND relation_id = ${{ type: 'jsonb', value: { object_id: objectId } satisfies StoredRelationId }}
+             AND schema_name = ${{ type: 'varchar', value: schema }}
+             AND table_name = ${{ type: 'varchar', value: table }}
+             AND replica_id_columns = ${{ type: 'jsonb', value: columns }}
+         `
+           .decoded(models.SourceTable)
+           .first();
+       } else {
+         sourceTableRow = await db.sql`
+           SELECT
+             *
+           FROM
+             source_tables
+           WHERE
+             group_id = ${{ type: 'int4', value: group_id }}
+             AND connection_id = ${{ type: 'int4', value: connection_id }}
+             AND schema_name = ${{ type: 'varchar', value: schema }}
+             AND table_name = ${{ type: 'varchar', value: table }}
+             AND replica_id_columns = ${{ type: 'jsonb', value: columns }}
+         `
+           .decoded(models.SourceTable)
+           .first();
+       }

        if (sourceTableRow == null) {
          const row = await db.sql`
@@ -184,7 +217,7 @@ export class PostgresSyncRulesStorage
            ${{ type: 'varchar', value: uuid.v4() }},
            ${{ type: 'int4', value: group_id }},
            ${{ type: 'int4', value: connection_id }},
-           --- The objectId can be string | number, we store it as jsonb value
+           --- The objectId can be string | number | undefined, we store it as jsonb value
            ${{ type: 'jsonb', value: { object_id: objectId } satisfies StoredRelationId }},
            ${{ type: 'varchar', value: schema }},
            ${{ type: 'varchar', value: table }},
@@ -211,25 +244,47 @@ export class PostgresSyncRulesStorage
        sourceTable.syncData = options.sync_rules.tableSyncsData(sourceTable);
        sourceTable.syncParameters = options.sync_rules.tableSyncsParameters(sourceTable);

-       const truncatedTables = await db.sql`
-         SELECT
-           *
-         FROM
-           source_tables
-         WHERE
-           group_id = ${{ type: 'int4', value: group_id }}
-           AND connection_id = ${{ type: 'int4', value: connection_id }}
-           AND id != ${{ type: 'varchar', value: sourceTableRow!.id }}
-           AND (
-             relation_id = ${{ type: 'jsonb', value: { object_id: objectId } satisfies StoredRelationId }}
-             OR (
+       let truncatedTables: SourceTableDecoded[] = [];
+       if (objectId != null) {
+         // relation_id present - check for renamed tables
+         truncatedTables = await db.sql`
+           SELECT
+             *
+           FROM
+             source_tables
+           WHERE
+             group_id = ${{ type: 'int4', value: group_id }}
+             AND connection_id = ${{ type: 'int4', value: connection_id }}
+             AND id != ${{ type: 'varchar', value: sourceTableRow!.id }}
+             AND (
+               relation_id = ${{ type: 'jsonb', value: { object_id: objectId } satisfies StoredRelationId }}
+               OR (
+                 schema_name = ${{ type: 'varchar', value: schema }}
+                 AND table_name = ${{ type: 'varchar', value: table }}
+               )
+             )
+         `
+           .decoded(models.SourceTable)
+           .rows();
+       } else {
+         // relation_id not present - only check for changed replica_id_columns
+         truncatedTables = await db.sql`
+           SELECT
+             *
+           FROM
+             source_tables
+           WHERE
+             group_id = ${{ type: 'int4', value: group_id }}
+             AND connection_id = ${{ type: 'int4', value: connection_id }}
+             AND id != ${{ type: 'varchar', value: sourceTableRow!.id }}
+             AND (
              schema_name = ${{ type: 'varchar', value: schema }}
              AND table_name = ${{ type: 'varchar', value: table }}
            )
-           )
-       `
-         .decoded(models.SourceTable)
-         .rows();
+         `
+           .decoded(models.SourceTable)
+           .rows();
+       }

        return {
          table: sourceTable,
@@ -272,7 +327,7 @@ export class PostgresSyncRulesStorage

      const checkpoint_lsn = syncRules?.last_checkpoint_lsn ?? null;

-     await using batch = new PostgresBucketBatch({
+     const batch = new PostgresBucketBatch({
        db: this.db,
        sync_rules: this.sync_rules.parsed(options).sync_rules,
        group_id: this.group_id,
@@ -602,7 +657,10 @@ export class PostgresSyncRulesStorage
        SET
          state = ${{ type: 'varchar', value: storage.SyncRuleState.STOP }}
        WHERE
-         state = ${{ type: 'varchar', value: storage.SyncRuleState.ACTIVE }}
+         (
+           state = ${{ value: storage.SyncRuleState.ACTIVE, type: 'varchar' }}
+           OR state = ${{ value: storage.SyncRuleState.ERRORED, type: 'varchar' }}
+         )
          AND id != ${{ type: 'int4', value: this.group_id }}
      `.execute();
    });
@@ -663,4 +721,139 @@ export class PostgresSyncRulesStorage
        })
      );
    }
+
+   async getActiveCheckpoint(): Promise<storage.ReplicationCheckpoint> {
+     const activeCheckpoint = await this.db.sql`
+       SELECT
+         id,
+         last_checkpoint,
+         last_checkpoint_lsn
+       FROM
+         sync_rules
+       WHERE
+         state = ${{ value: storage.SyncRuleState.ACTIVE, type: 'varchar' }}
+         OR state = ${{ value: storage.SyncRuleState.ERRORED, type: 'varchar' }}
+       ORDER BY
+         id DESC
+       LIMIT
+         1
+     `
+       .decoded(models.ActiveCheckpoint)
+       .first();
+
+     return this.makeActiveCheckpoint(activeCheckpoint);
+   }
+
+   async *watchWriteCheckpoint(options: WatchWriteCheckpointOptions): AsyncIterable<storage.StorageCheckpointUpdate> {
+     let lastCheckpoint: utils.OpId | null = null;
+     let lastWriteCheckpoint: bigint | null = null;
+
+     const { signal, user_id } = options;
+
+     const iter = wrapWithAbort(this.sharedIterator, signal);
+     for await (const cp of iter) {
+       const { checkpoint, lsn } = cp;
+
+       // lsn changes are not important by itself.
+       // What is important is:
+       // 1. checkpoint (op_id) changes.
+       // 2. write checkpoint changes for the specific user
+       const lsnFilters: Record<string, string> = lsn ? { 1: lsn } : {};
+
+       const currentWriteCheckpoint = await this.lastWriteCheckpoint({
+         user_id,
+         heads: {
+           ...lsnFilters
+         }
+       });
+
+       if (currentWriteCheckpoint == lastWriteCheckpoint && checkpoint == lastCheckpoint) {
+         // No change - wait for next one
+         // In some cases, many LSNs may be produced in a short time.
+         // Add a delay to throttle the write checkpoint lookup a bit.
+         await timers.setTimeout(20 + 10 * Math.random());
+         continue;
+       }
+
+       lastWriteCheckpoint = currentWriteCheckpoint;
+       lastCheckpoint = checkpoint;
+
+       yield {
+         base: cp,
+         writeCheckpoint: currentWriteCheckpoint,
+         update: CHECKPOINT_INVALIDATE_ALL
+       };
+     }
+   }
+
+   protected async *watchActiveCheckpoint(signal: AbortSignal): AsyncIterable<storage.ReplicationCheckpoint> {
+     const doc = await this.db.sql`
+       SELECT
+         id,
+         last_checkpoint,
+         last_checkpoint_lsn
+       FROM
+         sync_rules
+       WHERE
+         state = ${{ value: storage.SyncRuleState.ACTIVE, type: 'varchar' }}
+         OR state = ${{ value: storage.SyncRuleState.ERRORED, type: 'varchar' }}
+       LIMIT
+         1
+     `
+       .decoded(models.ActiveCheckpoint)
+       .first();
+
+     if (doc == null) {
+       // Abort the connections - clients will have to retry later.
+       throw new framework.ServiceError(framework.ErrorCode.PSYNC_S2302, 'No active sync rules available');
+     }
+
+     const sink = new LastValueSink<string>(undefined);
+
+     const disposeListener = this.db.registerListener({
+       notification: (notification) => sink.next(notification.payload)
+     });
+
+     signal.addEventListener('aborted', async () => {
+       disposeListener();
+       sink.complete();
+     });
+
+     yield this.makeActiveCheckpoint(doc);
+
+     let lastOp: storage.ReplicationCheckpoint | null = null;
+     for await (const payload of sink.withSignal(signal)) {
+       if (signal.aborted) {
+         return;
+       }
+
+       const notification = models.ActiveCheckpointNotification.decode(payload);
+       if (notification.active_checkpoint == null) {
+         continue;
+       }
+       if (Number(notification.active_checkpoint.id) != doc.id) {
+         // Active sync rules changed - abort and restart the stream
+         break;
+       }
+
+       const activeCheckpoint = this.makeActiveCheckpoint(notification.active_checkpoint);
+
+       if (lastOp == null || activeCheckpoint.lsn != lastOp.lsn || activeCheckpoint.checkpoint != lastOp.checkpoint) {
+         lastOp = activeCheckpoint;
+         yield activeCheckpoint;
+       }
+     }
+   }
+
+   async getCheckpointChanges(options: GetCheckpointChangesOptions): Promise<CheckpointChanges> {
+     // We do not track individual changes yet
+     return CHECKPOINT_INVALIDATE_ALL;
+   }
+
+   private makeActiveCheckpoint(row: models.ActiveCheckpointDecoded | null) {
+     return {
+       checkpoint: utils.timestampToOpId(row?.last_checkpoint ?? 0n),
+       lsn: row?.last_checkpoint_lsn ?? null
+     } satisfies storage.ReplicationCheckpoint;
+   }
  }
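
The block above is the substantive change in this release: checkpoint watching moves into the storage module. A single `watchActiveCheckpoint` stream is shared through the `sharedIterator` `BroadcastIterable`, so any number of concurrent `watchWriteCheckpoint` consumers reuse one database listener instead of opening one each. A minimal consumption sketch (illustrative only, not part of the diff; the `syncRulesStorage` instance and the user id are assumed):

// Sketch: consuming the new per-user checkpoint stream.
// `syncRulesStorage` is an already-constructed PostgresSyncRulesStorage.
const controller = new AbortController();

for await (const update of syncRulesStorage.watchWriteCheckpoint({
  user_id: 'user-123', // hypothetical user id
  signal: controller.signal // abort to stop watching
})) {
  // `base` carries the shared replication checkpoint (op id + LSN);
  // `writeCheckpoint` is the per-user write checkpoint, if any.
  console.log(update.base.checkpoint, update.base.lsn, update.writeCheckpoint);
}

Because every consumer filters the same broadcast stream, the only per-user work is the `lastWriteCheckpoint` lookup, which the `timers.setTimeout` call above deliberately throttles.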
@@ -1,7 +1,7 @@
  import * as lib_postgres from '@powersync/lib-service-postgres';
  import {
+   BaseObserver,
    container,
-   DisposableObserver,
    ErrorCode,
    errors,
    logger,
@@ -51,7 +51,7 @@ type StatefulCheckpointDecoded = t.Decoded<typeof StatefulCheckpoint>;
  const MAX_ROW_SIZE = 15 * 1024 * 1024;

  export class PostgresBucketBatch
-   extends DisposableObserver<storage.BucketBatchStorageListener>
+   extends BaseObserver<storage.BucketBatchStorageListener>
    implements storage.BucketStorageBatch
  {
    public last_flushed_op: bigint | null = null;
@@ -87,6 +87,10 @@ export class PostgresBucketBatch
      return this.last_checkpoint_lsn;
    }

+   async [Symbol.asyncDispose]() {
+     super.clearListeners();
+   }
+
    async save(record: storage.SaveOptions): Promise<storage.FlushedResult | null> {
      // TODO maybe share with abstract class
      const { after, before, sourceTable, tag } = record;
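
`PostgresBucketBatch` now extends `BaseObserver` and implements `Symbol.asyncDispose` itself, replacing the `DisposableObserver` base class; correspondingly, `startBatch` earlier in the diff constructs the batch with a plain `const` and takes over disposal. A minimal sketch of the explicit-resource-management pattern involved (illustrative only; requires TypeScript 5.2+ and a runtime with `Symbol.asyncDispose`, e.g. Node 20+; the `Batch` class is hypothetical):

class Batch {
  async [Symbol.asyncDispose]() {
    // Runs when an `await using` block exits, even on exceptions -
    // the place to release listeners, as the diff does with clearListeners().
    console.log('disposed');
  }
}

async function run() {
  await using batch = new Batch();
  // ... use batch; [Symbol.asyncDispose]() fires automatically here
}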
@@ -2,7 +2,7 @@ import * as t from 'ts-codec';
  import { bigint, jsonb, jsonb_raw, pgwire_number } from '../codecs.js';

  export type StoredRelationId = {
-   object_id: string | number;
+   object_id: string | number | undefined;
  };

  export const ColumnDescriptor = t.object({
@@ -53,6 +53,7 @@ export type RequiredOperationBatchLimits = Required<OperationBatchLimits>;

  export type NormalizedPostgresStorageConfig = pg_wire.NormalizedConnectionConfig & {
    batch_limits: RequiredOperationBatchLimits;
+   max_pool_size: number;
  };

  export const normalizePostgresStorageConfig = (
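
Widening `object_id` to `string | number | undefined` is what drives the two query paths seen earlier: when a connector supplies no relation id, the `undefined` property is dropped during JSON serialization, so a `relation_id` equality match is presumably no longer reliable and the lookup falls back to schema, table, and replica-id columns. A small sketch of that serialization behaviour (illustrative only; plain `JSON.stringify` stands in for the jsonb encoding):

type StoredRelationId = { object_id: string | number | undefined };

const withId: StoredRelationId = { object_id: 16385 };
const withoutId: StoredRelationId = { object_id: undefined };

// `undefined` properties vanish when serialized:
console.log(JSON.stringify(withId)); // {"object_id":16385}
console.log(JSON.stringify(withoutId)); // {}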
@@ -9,6 +9,7 @@ exports[`sync - postgres > compacting data - invalidate checkpoint 1`] = `
          "bucket": "mybucket[]",
          "checksum": -93886621,
          "count": 2,
+         "priority": 3,
        },
      ],
      "last_op_id": "2",
@@ -44,6 +45,7 @@ exports[`sync - postgres > compacting data - invalidate checkpoint 2`] = `
          "bucket": "mybucket[]",
          "checksum": 499012468,
          "count": 4,
+         "priority": 3,
        },
      ],
      "write_checkpoint": undefined,
@@ -102,6 +104,7 @@ exports[`sync - postgres > expiring token 1`] = `
          "bucket": "mybucket[]",
          "checksum": 0,
          "count": 0,
+         "priority": 3,
        },
      ],
      "last_op_id": "0",
@@ -124,6 +127,136 @@ exports[`sync - postgres > expiring token 2`] = `
  ]
  `;

+ exports[`sync - postgres > sends checkpoint complete line for empty checkpoint 1`] = `
+ [
+   {
+     "checkpoint": {
+       "buckets": [
+         {
+           "bucket": "mybucket[]",
+           "checksum": -1221282404,
+           "count": 1,
+           "priority": 3,
+         },
+       ],
+       "last_op_id": "1",
+       "write_checkpoint": undefined,
+     },
+   },
+   {
+     "data": {
+       "after": "0",
+       "bucket": "mybucket[]",
+       "data": [
+         {
+           "checksum": 3073684892,
+           "data": "{"id":"t1","description":"sync"}",
+           "object_id": "t1",
+           "object_type": "test",
+           "op": "PUT",
+           "op_id": "1",
+           "subkey": "02d285ac-4f96-5124-8fba-c6d1df992dd1",
+         },
+       ],
+       "has_more": false,
+       "next_after": "1",
+     },
+   },
+   {
+     "checkpoint_complete": {
+       "last_op_id": "1",
+     },
+   },
+   {
+     "checkpoint_diff": {
+       "last_op_id": "1",
+       "removed_buckets": [],
+       "updated_buckets": [],
+       "write_checkpoint": "1",
+     },
+   },
+   {
+     "checkpoint_complete": {
+       "last_op_id": "1",
+     },
+   },
+ ]
+ `;
+
+ exports[`sync - postgres > sync buckets in order 1`] = `
+ [
+   {
+     "checkpoint": {
+       "buckets": [
+         {
+           "bucket": "b0[]",
+           "checksum": 920318466,
+           "count": 1,
+           "priority": 2,
+         },
+         {
+           "bucket": "b1[]",
+           "checksum": -1382098757,
+           "count": 1,
+           "priority": 1,
+         },
+       ],
+       "last_op_id": "2",
+       "write_checkpoint": undefined,
+     },
+   },
+   {
+     "data": {
+       "after": "0",
+       "bucket": "b1[]",
+       "data": [
+         {
+           "checksum": 2912868539n,
+           "data": "{"id":"earlier","description":"Test 2"}",
+           "object_id": "earlier",
+           "object_type": "test",
+           "op": "PUT",
+           "op_id": "2",
+           "subkey": "243b0e26-87b2-578a-993c-5ac5b6f7fd64",
+         },
+       ],
+       "has_more": false,
+       "next_after": "2",
+     },
+   },
+   {
+     "partial_checkpoint_complete": {
+       "last_op_id": "2",
+       "priority": 1,
+     },
+   },
+   {
+     "data": {
+       "after": "0",
+       "bucket": "b0[]",
+       "data": [
+         {
+           "checksum": 920318466n,
+           "data": "{"id":"t1","description":"Test 1"}",
+           "object_id": "t1",
+           "object_type": "test",
+           "op": "PUT",
+           "op_id": "1",
+           "subkey": "02d285ac-4f96-5124-8fba-c6d1df992dd1",
+         },
+       ],
+       "has_more": false,
+       "next_after": "1",
+     },
+   },
+   {
+     "checkpoint_complete": {
+       "last_op_id": "2",
+     },
+   },
+ ]
+ `;
+
  exports[`sync - postgres > sync global data 1`] = `
  [
    {
@@ -133,6 +266,7 @@ exports[`sync - postgres > sync global data 1`] = `
          "bucket": "mybucket[]",
          "checksum": -93886621,
          "count": 2,
+         "priority": 3,
        },
      ],
      "last_op_id": "2",
@@ -184,6 +318,7 @@ exports[`sync - postgres > sync legacy non-raw data 1`] = `
          "bucket": "mybucket[]",
          "checksum": -852817836,
          "count": 1,
+         "priority": 3,
        },
      ],
      "last_op_id": "1",
@@ -231,6 +366,7 @@ exports[`sync - postgres > sync updates to global data 1`] = `
          "bucket": "mybucket[]",
          "checksum": 0,
          "count": 0,
+         "priority": 3,
        },
      ],
      "last_op_id": "0",
@@ -256,6 +392,7 @@ exports[`sync - postgres > sync updates to global data 2`] = `
          "bucket": "mybucket[]",
          "checksum": 920318466,
          "count": 1,
+         "priority": 3,
        },
      ],
      "write_checkpoint": undefined,
@@ -299,6 +436,7 @@ exports[`sync - postgres > sync updates to global data 3`] = `
          "bucket": "mybucket[]",
          "checksum": -93886621,
          "count": 2,
+         "priority": 3,
        },
      ],
      "write_checkpoint": undefined,
@@ -26,7 +26,7 @@ describe('Postgres Sync Bucket Storage', () => {
        - SELECT id, description FROM "%"
      `
    );
-   using factory = await POSTGRES_STORAGE_FACTORY();
+   await using factory = await POSTGRES_STORAGE_FACTORY();
    const bucketStorage = factory.getInstance(sync_rules);

    const result = await bucketStorage.startBatch(test_utils.BATCH_OPTIONS, async (batch) => {
@@ -2,4 +2,4 @@ import { register } from '@powersync/service-core-tests';
  import { describe } from 'vitest';
  import { POSTGRES_STORAGE_FACTORY } from './util.js';

- describe('Postgres Sync Bucket Storage Compact', () => register.registerCompactTests(POSTGRES_STORAGE_FACTORY, {}));
+ describe('Postgres Sync Bucket Storage Compact', () => register.registerCompactTests(POSTGRES_STORAGE_FACTORY));