@powersync/service-module-postgres-storage 0.12.0 → 0.13.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. package/CHANGELOG.md +45 -0
  2. package/dist/.tsbuildinfo +1 -1
  3. package/dist/@types/migrations/scripts/1771424826685-current-data-pending-deletes.d.ts +3 -0
  4. package/dist/@types/storage/PostgresBucketStorageFactory.d.ts +4 -0
  5. package/dist/@types/storage/PostgresCompactor.d.ts +8 -2
  6. package/dist/@types/storage/PostgresSyncRulesStorage.d.ts +10 -4
  7. package/dist/@types/storage/batch/OperationBatch.d.ts +2 -2
  8. package/dist/@types/storage/batch/PostgresBucketBatch.d.ts +13 -9
  9. package/dist/@types/storage/batch/PostgresPersistedBatch.d.ts +17 -5
  10. package/dist/@types/storage/current-data-store.d.ts +85 -0
  11. package/dist/@types/storage/current-data-table.d.ts +9 -0
  12. package/dist/@types/storage/table-id.d.ts +2 -0
  13. package/dist/@types/types/models/CurrentData.d.ts +18 -3
  14. package/dist/@types/utils/bson.d.ts +1 -1
  15. package/dist/@types/utils/test-utils.d.ts +1 -1
  16. package/dist/migrations/scripts/1771424826685-current-data-pending-deletes.js +8 -0
  17. package/dist/migrations/scripts/1771424826685-current-data-pending-deletes.js.map +1 -0
  18. package/dist/storage/PostgresBucketStorageFactory.js +41 -4
  19. package/dist/storage/PostgresBucketStorageFactory.js.map +1 -1
  20. package/dist/storage/PostgresCompactor.js +14 -6
  21. package/dist/storage/PostgresCompactor.js.map +1 -1
  22. package/dist/storage/PostgresSyncRulesStorage.js +98 -24
  23. package/dist/storage/PostgresSyncRulesStorage.js.map +1 -1
  24. package/dist/storage/batch/OperationBatch.js +2 -1
  25. package/dist/storage/batch/OperationBatch.js.map +1 -1
  26. package/dist/storage/batch/PostgresBucketBatch.js +295 -213
  27. package/dist/storage/batch/PostgresBucketBatch.js.map +1 -1
  28. package/dist/storage/batch/PostgresPersistedBatch.js +86 -81
  29. package/dist/storage/batch/PostgresPersistedBatch.js.map +1 -1
  30. package/dist/storage/current-data-store.js +270 -0
  31. package/dist/storage/current-data-store.js.map +1 -0
  32. package/dist/storage/current-data-table.js +22 -0
  33. package/dist/storage/current-data-table.js.map +1 -0
  34. package/dist/storage/table-id.js +8 -0
  35. package/dist/storage/table-id.js.map +1 -0
  36. package/dist/types/models/CurrentData.js +11 -2
  37. package/dist/types/models/CurrentData.js.map +1 -1
  38. package/dist/utils/bson.js.map +1 -1
  39. package/dist/utils/db.js +9 -0
  40. package/dist/utils/db.js.map +1 -1
  41. package/dist/utils/test-utils.js +13 -6
  42. package/dist/utils/test-utils.js.map +1 -1
  43. package/package.json +8 -8
  44. package/src/migrations/scripts/1771424826685-current-data-pending-deletes.ts +10 -0
  45. package/src/storage/PostgresBucketStorageFactory.ts +53 -5
  46. package/src/storage/PostgresCompactor.ts +17 -8
  47. package/src/storage/PostgresSyncRulesStorage.ts +47 -31
  48. package/src/storage/batch/OperationBatch.ts +4 -3
  49. package/src/storage/batch/PostgresBucketBatch.ts +316 -238
  50. package/src/storage/batch/PostgresPersistedBatch.ts +92 -84
  51. package/src/storage/current-data-store.ts +326 -0
  52. package/src/storage/current-data-table.ts +26 -0
  53. package/src/storage/table-id.ts +9 -0
  54. package/src/types/models/CurrentData.ts +17 -4
  55. package/src/utils/bson.ts +1 -1
  56. package/src/utils/db.ts +10 -0
  57. package/src/utils/test-utils.ts +14 -7
  58. package/test/src/__snapshots__/storage.test.ts.snap +151 -0
  59. package/test/src/__snapshots__/storage_compacting.test.ts.snap +17 -0
  60. package/test/src/__snapshots__/storage_sync.test.ts.snap +1111 -16
  61. package/test/src/env.ts +1 -1
  62. package/test/src/migrations.test.ts +1 -1
  63. package/test/src/storage.test.ts +138 -131
  64. package/test/src/storage_compacting.test.ts +80 -11
  65. package/test/src/storage_sync.test.ts +57 -54
  66. package/test/src/util.ts +4 -4
@@ -14,6 +14,7 @@ import {
14
14
  PopulateChecksumCacheResults,
15
15
  ReplicationCheckpoint,
16
16
  storage,
17
+ StorageVersionConfig,
17
18
  utils,
18
19
  WatchWriteCheckpointOptions
19
20
  } from '@powersync/service-core';
@@ -35,6 +36,7 @@ import { PostgresBucketBatch } from './batch/PostgresBucketBatch.js';
35
36
  import { PostgresWriteCheckpointAPI } from './checkpoints/PostgresWriteCheckpointAPI.js';
36
37
  import { PostgresBucketStorageFactory } from './PostgresBucketStorageFactory.js';
37
38
  import { PostgresCompactor } from './PostgresCompactor.js';
39
+ import { PostgresCurrentDataStore } from './current-data-store.js';
38
40
 
39
41
  export type PostgresSyncRulesStorageOptions = {
40
42
  factory: PostgresBucketStorageFactory;
@@ -52,11 +54,13 @@ export class PostgresSyncRulesStorage
52
54
  public readonly sync_rules: storage.PersistedSyncRulesContent;
53
55
  public readonly slot_name: string;
54
56
  public readonly factory: PostgresBucketStorageFactory;
57
+ public readonly storageConfig: StorageVersionConfig;
55
58
 
56
59
  private sharedIterator = new BroadcastIterable((signal) => this.watchActiveCheckpoint(signal));
57
60
 
58
61
  protected db: lib_postgres.DatabaseClient;
59
62
  protected writeCheckpointAPI: PostgresWriteCheckpointAPI;
63
+ private readonly currentDataStore: PostgresCurrentDataStore;
60
64
 
61
65
  // TODO we might be able to share this in an abstract class
62
66
  private parsedSyncRulesCache:
@@ -71,6 +75,8 @@ export class PostgresSyncRulesStorage
71
75
  this.sync_rules = options.sync_rules;
72
76
  this.slot_name = options.sync_rules.slot_name;
73
77
  this.factory = options.factory;
78
+ this.storageConfig = options.sync_rules.getStorageConfig();
79
+ this.currentDataStore = new PostgresCurrentDataStore(this.storageConfig);
74
80
 
75
81
  this.writeCheckpointAPI = new PostgresWriteCheckpointAPI({
76
82
  db: this.db,
@@ -121,8 +127,18 @@ export class PostgresSyncRulesStorage
121
127
  `.execute();
122
128
  }
123
129
 
124
- compact(options?: storage.CompactOptions): Promise<void> {
125
- return new PostgresCompactor(this.db, this.group_id, options).compact();
130
+ async compact(options?: storage.CompactOptions): Promise<void> {
131
+ let maxOpId = options?.maxOpId;
132
+ if (maxOpId == null) {
133
+ const checkpoint = await this.getCheckpoint();
134
+ // Note: If there is no active checkpoint, this will be 0, in which case no compacting is performed
135
+ maxOpId = checkpoint.checkpoint;
136
+ }
137
+
138
+ return new PostgresCompactor(this.db, this.group_id, {
139
+ ...options,
140
+ maxOpId
141
+ }).compact();
126
142
  }
127
143
 
128
144
  async populatePersistentChecksumCache(options: PopulateChecksumCacheOptions): Promise<PopulateChecksumCacheResults> {
@@ -327,10 +343,7 @@ export class PostgresSyncRulesStorage
327
343
  });
328
344
  }
329
345
 
330
- async startBatch(
331
- options: storage.StartBatchOptions,
332
- callback: (batch: storage.BucketStorageBatch) => Promise<void>
333
- ): Promise<storage.FlushedResult | null> {
346
+ async createWriter(options: storage.CreateWriterOptions): Promise<storage.BucketStorageBatch> {
334
347
  const syncRules = await this.db.sql`
335
348
  SELECT
336
349
  last_checkpoint_lsn,
@@ -347,7 +360,7 @@ export class PostgresSyncRulesStorage
347
360
 
348
361
  const checkpoint_lsn = syncRules?.last_checkpoint_lsn ?? null;
349
362
 
350
- const batch = new PostgresBucketBatch({
363
+ const writer = new PostgresBucketBatch({
351
364
  logger: options.logger ?? framework.logger,
352
365
  db: this.db,
353
366
  sync_rules: this.sync_rules.parsed(options).hydratedSyncRules(),
@@ -355,22 +368,28 @@ export class PostgresSyncRulesStorage
355
368
  slot_name: this.slot_name,
356
369
  last_checkpoint_lsn: checkpoint_lsn,
357
370
  keep_alive_op: syncRules?.keepalive_op,
358
- no_checkpoint_before_lsn: syncRules?.no_checkpoint_before ?? options.zeroLSN,
359
371
  resumeFromLsn: maxLsn(syncRules?.snapshot_lsn, checkpoint_lsn),
360
372
  store_current_data: options.storeCurrentData,
361
373
  skip_existing_rows: options.skipExistingRows ?? false,
362
374
  batch_limits: this.options.batchLimits,
363
- markRecordUnavailable: options.markRecordUnavailable
375
+ markRecordUnavailable: options.markRecordUnavailable,
376
+ storageConfig: this.storageConfig
364
377
  });
365
- this.iterateListeners((cb) => cb.batchStarted?.(batch));
366
-
367
- await callback(batch);
368
- await batch.flush();
369
- if (batch.last_flushed_op != null) {
370
- return { flushed_op: batch.last_flushed_op };
371
- } else {
372
- return null;
373
- }
378
+ this.iterateListeners((cb) => cb.batchStarted?.(writer));
379
+ return writer;
380
+ }
381
+
382
+ /**
383
+ * @deprecated Use `createWriter()` with `await using` instead.
384
+ */
385
+ async startBatch(
386
+ options: storage.CreateWriterOptions,
387
+ callback: (batch: storage.BucketStorageBatch) => Promise<void>
388
+ ): Promise<storage.FlushedResult | null> {
389
+ await using writer = await this.createWriter(options);
390
+ await callback(writer);
391
+ await writer.flush();
392
+ return writer.last_flushed_op != null ? { flushed_op: writer.last_flushed_op } : null;
374
393
  }
375
394
 
376
395
  async getParameterSets(
@@ -415,10 +434,10 @@ export class PostgresSyncRulesStorage
415
434
 
416
435
  async *getBucketDataBatch(
417
436
  checkpoint: InternalOpId,
418
- dataBuckets: Map<string, InternalOpId>,
437
+ dataBuckets: storage.BucketDataRequest[],
419
438
  options?: storage.BucketDataBatchOptions
420
439
  ): AsyncIterable<storage.SyncBucketDataChunk> {
421
- if (dataBuckets.size == 0) {
440
+ if (dataBuckets.length == 0) {
422
441
  return;
423
442
  }
424
443
 
@@ -430,10 +449,8 @@ export class PostgresSyncRulesStorage
430
449
  // not match up with chunks.
431
450
 
432
451
  const end = checkpoint ?? BIGINT_MAX;
433
- const filters = Array.from(dataBuckets.entries()).map(([name, start]) => ({
434
- bucket_name: name,
435
- start: start
436
- }));
452
+ const filters = dataBuckets.map((request) => ({ bucket_name: request.bucket, start: request.start }));
453
+ const startOpByBucket = new Map(dataBuckets.map((request) => [request.bucket, request.start]));
437
454
 
438
455
  const batchRowLimit = options?.limit ?? storage.DEFAULT_DOCUMENT_BATCH_LIMIT;
439
456
  const chunkSizeLimitBytes = options?.chunkLimitBytes ?? storage.DEFAULT_DOCUMENT_CHUNK_LIMIT_BYTES;
@@ -533,7 +550,7 @@ export class PostgresSyncRulesStorage
533
550
  }
534
551
 
535
552
  if (start == null) {
536
- const startOpId = dataBuckets.get(bucket_name);
553
+ const startOpId = startOpByBucket.get(bucket_name);
537
554
  if (startOpId == null) {
538
555
  throw new framework.ServiceAssertionError(`data for unexpected bucket: ${bucket_name}`);
539
556
  }
@@ -588,7 +605,10 @@ export class PostgresSyncRulesStorage
588
605
  }
589
606
  }
590
607
 
591
- async getChecksums(checkpoint: utils.InternalOpId, buckets: string[]): Promise<utils.ChecksumMap> {
608
+ async getChecksums(
609
+ checkpoint: utils.InternalOpId,
610
+ buckets: storage.BucketChecksumRequest[]
611
+ ): Promise<utils.ChecksumMap> {
592
612
  return this.checksumCache.getChecksumMap(checkpoint, buckets);
593
613
  }
594
614
 
@@ -662,11 +682,7 @@ export class PostgresSyncRulesStorage
662
682
  group_id = ${{ type: 'int4', value: this.group_id }}
663
683
  `.execute();
664
684
 
665
- await this.db.sql`
666
- DELETE FROM current_data
667
- WHERE
668
- group_id = ${{ type: 'int4', value: this.group_id }}
669
- `.execute();
685
+ await this.currentDataStore.deleteGroupRows(this.db, { groupId: this.group_id });
670
686
 
671
687
  await this.db.sql`
672
688
  DELETE FROM source_tables
@@ -5,6 +5,7 @@
5
5
 
6
6
  import { storage, utils } from '@powersync/service-core';
7
7
  import { RequiredOperationBatchLimits } from '../../types/types.js';
8
+ import { postgresTableId } from '../table-id.js';
8
9
 
9
10
  /**
10
11
  * Batch of input operations.
@@ -89,13 +90,13 @@ export class RecordOperation {
89
90
  /**
90
91
  * In-memory cache key - must not be persisted.
91
92
  */
92
- export function cacheKey(sourceTableId: string, id: storage.ReplicaId) {
93
+ export function cacheKey(sourceTableId: storage.SourceTableId, id: storage.ReplicaId) {
93
94
  return encodedCacheKey(sourceTableId, storage.serializeReplicaId(id));
94
95
  }
95
96
 
96
97
  /**
97
98
  * Calculates a cache key for a stored ReplicaId. This is usually stored as a bytea/Buffer.
98
99
  */
99
- export function encodedCacheKey(sourceTableId: string, storedKey: Buffer) {
100
- return `${sourceTableId}.${storedKey.toString('base64')}`;
100
+ export function encodedCacheKey(sourceTableId: storage.SourceTableId, storedKey: Buffer) {
101
+ return `${postgresTableId(sourceTableId)}.${storedKey.toString('base64')}`;
101
102
  }