@powersync/service-module-postgres-storage 0.4.2 → 0.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +26 -0
- package/dist/.tsbuildinfo +1 -1
- package/dist/@types/storage/PostgresSyncRulesStorage.d.ts +4 -4
- package/dist/@types/storage/batch/PostgresBucketBatch.d.ts +4 -4
- package/dist/@types/types/types.d.ts +5 -0
- package/dist/storage/PostgresCompactor.js.map +1 -1
- package/dist/storage/PostgresSyncRulesStorage.js +13 -10
- package/dist/storage/PostgresSyncRulesStorage.js.map +1 -1
- package/dist/storage/batch/PostgresBucketBatch.js +7 -4
- package/dist/storage/batch/PostgresBucketBatch.js.map +1 -1
- package/dist/types/types.js.map +1 -1
- package/dist/utils/bucket-data.js +2 -2
- package/dist/utils/bucket-data.js.map +1 -1
- package/package.json +6 -6
- package/src/storage/PostgresCompactor.ts +7 -7
- package/src/storage/PostgresSyncRulesStorage.ts +20 -14
- package/src/storage/batch/PostgresBucketBatch.ts +11 -10
- package/src/types/types.ts +1 -0
- package/src/utils/bucket-data.ts +2 -2
- package/test/src/storage.test.ts +4 -4
package/src/storage/PostgresSyncRulesStorage.ts
CHANGED

@@ -5,6 +5,8 @@ import {
   CHECKPOINT_INVALIDATE_ALL,
   CheckpointChanges,
   GetCheckpointChangesOptions,
+  InternalOpId,
+  internalToExternalOpId,
   LastValueSink,
   storage,
   utils,
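The new imports reflect the central change in this release: op IDs are handled internally as bigint values (InternalOpId) and only serialized to protocol strings at the sync API boundary. A minimal sketch of the assumed shape of these two @powersync/service-core exports, inferred from how they are used throughout this diff:

// Sketch only — assumed definitions, not copied from service-core.
type InternalOpId = bigint;   // internal 64-bit op ID
type ProtocolOpId = string;   // string form sent to sync clients

// Assumed behavior, consistent with its usage in this diff: 42n -> '42'.
function internalToExternalOpId(id: InternalOpId): ProtocolOpId {
  return String(id);
}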
@@ -149,7 +151,7 @@ export class PostgresSyncRulesStorage
       .first();

     return {
-      checkpoint:
+      checkpoint: checkpointRow?.last_checkpoint ?? 0n,
       lsn: checkpointRow?.last_checkpoint_lsn ?? null
     };
   }
@@ -344,14 +346,14 @@ export class PostgresSyncRulesStorage
     await callback(batch);
     await batch.flush();
     if (batch.last_flushed_op) {
-      return { flushed_op:
+      return { flushed_op: batch.last_flushed_op };
     } else {
       return null;
     }
   }

   async getParameterSets(
-    checkpoint: utils.
+    checkpoint: utils.InternalOpId,
     lookups: sync_rules.SqliteJsonValue[][]
   ): Promise<sync_rules.SqliteJsonRow[]> {
     const rows = await this.db.sql`
@@ -374,7 +376,7 @@ export class PostgresSyncRulesStorage
             value: lookups.map((l) => storage.serializeLookupBuffer(l).toString('hex'))
           }}) AS FILTER
       )
-      AND id <= ${{ type: 'int8', value:
+      AND id <= ${{ type: 'int8', value: checkpoint }}
       ORDER BY
         lookup,
         source_table,
@@ -391,8 +393,8 @@ export class PostgresSyncRulesStorage
   }

   async *getBucketDataBatch(
-    checkpoint:
-    dataBuckets: Map<string,
+    checkpoint: InternalOpId,
+    dataBuckets: Map<string, InternalOpId>,
     options?: storage.BucketDataBatchOptions
   ): AsyncIterable<storage.SyncBucketDataBatch> {
     if (dataBuckets.size == 0) {
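With the new signature, callers pass bigint op IDs directly rather than protocol strings. A hypothetical call site (bucketStorage and the checkpoint value are assumed for illustration):

// Usage sketch: start each requested bucket after a given internal op ID.
const checkpoint = 100n;
const afterOpIds = new Map<string, bigint>([['global[]', 0n]]);
for await (const chunk of bucketStorage.getBucketDataBatch(checkpoint, afterOpIds)) {
  // chunk.batch.data carries protocol entries with string op_id values
}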
@@ -410,7 +412,7 @@ export class PostgresSyncRulesStorage

     let batchSize = 0;
     let currentBatch: utils.SyncBucketData | null = null;
-    let targetOp:
+    let targetOp: InternalOpId | null = null;
     let rowCount = 0;

     /**
@@ -503,9 +505,12 @@ export class PostgresSyncRulesStorage
         }
       }

-      start ??= dataBuckets.get(bucket_name);
       if (start == null) {
-
+        const startOpId = dataBuckets.get(bucket_name);
+        if (startOpId == null) {
+          throw new framework.ServiceAssertionError(`data for unexpected bucket: ${bucket_name}`);
+        }
+        start = internalToExternalOpId(startOpId);
       }
       currentBatch = {
         bucket: bucket_name,
@@ -549,7 +554,7 @@ export class PostgresSyncRulesStorage
     }
   }

-  async getChecksums(checkpoint: utils.
+  async getChecksums(checkpoint: utils.InternalOpId, buckets: string[]): Promise<utils.ChecksumMap> {
     return this.checksumCache.getChecksumMap(checkpoint, buckets);
   }

@@ -672,8 +677,9 @@ export class PostgresSyncRulesStorage
     }

     const rangedBatch = batch.map((b) => ({
-
-      start: b.start ??
+      bucket: b.bucket,
+      start: String(b.start ?? 0n),
+      end: String(b.end)
     }));

     const results = await this.db.sql`
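This change and the int8 parameter above both deal with moving 64-bit op IDs across the JS/Postgres boundary. The pattern, sketched (the typed-parameter syntax is taken from the queries in this diff):

// bigint values can exceed Number.MAX_SAFE_INTEGER, so they are never coerced to number.
const checkpoint = 9007199254740993n;
// As a typed query parameter: ... AND id <= ${{ type: 'int8', value: checkpoint }}
// As part of a JSON payload, bigint must be stringified explicitly,
// since JSON.stringify() throws a TypeError on bigint values:
const ranged = { bucket: 'global[]', start: String(0n), end: String(checkpoint) };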
@@ -745,7 +751,7 @@ export class PostgresSyncRulesStorage
   }

   async *watchWriteCheckpoint(options: WatchWriteCheckpointOptions): AsyncIterable<storage.StorageCheckpointUpdate> {
-    let lastCheckpoint: utils.
+    let lastCheckpoint: utils.InternalOpId | null = null;
     let lastWriteCheckpoint: bigint | null = null;

     const { signal, user_id } = options;
@@ -852,7 +858,7 @@ export class PostgresSyncRulesStorage

   private makeActiveCheckpoint(row: models.ActiveCheckpointDecoded | null) {
     return {
-      checkpoint:
+      checkpoint: row?.last_checkpoint ?? 0n,
       lsn: row?.last_checkpoint_lsn ?? null
     } satisfies storage.ReplicationCheckpoint;
   }
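Both places that build a storage.ReplicationCheckpoint (the hunk around line 151 above and makeActiveCheckpoint here) now fall back to 0n, so the checkpoint field is always a bigint. A tiny sketch of the resulting shape, with field semantics inferred from the diff:

// checkpoint: internal bigint op ID; lsn: last checkpoint LSN string, if any.
const emptyCheckpoint = { checkpoint: 0n, lsn: null as string | null };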
package/src/storage/batch/PostgresBucketBatch.ts
CHANGED

@@ -9,7 +9,7 @@ import {
   ServiceAssertionError,
   ServiceError
 } from '@powersync/lib-services-framework';
-import { storage, utils } from '@powersync/service-core';
+import { InternalOpId, storage, utils } from '@powersync/service-core';
 import * as sync_rules from '@powersync/service-sync-rules';
 import * as timers from 'timers/promises';
 import * as t from 'ts-codec';
@@ -29,7 +29,7 @@ export interface PostgresBucketBatchOptions {
   last_checkpoint_lsn: string | null;
   no_checkpoint_before_lsn: string;
   store_current_data: boolean;
-  keep_alive_op?:
+  keep_alive_op?: InternalOpId | null;
   /**
    * Set to true for initial replication.
    */
@@ -54,14 +54,14 @@ export class PostgresBucketBatch
   extends BaseObserver<storage.BucketBatchStorageListener>
   implements storage.BucketStorageBatch
 {
-  public last_flushed_op:
+  public last_flushed_op: InternalOpId | null = null;

   protected db: lib_postgres.DatabaseClient;
   protected group_id: number;
   protected last_checkpoint_lsn: string | null;
   protected no_checkpoint_before_lsn: string;

-  protected persisted_op:
+  protected persisted_op: InternalOpId | null;

   protected write_checkpoint_batch: storage.CustomWriteCheckpointOptions[];
   protected readonly sync_rules: sync_rules.SqlSyncRules;
@@ -132,18 +132,19 @@ export class PostgresBucketBatch
   async truncate(sourceTables: storage.SourceTable[]): Promise<storage.FlushedResult | null> {
     await this.flush();

-    let last_op:
+    let last_op: InternalOpId | null = null;
     for (let table of sourceTables) {
       last_op = await this.truncateSingle(table);
     }

     if (last_op) {
       this.persisted_op = last_op;
+      return {
+        flushed_op: last_op
+      };
+    } else {
+      return null;
     }
-
-    return {
-      flushed_op: String(last_op!)
-    };
   }

   protected async truncateSingle(sourceTable: storage.SourceTable) {
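The restructured truncate also fixes a subtle bug: the old code fell through to return { flushed_op: String(last_op!) } even when last_op was null, which would have produced the string 'null' rather than the null result the signature promises. A usage sketch under the new contract (the table handle is assumed):

// truncate now returns null when nothing was truncated.
const result = await batch.truncate([usersTable]); // usersTable: storage.SourceTable
if (result != null) {
  console.log('truncated up to op', result.flushed_op); // flushed_op is now a bigint InternalOpId
}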
@@ -279,7 +280,7 @@ export class PostgresBucketBatch

     this.persisted_op = lastOp;
     this.last_flushed_op = lastOp;
-    return { flushed_op:
+    return { flushed_op: lastOp };
   }

   async commit(lsn: string, options?: storage.BucketBatchCommitOptions): Promise<boolean> {
package/src/types/types.ts
CHANGED
@@ -53,6 +53,7 @@ export type RequiredOperationBatchLimits = Required<OperationBatchLimits>;

 export type NormalizedPostgresStorageConfig = pg_wire.NormalizedConnectionConfig & {
   batch_limits: RequiredOperationBatchLimits;
+  max_pool_size: number;
 };

 export const normalizePostgresStorageConfig = (
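The normalized storage config gains a max_pool_size field. A hedged sketch of the shape after this change — the defaulting logic inside normalizePostgresStorageConfig and the exact batch-limit fields are not visible in this diff, so stand-ins are used:

// Assumed stand-in for RequiredOperationBatchLimits; the real fields differ.
type BatchLimitsStandIn = Record<string, number>;
type NormalizedStorageConfigSketch = {
  batch_limits: BatchLimitsStandIn;
  max_pool_size: number; // new in 0.5.x: presumably the upper bound on pooled Postgres connections
};
const sketch: NormalizedStorageConfigSketch = { batch_limits: {}, max_pool_size: 8 };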
package/src/utils/bucket-data.ts
CHANGED
@@ -5,7 +5,7 @@ import { replicaIdToSubkey } from './bson.js';
 export const mapOpEntry = (entry: models.BucketDataDecoded) => {
   if (entry.op == models.OpType.PUT || entry.op == models.OpType.REMOVE) {
     return {
-      op_id: utils.
+      op_id: utils.internalToExternalOpId(entry.op_id),
       op: entry.op,
       object_type: entry.table_name ?? undefined,
       object_id: entry.row_id ?? undefined,
@@ -17,7 +17,7 @@ export const mapOpEntry = (entry: models.BucketDataDecoded) => {
   // MOVE, CLEAR

   return {
-    op_id: utils.
+    op_id: utils.internalToExternalOpId(entry.op_id),
     op: entry.op,
     checksum: Number(entry.checksum)
   };
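mapOpEntry now converts the stored bigint op ID to its protocol string via utils.internalToExternalOpId in both branches. An illustrative (hypothetical) input/output pair:

// A decoded PUT row with a bigint op_id maps to a protocol entry with a string op_id.
const decoded = { op: 'PUT', op_id: 42n, table_name: 'lists', row_id: 'l1', checksum: 123n };
// mapOpEntry(decoded as models.BucketDataDecoded) ~> { op_id: '42', op: 'PUT', object_type: 'lists', object_id: 'l1', ... }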
package/test/src/storage.test.ts
CHANGED
@@ -81,7 +81,7 @@ describe('Postgres Sync Bucket Storage', () => {
     const options: storage.BucketDataBatchOptions = {};

     const batch1 = await test_utils.fromAsync(
-      bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]',
+      bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', 0n]]), options)
     );
     expect(test_utils.getBatchData(batch1)).toEqual([
       { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 }

@@ -93,7 +93,7 @@ describe('Postgres Sync Bucket Storage', () => {
     });

     const batch2 = await test_utils.fromAsync(
-      bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', batch1[0].batch.next_after]]), options)
+      bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', BigInt(batch1[0].batch.next_after)]]), options)
     );
     expect(test_utils.getBatchData(batch2)).toEqual([
       { op_id: '2', op: 'PUT', object_id: 'large1', checksum: 1178768505 }

@@ -105,7 +105,7 @@ describe('Postgres Sync Bucket Storage', () => {
     });

     const batch3 = await test_utils.fromAsync(
-      bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', batch2[0].batch.next_after]]), options)
+      bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', BigInt(batch2[0].batch.next_after)]]), options)
     );
     expect(test_utils.getBatchData(batch3)).toEqual([
       { op_id: '3', op: 'PUT', object_id: 'large2', checksum: 1607205872 }

@@ -117,7 +117,7 @@ describe('Postgres Sync Bucket Storage', () => {
     });

     const batch4 = await test_utils.fromAsync(
-      bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', batch3[0].batch.next_after]]), options)
+      bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', BigInt(batch3[0].batch.next_after)]]), options)
     );
     expect(test_utils.getBatchData(batch4)).toEqual([
       { op_id: '4', op: 'PUT', object_id: 'test3', checksum: 1359888332 }
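The updated tests make the boundary explicit: the Map of starting op IDs takes bigint values (0n for the first page), while next_after on the returned batch remains a protocol string, hence the BigInt(...) round-trip when paginating:

// Pagination pattern from these tests: protocol string -> internal bigint.
const after: bigint = BigInt(batch1[0].batch.next_after); // e.g. '1' -> 1n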