@powersync/service-module-postgres-storage 0.0.0-dev-20250304151813 → 0.0.0-dev-20250306152715
This diff compares two publicly released versions of the package as published to a supported public registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in that registry.
- package/CHANGELOG.md +25 -7
- package/dist/.tsbuildinfo +1 -1
- package/dist/@types/storage/PostgresSyncRulesStorage.d.ts +4 -4
- package/dist/@types/storage/batch/PostgresBucketBatch.d.ts +4 -4
- package/dist/storage/PostgresCompactor.js.map +1 -1
- package/dist/storage/PostgresSyncRulesStorage.js +13 -10
- package/dist/storage/PostgresSyncRulesStorage.js.map +1 -1
- package/dist/storage/batch/PostgresBucketBatch.js +7 -4
- package/dist/storage/batch/PostgresBucketBatch.js.map +1 -1
- package/dist/utils/bucket-data.js +2 -2
- package/dist/utils/bucket-data.js.map +1 -1
- package/package.json +6 -6
- package/src/storage/PostgresCompactor.ts +7 -7
- package/src/storage/PostgresSyncRulesStorage.ts +20 -14
- package/src/storage/batch/PostgresBucketBatch.ts +11 -10
- package/src/utils/bucket-data.ts +2 -2
- package/test/src/setup.ts +2 -8
- package/test/src/storage.test.ts +4 -4
package/src/storage/batch/PostgresBucketBatch.ts
CHANGED

@@ -9,7 +9,7 @@ import {
   ServiceAssertionError,
   ServiceError
 } from '@powersync/lib-services-framework';
-import { storage, utils } from '@powersync/service-core';
+import { InternalOpId, storage, utils } from '@powersync/service-core';
 import * as sync_rules from '@powersync/service-sync-rules';
 import * as timers from 'timers/promises';
 import * as t from 'ts-codec';

@@ -29,7 +29,7 @@ export interface PostgresBucketBatchOptions {
   last_checkpoint_lsn: string | null;
   no_checkpoint_before_lsn: string;
   store_current_data: boolean;
-  keep_alive_op?:
+  keep_alive_op?: InternalOpId | null;
   /**
    * Set to true for initial replication.
    */

@@ -54,14 +54,14 @@ export class PostgresBucketBatch
   extends BaseObserver<storage.BucketBatchStorageListener>
   implements storage.BucketStorageBatch
 {
-  public last_flushed_op:
+  public last_flushed_op: InternalOpId | null = null;

   protected db: lib_postgres.DatabaseClient;
   protected group_id: number;
   protected last_checkpoint_lsn: string | null;
   protected no_checkpoint_before_lsn: string;

-  protected persisted_op:
+  protected persisted_op: InternalOpId | null;

   protected write_checkpoint_batch: storage.CustomWriteCheckpointOptions[];
   protected readonly sync_rules: sync_rules.SqlSyncRules;

@@ -132,18 +132,19 @@ export class PostgresBucketBatch
   async truncate(sourceTables: storage.SourceTable[]): Promise<storage.FlushedResult | null> {
     await this.flush();

-    let last_op:
+    let last_op: InternalOpId | null = null;
     for (let table of sourceTables) {
       last_op = await this.truncateSingle(table);
     }

     if (last_op) {
       this.persisted_op = last_op;
+      return {
+        flushed_op: last_op
+      };
+    } else {
+      return null;
     }
-
-    return {
-      flushed_op: String(last_op!)
-    };
   }

   protected async truncateSingle(sourceTable: storage.SourceTable) {

@@ -279,7 +280,7 @@ export class PostgresBucketBatch

     this.persisted_op = lastOp;
     this.last_flushed_op = lastOp;
-    return { flushed_op:
+    return { flushed_op: lastOp };
   }

   async commit(lsn: string, options?: storage.BucketBatchCommitOptions): Promise<boolean> {
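Taken together, these hunks move PostgresBucketBatch from string-formatted op ids to the InternalOpId type imported from @powersync/service-core, and truncate()/flush() now return the op id directly instead of String(last_op!). The sketch below is not part of the package; it illustrates how a caller might consume the new result shape, assuming InternalOpId is a bigint-like alias (as the 0n and BigInt(...) usages in the test diff further down suggest) and that storage.FlushedResult.flushed_op now carries that type.

// Hypothetical caller, not from the package: consumes the InternalOpId-typed
// result that truncate() now returns instead of a stringified op id.
import { InternalOpId, storage } from '@powersync/service-core';

async function truncateTables(
  batch: storage.BucketStorageBatch,
  tables: storage.SourceTable[]
): Promise<InternalOpId | null> {
  const result = await batch.truncate(tables);
  if (result == null) {
    // No ops were written for these tables, so there is no op id to report.
    return null;
  }
  // Assumption: flushed_op is the internal op id itself, no longer a string.
  return result.flushed_op;
}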
package/src/utils/bucket-data.ts
CHANGED
@@ -5,7 +5,7 @@ import { replicaIdToSubkey } from './bson.js';
 export const mapOpEntry = (entry: models.BucketDataDecoded) => {
   if (entry.op == models.OpType.PUT || entry.op == models.OpType.REMOVE) {
     return {
-      op_id: utils.
+      op_id: utils.internalToExternalOpId(entry.op_id),
       op: entry.op,
       object_type: entry.table_name ?? undefined,
       object_id: entry.row_id ?? undefined,

@@ -17,7 +17,7 @@ export const mapOpEntry = (entry: models.BucketDataDecoded) => {
   // MOVE, CLEAR

   return {
-    op_id: utils.
+    op_id: utils.internalToExternalOpId(entry.op_id),
     op: entry.op,
     checksum: Number(entry.checksum)
   };
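mapOpEntry now delegates op id formatting to utils.internalToExternalOpId instead of building the value inline. A minimal sketch of that conversion follows; it assumes the helper maps the internal op id to the string form that appears in the op_id fields asserted by the tests below.

// Illustrative only: convert an internal op id to its external string form.
// Assumes internalToExternalOpId accepts the bigint-like internal id and
// returns the string clients see in op_id fields, e.g. '42'.
import { utils } from '@powersync/service-core';

const internalOpId = 42n;
const externalOpId = utils.internalToExternalOpId(internalOpId);
// externalOpId is the value placed in the op_id field returned by mapOpEntry.
console.log(externalOpId);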
package/test/src/setup.ts
CHANGED
@@ -1,16 +1,10 @@
 import { container } from '@powersync/lib-services-framework';
-import { Metrics } from '@powersync/service-core';
 import { beforeAll } from 'vitest';
+import { METRICS_HELPER } from '@powersync/service-core-tests';

 beforeAll(async () => {
   // Executes for every test file
   container.registerDefaults();

-
-  await Metrics.initialise({
-    disable_telemetry_sharing: true,
-    powersync_instance_id: 'test',
-    internal_metrics_endpoint: 'unused.for.tests.com'
-  });
-  Metrics.getInstance().resetCounters();
+  METRICS_HELPER.resetMetrics();
 });
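The shared test setup no longer initialises the Metrics singleton; it resets metrics through the METRICS_HELPER shipped with @powersync/service-core-tests. A small sketch of a per-test variant, assuming the same helper is also safe to call from beforeEach:

// Hypothetical per-test reset, modelled on the setup change above.
import { METRICS_HELPER } from '@powersync/service-core-tests';
import { beforeEach } from 'vitest';

beforeEach(() => {
  // Clear counters accumulated by the previous test in this file.
  METRICS_HELPER.resetMetrics();
});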
package/test/src/storage.test.ts
CHANGED
@@ -81,7 +81,7 @@ describe('Postgres Sync Bucket Storage', () => {
     const options: storage.BucketDataBatchOptions = {};

     const batch1 = await test_utils.fromAsync(
-      bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]',
+      bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', 0n]]), options)
     );
     expect(test_utils.getBatchData(batch1)).toEqual([
       { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 }

@@ -93,7 +93,7 @@ describe('Postgres Sync Bucket Storage', () => {
     });

     const batch2 = await test_utils.fromAsync(
-      bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', batch1[0].batch.next_after]]), options)
+      bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', BigInt(batch1[0].batch.next_after)]]), options)
     );
     expect(test_utils.getBatchData(batch2)).toEqual([
       { op_id: '2', op: 'PUT', object_id: 'large1', checksum: 1178768505 }

@@ -105,7 +105,7 @@ describe('Postgres Sync Bucket Storage', () => {
     });

     const batch3 = await test_utils.fromAsync(
-      bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', batch2[0].batch.next_after]]), options)
+      bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', BigInt(batch2[0].batch.next_after)]]), options)
     );
     expect(test_utils.getBatchData(batch3)).toEqual([
       { op_id: '3', op: 'PUT', object_id: 'large2', checksum: 1607205872 }

@@ -117,7 +117,7 @@ describe('Postgres Sync Bucket Storage', () => {
     });

     const batch4 = await test_utils.fromAsync(
-      bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', batch3[0].batch.next_after]]), options)
+      bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', BigInt(batch3[0].batch.next_after)]]), options)
     );
     expect(test_utils.getBatchData(batch4)).toEqual([
       { op_id: '4', op: 'PUT', object_id: 'test3', checksum: 1359888332 }
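These test changes show that getBucketDataBatch now keys its dataBuckets map with bigint op ids: 0n for the start of a bucket and BigInt(next_after) for subsequent pages, instead of raw strings. The loop below is a sketch of that pagination pattern, not code from the package; the import path for the test helpers, the checkpoint type, and the empty-result termination condition are assumptions.

// Hypothetical pagination over bucket data using bigint 'after' keys.
// Assumes the helpers come from @powersync/service-core-tests, next_after is
// still a string on the returned batch (hence BigInt(...)), and an empty
// result signals that the checkpoint has been fully read.
import * as test_utils from '@powersync/service-core-tests';
import { InternalOpId, storage } from '@powersync/service-core';

async function readAllGlobalData(
  bucketStorage: storage.SyncRulesBucketStorage,
  checkpoint: InternalOpId
) {
  const options: storage.BucketDataBatchOptions = {};
  let after = 0n;
  const rows: unknown[] = [];
  while (true) {
    const batches = await test_utils.fromAsync(
      bucketStorage.getBucketDataBatch(checkpoint, new Map([['global[]', after]]), options)
    );
    if (batches.length == 0) {
      // Assumption: nothing left after 'after' for this checkpoint.
      break;
    }
    rows.push(...test_utils.getBatchData(batches));
    after = BigInt(batches[batches.length - 1].batch.next_after);
  }
  return rows;
}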