@powersync/service-module-postgres-storage 0.0.0-dev-20250122110924 → 0.0.0-dev-20250227082606
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +120 -12
- package/README.md +7 -1
- package/dist/.tsbuildinfo +1 -1
- package/dist/@types/storage/PostgresBucketStorageFactory.d.ts +8 -13
- package/dist/@types/storage/PostgresCompactor.d.ts +0 -6
- package/dist/@types/storage/PostgresSyncRulesStorage.d.ts +9 -3
- package/dist/@types/storage/batch/PostgresBucketBatch.d.ts +7 -6
- package/dist/@types/types/codecs.d.ts +2 -2
- package/dist/@types/types/models/BucketData.d.ts +1 -1
- package/dist/@types/types/models/BucketParameters.d.ts +2 -2
- package/dist/@types/types/models/CurrentData.d.ts +3 -3
- package/dist/@types/types/models/SourceTable.d.ts +1 -1
- package/dist/@types/types/types.d.ts +5 -0
- package/dist/storage/PostgresBucketStorageFactory.js +68 -158
- package/dist/storage/PostgresBucketStorageFactory.js.map +1 -1
- package/dist/storage/PostgresCompactor.js +19 -6
- package/dist/storage/PostgresCompactor.js.map +1 -1
- package/dist/storage/PostgresSyncRulesStorage.js +221 -123
- package/dist/storage/PostgresSyncRulesStorage.js.map +1 -1
- package/dist/storage/batch/PostgresBucketBatch.js +31 -19
- package/dist/storage/batch/PostgresBucketBatch.js.map +1 -1
- package/dist/storage/batch/PostgresPersistedBatch.js +39 -81
- package/dist/storage/batch/PostgresPersistedBatch.js.map +1 -1
- package/dist/types/types.js.map +1 -1
- package/package.json +9 -9
- package/src/storage/PostgresBucketStorageFactory.ts +73 -179
- package/src/storage/PostgresCompactor.ts +19 -14
- package/src/storage/PostgresSyncRulesStorage.ts +231 -38
- package/src/storage/batch/PostgresBucketBatch.ts +39 -22
- package/src/storage/batch/PostgresPersistedBatch.ts +39 -81
- package/src/types/models/SourceTable.ts +1 -1
- package/src/types/types.ts +1 -0
- package/test/src/__snapshots__/storage_sync.test.ts.snap +138 -0
- package/test/src/storage.test.ts +1 -1
- package/test/src/storage_compacting.test.ts +1 -1
- package/tsconfig.json +0 -2
package/src/storage/PostgresSyncRulesStorage.ts
CHANGED
```diff
@@ -1,6 +1,15 @@
 import * as lib_postgres from '@powersync/lib-service-postgres';
-import {
-import {
+import { ReplicationAssertionError } from '@powersync/lib-services-framework';
+import {
+  BroadcastIterable,
+  CHECKPOINT_INVALIDATE_ALL,
+  CheckpointChanges,
+  GetCheckpointChangesOptions,
+  LastValueSink,
+  storage,
+  utils,
+  WatchWriteCheckpointOptions
+} from '@powersync/service-core';
 import { JSONBig } from '@powersync/service-jsonbig';
 import * as sync_rules from '@powersync/service-sync-rules';
 import * as uuid from 'uuid';
```
```diff
@@ -8,14 +17,18 @@ import { BIGINT_MAX } from '../types/codecs.js';
 import { models, RequiredOperationBatchLimits } from '../types/types.js';
 import { replicaIdToSubkey } from '../utils/bson.js';
 import { mapOpEntry } from '../utils/bucket-data.js';
+import * as timers from 'timers/promises';
 
+import * as framework from '@powersync/lib-services-framework';
 import { StatementParam } from '@powersync/service-jpgwire';
-import { StoredRelationId } from '../types/models/SourceTable.js';
+import { SourceTableDecoded, StoredRelationId } from '../types/models/SourceTable.js';
 import { pick } from '../utils/ts-codec.js';
 import { PostgresBucketBatch } from './batch/PostgresBucketBatch.js';
 import { PostgresWriteCheckpointAPI } from './checkpoints/PostgresWriteCheckpointAPI.js';
 import { PostgresBucketStorageFactory } from './PostgresBucketStorageFactory.js';
 import { PostgresCompactor } from './PostgresCompactor.js';
+import { wrapWithAbort } from 'ix/asynciterable/operators/withabort.js';
+import { Decoded } from 'ts-codec';
 
 export type PostgresSyncRulesStorageOptions = {
   factory: PostgresBucketStorageFactory;
```
```diff
@@ -26,7 +39,7 @@ export type PostgresSyncRulesStorageOptions = {
 };
 
 export class PostgresSyncRulesStorage
-  extends
+  extends framework.BaseObserver<storage.SyncRulesBucketStorageListener>
   implements storage.SyncRulesBucketStorage
 {
   public readonly group_id: number;
```
```diff
@@ -34,6 +47,8 @@ export class PostgresSyncRulesStorage
   public readonly slot_name: string;
   public readonly factory: PostgresBucketStorageFactory;
 
+  private sharedIterator = new BroadcastIterable((signal) => this.watchActiveCheckpoint(signal));
+
   protected db: lib_postgres.DatabaseClient;
   protected writeCheckpointAPI: PostgresWriteCheckpointAPI;
 
```
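The new `sharedIterator` means every checkpoint watcher fans in on a single `watchActiveCheckpoint()` stream instead of each opening its own query and listener. A minimal sketch of that sharing pattern, assuming (as its usage in this diff suggests) that `BroadcastIterable` accepts a `(signal) => AsyncIterable` factory and is itself async-iterable; the names below are illustrative, not package code:

```ts
import { BroadcastIterable } from '@powersync/service-core';
import { wrapWithAbort } from 'ix/asynciterable/operators/withabort.js';
import * as timers from 'timers/promises';

// One upstream source, started lazily and shared by all subscribers.
const shared = new BroadcastIterable(async function* (signal: AbortSignal) {
  let tick = 0;
  while (!signal.aborted) {
    yield tick++;
    await timers.setTimeout(1000);
  }
});

// Each subscriber iterates the same broadcast; aborting its own signal only
// detaches that subscriber, not the shared upstream for everyone else.
async function subscribe(name: string, signal: AbortSignal) {
  for await (const value of wrapWithAbort(shared, signal)) {
    console.log(name, value);
  }
}
```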
```diff
@@ -151,21 +166,39 @@ export class PostgresSyncRulesStorage
         type_oid: typeof column.typeId !== 'undefined' ? Number(column.typeId) : column.typeId
       }));
     return this.db.transaction(async (db) => {
-      let sourceTableRow
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+      let sourceTableRow: SourceTableDecoded | null;
+      if (objectId != null) {
+        sourceTableRow = await db.sql`
+          SELECT
+            *
+          FROM
+            source_tables
+          WHERE
+            group_id = ${{ type: 'int4', value: group_id }}
+            AND connection_id = ${{ type: 'int4', value: connection_id }}
+            AND relation_id = ${{ type: 'jsonb', value: { object_id: objectId } satisfies StoredRelationId }}
+            AND schema_name = ${{ type: 'varchar', value: schema }}
+            AND table_name = ${{ type: 'varchar', value: table }}
+            AND replica_id_columns = ${{ type: 'jsonb', value: columns }}
+        `
+          .decoded(models.SourceTable)
+          .first();
+      } else {
+        sourceTableRow = await db.sql`
+          SELECT
+            *
+          FROM
+            source_tables
+          WHERE
+            group_id = ${{ type: 'int4', value: group_id }}
+            AND connection_id = ${{ type: 'int4', value: connection_id }}
+            AND schema_name = ${{ type: 'varchar', value: schema }}
+            AND table_name = ${{ type: 'varchar', value: table }}
+            AND replica_id_columns = ${{ type: 'jsonb', value: columns }}
+        `
+          .decoded(models.SourceTable)
+          .first();
+      }
 
       if (sourceTableRow == null) {
         const row = await db.sql`
```
```diff
@@ -184,7 +217,7 @@ export class PostgresSyncRulesStorage
             ${{ type: 'varchar', value: uuid.v4() }},
             ${{ type: 'int4', value: group_id }},
             ${{ type: 'int4', value: connection_id }},
-            --- The objectId can be string | number, we store it as jsonb value
+            --- The objectId can be string | number | undefined, we store it as jsonb value
             ${{ type: 'jsonb', value: { object_id: objectId } satisfies StoredRelationId }},
             ${{ type: 'varchar', value: schema }},
             ${{ type: 'varchar', value: table }},
```
```diff
@@ -211,25 +244,47 @@ export class PostgresSyncRulesStorage
       sourceTable.syncData = options.sync_rules.tableSyncsData(sourceTable);
       sourceTable.syncParameters = options.sync_rules.tableSyncsParameters(sourceTable);
 
-
-
-
-
-
-
-
-
-
-
-
-
+      let truncatedTables: SourceTableDecoded[] = [];
+      if (objectId != null) {
+        // relation_id present - check for renamed tables
+        truncatedTables = await db.sql`
+          SELECT
+            *
+          FROM
+            source_tables
+          WHERE
+            group_id = ${{ type: 'int4', value: group_id }}
+            AND connection_id = ${{ type: 'int4', value: connection_id }}
+            AND id != ${{ type: 'varchar', value: sourceTableRow!.id }}
+            AND (
+              relation_id = ${{ type: 'jsonb', value: { object_id: objectId } satisfies StoredRelationId }}
+              OR (
+                schema_name = ${{ type: 'varchar', value: schema }}
+                AND table_name = ${{ type: 'varchar', value: table }}
+              )
+            )
+        `
+          .decoded(models.SourceTable)
+          .rows();
+      } else {
+        // relation_id not present - only check for changed replica_id_columns
+        truncatedTables = await db.sql`
+          SELECT
+            *
+          FROM
+            source_tables
+          WHERE
+            group_id = ${{ type: 'int4', value: group_id }}
+            AND connection_id = ${{ type: 'int4', value: connection_id }}
+            AND id != ${{ type: 'varchar', value: sourceTableRow!.id }}
+            AND (
               schema_name = ${{ type: 'varchar', value: schema }}
               AND table_name = ${{ type: 'varchar', value: table }}
             )
-
-
-
-
+        `
+          .decoded(models.SourceTable)
+          .rows();
+      }
 
       return {
         table: sourceTable,
```
```diff
@@ -272,7 +327,7 @@ export class PostgresSyncRulesStorage
 
     const checkpoint_lsn = syncRules?.last_checkpoint_lsn ?? null;
 
-
+    const batch = new PostgresBucketBatch({
       db: this.db,
       sync_rules: this.sync_rules.parsed(options).sync_rules,
       group_id: this.group_id,
```
```diff
@@ -602,7 +657,10 @@ export class PostgresSyncRulesStorage
         SET
           state = ${{ type: 'varchar', value: storage.SyncRuleState.STOP }}
         WHERE
-
+          (
+            state = ${{ value: storage.SyncRuleState.ACTIVE, type: 'varchar' }}
+            OR state = ${{ value: storage.SyncRuleState.ERRORED, type: 'varchar' }}
+          )
           AND id != ${{ type: 'int4', value: this.group_id }}
       `.execute();
     });
```
```diff
@@ -663,4 +721,139 @@ export class PostgresSyncRulesStorage
       })
     );
   }
+
+  async getActiveCheckpoint(): Promise<storage.ReplicationCheckpoint> {
+    const activeCheckpoint = await this.db.sql`
+      SELECT
+        id,
+        last_checkpoint,
+        last_checkpoint_lsn
+      FROM
+        sync_rules
+      WHERE
+        state = ${{ value: storage.SyncRuleState.ACTIVE, type: 'varchar' }}
+        OR state = ${{ value: storage.SyncRuleState.ERRORED, type: 'varchar' }}
+      ORDER BY
+        id DESC
+      LIMIT
+        1
+    `
+      .decoded(models.ActiveCheckpoint)
+      .first();
+
+    return this.makeActiveCheckpoint(activeCheckpoint);
+  }
+
+  async *watchWriteCheckpoint(options: WatchWriteCheckpointOptions): AsyncIterable<storage.StorageCheckpointUpdate> {
+    let lastCheckpoint: utils.OpId | null = null;
+    let lastWriteCheckpoint: bigint | null = null;
+
+    const { signal, user_id } = options;
+
+    const iter = wrapWithAbort(this.sharedIterator, signal);
+    for await (const cp of iter) {
+      const { checkpoint, lsn } = cp;
+
+      // lsn changes are not important by itself.
+      // What is important is:
+      // 1. checkpoint (op_id) changes.
+      // 2. write checkpoint changes for the specific user
+      const lsnFilters: Record<string, string> = lsn ? { 1: lsn } : {};
+
+      const currentWriteCheckpoint = await this.lastWriteCheckpoint({
+        user_id,
+        heads: {
+          ...lsnFilters
+        }
+      });
+
+      if (currentWriteCheckpoint == lastWriteCheckpoint && checkpoint == lastCheckpoint) {
+        // No change - wait for next one
+        // In some cases, many LSNs may be produced in a short time.
+        // Add a delay to throttle the write checkpoint lookup a bit.
+        await timers.setTimeout(20 + 10 * Math.random());
+        continue;
+      }
+
+      lastWriteCheckpoint = currentWriteCheckpoint;
+      lastCheckpoint = checkpoint;
+
+      yield {
+        base: cp,
+        writeCheckpoint: currentWriteCheckpoint,
+        update: CHECKPOINT_INVALIDATE_ALL
+      };
+    }
+  }
+
+  protected async *watchActiveCheckpoint(signal: AbortSignal): AsyncIterable<storage.ReplicationCheckpoint> {
+    const doc = await this.db.sql`
+      SELECT
+        id,
+        last_checkpoint,
+        last_checkpoint_lsn
+      FROM
+        sync_rules
+      WHERE
+        state = ${{ value: storage.SyncRuleState.ACTIVE, type: 'varchar' }}
+        OR state = ${{ value: storage.SyncRuleState.ERRORED, type: 'varchar' }}
+      LIMIT
+        1
+    `
+      .decoded(models.ActiveCheckpoint)
+      .first();
+
+    if (doc == null) {
+      // Abort the connections - clients will have to retry later.
+      throw new framework.ServiceError(framework.ErrorCode.PSYNC_S2302, 'No active sync rules available');
+    }
+
+    const sink = new LastValueSink<string>(undefined);
+
+    const disposeListener = this.db.registerListener({
+      notification: (notification) => sink.next(notification.payload)
+    });
+
+    signal.addEventListener('aborted', async () => {
+      disposeListener();
+      sink.complete();
+    });
+
+    yield this.makeActiveCheckpoint(doc);
+
+    let lastOp: storage.ReplicationCheckpoint | null = null;
+    for await (const payload of sink.withSignal(signal)) {
+      if (signal.aborted) {
+        return;
+      }
+
+      const notification = models.ActiveCheckpointNotification.decode(payload);
+      if (notification.active_checkpoint == null) {
+        continue;
+      }
+      if (Number(notification.active_checkpoint.id) != doc.id) {
+        // Active sync rules changed - abort and restart the stream
+        break;
+      }
+
+      const activeCheckpoint = this.makeActiveCheckpoint(notification.active_checkpoint);
+
+      if (lastOp == null || activeCheckpoint.lsn != lastOp.lsn || activeCheckpoint.checkpoint != lastOp.checkpoint) {
+        lastOp = activeCheckpoint;
+        yield activeCheckpoint;
+      }
+    }
+  }
+
+  async getCheckpointChanges(options: GetCheckpointChangesOptions): Promise<CheckpointChanges> {
+    // We do not track individual changes yet
+    return CHECKPOINT_INVALIDATE_ALL;
+  }
+
+  private makeActiveCheckpoint(row: models.ActiveCheckpointDecoded | null) {
+    return {
+      checkpoint: utils.timestampToOpId(row?.last_checkpoint ?? 0n),
+      lsn: row?.last_checkpoint_lsn ?? null
+    } satisfies storage.ReplicationCheckpoint;
+  }
 }
```
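The checkpoint-watching API added above is consumed as a plain async iterable. A hedged usage sketch (not code from the package), assuming `watchWriteCheckpoint` is exposed on the `storage.SyncRulesBucketStorage` interface as this implementation suggests, and that its options carry only the `user_id` and `signal` fields destructured in the diff; variable names are illustrative:

```ts
import { storage } from '@powersync/service-core';

// `bucketStorage` stands in for a PostgresSyncRulesStorage instance.
async function followCheckpoints(bucketStorage: storage.SyncRulesBucketStorage, userId: string) {
  const controller = new AbortController();

  // Stop watching after a minute, e.g. when the client disconnects.
  setTimeout(() => controller.abort(), 60_000);

  for await (const update of bucketStorage.watchWriteCheckpoint({
    user_id: userId,
    signal: controller.signal
  })) {
    // `base` is the replication checkpoint (op id + LSN), `writeCheckpoint` the
    // user's latest write checkpoint, and `update` the invalidation info
    // (currently always CHECKPOINT_INVALIDATE_ALL in this storage module).
    console.log(update.base.checkpoint, update.base.lsn, update.writeCheckpoint);
  }
}
```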
package/src/storage/batch/PostgresBucketBatch.ts
CHANGED
```diff
@@ -1,7 +1,7 @@
 import * as lib_postgres from '@powersync/lib-service-postgres';
 import {
+  BaseObserver,
   container,
-  DisposableObserver,
   ErrorCode,
   errors,
   logger,
```
```diff
@@ -51,7 +51,7 @@ type StatefulCheckpointDecoded = t.Decoded<typeof StatefulCheckpoint>;
 const MAX_ROW_SIZE = 15 * 1024 * 1024;
 
 export class PostgresBucketBatch
-  extends
+  extends BaseObserver<storage.BucketBatchStorageListener>
   implements storage.BucketStorageBatch
 {
   public last_flushed_op: bigint | null = null;
```
```diff
@@ -87,9 +87,13 @@ export class PostgresBucketBatch
     return this.last_checkpoint_lsn;
   }
 
+  async [Symbol.asyncDispose]() {
+    super.clearListeners();
+  }
+
   async save(record: storage.SaveOptions): Promise<storage.FlushedResult | null> {
     // TODO maybe share with abstract class
-    const { after,
+    const { after, before, sourceTable, tag } = record;
     for (const event of this.getTableEvents(sourceTable)) {
       this.iterateListeners((cb) =>
         cb.replicationEvent?.({
```
```diff
@@ -245,7 +249,10 @@ export class PostgresBucketBatch
 
   private async flushInner(): Promise<storage.FlushedResult | null> {
     const batch = this.batch;
-
+    // Don't flush empty batches
+    // This helps prevent feedback loops when using the same database for
+    // the source data and sync bucket storage
+    if (batch == null || batch.length == 0) {
       return null;
     }
 
```
```diff
@@ -275,7 +282,9 @@ export class PostgresBucketBatch
     return { flushed_op: String(lastOp) };
   }
 
-  async commit(lsn: string): Promise<boolean> {
+  async commit(lsn: string, options?: storage.BucketBatchCommitOptions): Promise<boolean> {
+    const { createEmptyCheckpoints } = { ...storage.DEFAULT_BUCKET_BATCH_COMMIT_OPTIONS, ...options };
+
     await this.flush();
 
     if (this.last_checkpoint_lsn != null && lsn < this.last_checkpoint_lsn) {
```
```diff
@@ -309,6 +318,12 @@ export class PostgresBucketBatch
 
       return false;
     }
+
+    // Don't create a checkpoint if there were no changes
+    if (!createEmptyCheckpoints && this.persisted_op == null) {
+      return false;
+    }
+
     const now = new Date().toISOString();
     const update: Partial<models.SyncRules> = {
       last_checkpoint_lsn: lsn,
```
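`commit()` now merges caller options over `DEFAULT_BUCKET_BATCH_COMMIT_OPTIONS` before deciding whether to write an empty checkpoint. A minimal, self-contained sketch of that defaults-merge pattern; the option name mirrors the diff, while the default value of `true` is an assumption (it would preserve the previous always-checkpoint behaviour):

```ts
// Hypothetical stand-ins for storage.BucketBatchCommitOptions and
// storage.DEFAULT_BUCKET_BATCH_COMMIT_OPTIONS.
interface BucketBatchCommitOptions {
  createEmptyCheckpoints?: boolean;
}

const DEFAULT_BUCKET_BATCH_COMMIT_OPTIONS: Required<BucketBatchCommitOptions> = {
  createEmptyCheckpoints: true // assumed default
};

function resolveCommitOptions(options?: BucketBatchCommitOptions): Required<BucketBatchCommitOptions> {
  // Spread order matters: caller-supplied fields win over the defaults.
  return { ...DEFAULT_BUCKET_BATCH_COMMIT_OPTIONS, ...options };
}

console.log(resolveCommitOptions());                                  // { createEmptyCheckpoints: true }
console.log(resolveCommitOptions({ createEmptyCheckpoints: false })); // { createEmptyCheckpoints: false }
```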
```diff
@@ -488,7 +503,7 @@ export class PostgresBucketBatch
             jsonb_array_elements(${{ type: 'jsonb', value: sizeLookups }}::jsonb) AS FILTER
           )
         SELECT
-
+          octet_length(c.data) AS data_size,
           c.source_table,
           c.source_key
         FROM
```
```diff
@@ -529,23 +544,20 @@ export class PostgresBucketBatch
     const current_data_lookup = new Map<string, CurrentDataDecoded>();
     for await (const currentDataRows of db.streamRows<CurrentData>({
       statement: /* sql */ `
-        WITH
-          filter_data AS (
-            SELECT
-              decode(FILTER ->> 'source_key', 'hex') AS source_key, -- Decoding from hex to bytea
-              (FILTER ->> 'source_table') AS source_table_id
-            FROM
-              jsonb_array_elements($1::jsonb) AS FILTER
-          )
         SELECT
-          --- With skipExistingRows, we only need to know whether or not the row exists.
           ${this.options.skip_existing_rows ? `c.source_table, c.source_key` : 'c.*'}
         FROM
           current_data c
-          JOIN
+          JOIN (
+            SELECT
+              decode(FILTER ->> 'source_key', 'hex') AS source_key,
+              FILTER ->> 'source_table' AS source_table_id
+            FROM
+              jsonb_array_elements($1::jsonb) AS FILTER
+          ) f ON c.source_table = f.source_table_id
           AND c.source_key = f.source_key
         WHERE
-          c.group_id = $2
+          c.group_id = $2;
       `,
       params: [
        {
```
```diff
@@ -553,7 +565,7 @@ export class PostgresBucketBatch
           value: lookups
         },
         {
-          type: '
+          type: 'int4',
           value: this.group_id
         }
       ]
```
```diff
@@ -610,7 +622,12 @@ export class PostgresBucketBatch
         await persistedBatch.flush(db);
       }
     }
-
+
+    // Don't return empty batches
+    if (resumeBatch?.batch.length) {
+      return resumeBatch;
+    }
+    return null;
   }
 
   protected async saveOperation(
```
```diff
@@ -627,8 +644,8 @@ export class PostgresBucketBatch
 
     let existingBuckets: CurrentBucket[] = [];
     let newBuckets: CurrentBucket[] = [];
-    let existingLookups: Buffer[] = [];
-    let newLookups: Buffer[] = [];
+    let existingLookups: Buffer<ArrayBuffer>[] = [];
+    let newLookups: Buffer<ArrayBuffer>[] = [];
 
     if (this.options.skip_existing_rows) {
       if (record.tag == storage.SaveOperationTag.INSERT) {
```
```diff
@@ -682,7 +699,7 @@ export class PostgresBucketBatch
       }
     }
 
-    let afterData: Buffer | undefined;
+    let afterData: Buffer<ArrayBuffer> | undefined;
    if (afterId != null && !this.options.store_current_data) {
       afterData = storage.serializeBson({});
     } else if (afterId != null) {
```
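The `Buffer` → `Buffer<ArrayBuffer>` annotations in the two hunks above most likely track the generic `Buffer` type in recent `@types/node`, paired with TypeScript 5.7+ where typed arrays are generic over their backing buffer; that reading is an assumption, not stated in the diff. A small illustration:

```ts
// Assumes recent @types/node plus TypeScript 5.7+, where Buffer<T> extends Uint8Array<T>.
const data: Buffer<ArrayBuffer> = Buffer.alloc(16); // allocating APIs are typed as ArrayBuffer-backed
const view: Uint8Array<ArrayBuffer> = data;         // assignable through the shared generic parameter
```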
package/src/storage/batch/PostgresPersistedBatch.ts
CHANGED
```diff
@@ -256,33 +256,6 @@ export class PostgresPersistedBatch {
   protected async flushBucketData(db: lib_postgres.WrappedConnection) {
     if (this.bucketDataInserts.length > 0) {
       await db.sql`
-        WITH
-          parsed_data AS (
-            SELECT
-              group_id,
-              bucket_name,
-              source_table,
-              decode(source_key, 'hex') AS source_key, -- Decode hex to bytea
-              table_name,
-              op,
-              row_id,
-              checksum,
-              data,
-              target_op
-            FROM
-              jsonb_to_recordset(${{ type: 'jsonb', value: this.bucketDataInserts }}::jsonb) AS t (
-                group_id integer,
-                bucket_name text,
-                source_table text,
-                source_key text, -- Input as hex string
-                table_name text,
-                op text,
-                row_id text,
-                checksum bigint,
-                data text,
-                target_op bigint
-              )
-          )
         INSERT INTO
           bucket_data (
             group_id,
```
```diff
@@ -303,14 +276,25 @@ export class PostgresPersistedBatch {
           nextval('op_id_sequence'),
           op,
           source_table,
-          source_key,
+          decode(source_key, 'hex') AS source_key,
           table_name,
           row_id,
           checksum,
           data,
           target_op
         FROM
-
+          json_to_recordset(${{ type: 'json', value: this.bucketDataInserts }}::json) AS t (
+            group_id integer,
+            bucket_name text,
+            source_table text,
+            source_key text, -- Input as hex string
+            table_name text,
+            op text,
+            row_id text,
+            checksum bigint,
+            data text,
+            target_op bigint
+          );
       `.execute();
     }
   }
```
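The persisted-batch flushes above (and the two that follow) drop the intermediate `parsed_data` CTE: rows now come in through `json_to_recordset` rather than `jsonb_to_recordset`, and the hex-to-bytea decoding happens inline in the `INSERT ... SELECT`. An illustrative, standalone version of that pattern; the table and columns here are simplified stand-ins, not the package's schema:

```ts
// Simplified stand-in schema: example_rows(id bytea, payload text).
const exampleInsert = /* sql */ `
  INSERT INTO example_rows (id, payload)
  SELECT
    decode(id_hex, 'hex') AS id, -- hex string -> bytea
    payload
  FROM
    json_to_recordset('[{"id_hex":"01ab","payload":"x"}]'::json) AS t (
      id_hex text,
      payload text
    );
`;
```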
```diff
@@ -318,23 +302,6 @@ export class PostgresPersistedBatch {
   protected async flushParameterData(db: lib_postgres.WrappedConnection) {
     if (this.parameterDataInserts.length > 0) {
       await db.sql`
-        WITH
-          parsed_data AS (
-            SELECT
-              group_id,
-              source_table,
-              decode(source_key, 'hex') AS source_key, -- Decode hex to bytea
-              decode(lookup, 'hex') AS lookup, -- Decode hex to bytea
-              bucket_parameters
-            FROM
-              jsonb_to_recordset(${{ type: 'jsonb', value: this.parameterDataInserts }}::jsonb) AS t (
-                group_id integer,
-                source_table text,
-                source_key text, -- Input as hex string
-                lookup text, -- Input as hex string
-                bucket_parameters text -- Input as stringified JSON
-              )
-          )
         INSERT INTO
           bucket_parameters (
             group_id,
```
```diff
@@ -346,11 +313,17 @@ export class PostgresPersistedBatch {
         SELECT
           group_id,
           source_table,
-          source_key, --
-          lookup, --
+          decode(source_key, 'hex') AS source_key, -- Decode hex to bytea
+          decode(lookup, 'hex') AS lookup, -- Decode hex to bytea
           bucket_parameters
         FROM
-
+          json_to_recordset(${{ type: 'json', value: this.parameterDataInserts }}::json) AS t (
+            group_id integer,
+            source_table text,
+            source_key text, -- Input as hex string
+            lookup text, -- Input as hex string
+            bucket_parameters text -- Input as stringified JSON
+          )
       `.execute();
     }
   }
```
```diff
@@ -358,33 +331,6 @@ export class PostgresPersistedBatch {
   protected async flushCurrentData(db: lib_postgres.WrappedConnection) {
     if (this.currentDataInserts.size > 0) {
       await db.sql`
-        WITH
-          parsed_data AS (
-            SELECT
-              group_id,
-              source_table,
-              decode(source_key, 'hex') AS source_key, -- Decode hex to bytea
-              buckets::jsonb AS buckets,
-              decode(data, 'hex') AS data, -- Decode hex to bytea
-              ARRAY(
-                SELECT
-                  decode((value ->> 0)::TEXT, 'hex')
-                FROM
-                  jsonb_array_elements(lookups::jsonb) AS value
-              ) AS lookups -- Decode array of hex strings to bytea[]
-            FROM
-              jsonb_to_recordset(${{
-                type: 'jsonb',
-                value: Array.from(this.currentDataInserts.values())
-              }}::jsonb) AS t (
-                group_id integer,
-                source_table text,
-                source_key text, -- Input as hex string
-                buckets text,
-                data text, -- Input as hex string
-                lookups text -- Input as stringified JSONB array of hex strings
-              )
-          )
         INSERT INTO
           current_data (
             group_id,
```
```diff
@@ -397,12 +343,24 @@ export class PostgresPersistedBatch {
         SELECT
           group_id,
           source_table,
-          source_key, --
-          buckets,
-          data, --
-
+          decode(source_key, 'hex') AS source_key, -- Decode hex to bytea
+          buckets::jsonb AS buckets,
+          decode(data, 'hex') AS data, -- Decode hex to bytea
+          array(
+            SELECT
+              decode(element, 'hex')
+            FROM
+              unnest(lookups) AS element
+          ) AS lookups
         FROM
-
+          json_to_recordset(${{ type: 'json', value: Array.from(this.currentDataInserts.values()) }}::json) AS t (
+            group_id integer,
+            source_table text,
+            source_key text, -- Input as hex string
+            buckets text,
+            data text, -- Input as hex string
+            lookups TEXT[] -- Input as stringified JSONB array of hex strings
+          )
         ON CONFLICT (group_id, source_table, source_key) DO UPDATE
         SET
           buckets = EXCLUDED.buckets,
```
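`flushCurrentData` now receives `lookups` as a `text[]` of hex strings and converts it to `bytea[]` inline with `unnest()`. A standalone illustration of that conversion, with throwaway example values:

```ts
const decodeLookups = /* sql */ `
  SELECT
    array(
      SELECT decode(element, 'hex')
      FROM unnest(ARRAY['01ab', 'ff00']::text[]) AS element
    ) AS lookups; -- yields bytea[]
`;
```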
package/src/types/types.ts
CHANGED
```diff
@@ -53,6 +53,7 @@ export type RequiredOperationBatchLimits = Required<OperationBatchLimits>;
 
 export type NormalizedPostgresStorageConfig = pg_wire.NormalizedConnectionConfig & {
   batch_limits: RequiredOperationBatchLimits;
+  max_pool_size: number;
 };
 
 export const normalizePostgresStorageConfig = (
```
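The new `max_pool_size` field rides on the same type-level pattern as the rest of the normalized config: a base connection config intersected with storage-specific fields, with optional tuning limits made mandatory via `Required<>`. A generic illustration using hypothetical shapes (only `max_pool_size` and `batch_limits` come from the package; every other field name here is invented for the example):

```ts
// Hypothetical shapes, for illustration only.
type OperationBatchLimits = { max_record_count?: number };
type RequiredOperationBatchLimits = Required<OperationBatchLimits>;

type BaseConnectionConfig = { hostname: string; port: number };

type NormalizedStorageConfig = BaseConnectionConfig & {
  batch_limits: RequiredOperationBatchLimits;
  max_pool_size: number;
};

const config: NormalizedStorageConfig = {
  hostname: 'localhost',
  port: 5432,
  batch_limits: { max_record_count: 2000 },
  max_pool_size: 8
};
```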
|