@powersync/service-module-postgres-storage 0.11.2 → 0.13.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. package/CHANGELOG.md +60 -0
  2. package/dist/.tsbuildinfo +1 -1
  3. package/dist/@types/migrations/scripts/1771232439485-storage-version.d.ts +3 -0
  4. package/dist/@types/migrations/scripts/1771424826685-current-data-pending-deletes.d.ts +3 -0
  5. package/dist/@types/migrations/scripts/1771491856000-sync-plan.d.ts +3 -0
  6. package/dist/@types/storage/PostgresBucketStorageFactory.d.ts +6 -10
  7. package/dist/@types/storage/PostgresCompactor.d.ts +10 -3
  8. package/dist/@types/storage/PostgresSyncRulesStorage.d.ts +5 -3
  9. package/dist/@types/storage/batch/OperationBatch.d.ts +2 -2
  10. package/dist/@types/storage/batch/PostgresBucketBatch.d.ts +12 -9
  11. package/dist/@types/storage/batch/PostgresPersistedBatch.d.ts +17 -5
  12. package/dist/@types/storage/current-data-store.d.ts +85 -0
  13. package/dist/@types/storage/current-data-table.d.ts +9 -0
  14. package/dist/@types/storage/sync-rules/PostgresPersistedSyncRulesContent.d.ts +1 -10
  15. package/dist/@types/storage/table-id.d.ts +2 -0
  16. package/dist/@types/types/models/CurrentData.d.ts +18 -3
  17. package/dist/@types/types/models/SyncRules.d.ts +12 -2
  18. package/dist/@types/types/models/json.d.ts +11 -0
  19. package/dist/@types/types/types.d.ts +2 -0
  20. package/dist/@types/utils/bson.d.ts +1 -1
  21. package/dist/@types/utils/db.d.ts +9 -0
  22. package/dist/@types/utils/test-utils.d.ts +1 -1
  23. package/dist/migrations/scripts/1771232439485-storage-version.js +111 -0
  24. package/dist/migrations/scripts/1771232439485-storage-version.js.map +1 -0
  25. package/dist/migrations/scripts/1771424826685-current-data-pending-deletes.js +8 -0
  26. package/dist/migrations/scripts/1771424826685-current-data-pending-deletes.js.map +1 -0
  27. package/dist/migrations/scripts/1771491856000-sync-plan.js +91 -0
  28. package/dist/migrations/scripts/1771491856000-sync-plan.js.map +1 -0
  29. package/dist/storage/PostgresBucketStorageFactory.js +56 -58
  30. package/dist/storage/PostgresBucketStorageFactory.js.map +1 -1
  31. package/dist/storage/PostgresCompactor.js +55 -66
  32. package/dist/storage/PostgresCompactor.js.map +1 -1
  33. package/dist/storage/PostgresSyncRulesStorage.js +23 -15
  34. package/dist/storage/PostgresSyncRulesStorage.js.map +1 -1
  35. package/dist/storage/batch/OperationBatch.js +2 -1
  36. package/dist/storage/batch/OperationBatch.js.map +1 -1
  37. package/dist/storage/batch/PostgresBucketBatch.js +286 -213
  38. package/dist/storage/batch/PostgresBucketBatch.js.map +1 -1
  39. package/dist/storage/batch/PostgresPersistedBatch.js +86 -81
  40. package/dist/storage/batch/PostgresPersistedBatch.js.map +1 -1
  41. package/dist/storage/current-data-store.js +270 -0
  42. package/dist/storage/current-data-store.js.map +1 -0
  43. package/dist/storage/current-data-table.js +22 -0
  44. package/dist/storage/current-data-table.js.map +1 -0
  45. package/dist/storage/sync-rules/PostgresPersistedSyncRulesContent.js +14 -30
  46. package/dist/storage/sync-rules/PostgresPersistedSyncRulesContent.js.map +1 -1
  47. package/dist/storage/table-id.js +8 -0
  48. package/dist/storage/table-id.js.map +1 -0
  49. package/dist/types/models/CurrentData.js +11 -2
  50. package/dist/types/models/CurrentData.js.map +1 -1
  51. package/dist/types/models/SyncRules.js +12 -1
  52. package/dist/types/models/SyncRules.js.map +1 -1
  53. package/dist/types/models/json.js +21 -0
  54. package/dist/types/models/json.js.map +1 -0
  55. package/dist/utils/bson.js.map +1 -1
  56. package/dist/utils/db.js +41 -0
  57. package/dist/utils/db.js.map +1 -1
  58. package/dist/utils/test-utils.js +50 -14
  59. package/dist/utils/test-utils.js.map +1 -1
  60. package/package.json +9 -9
  61. package/src/migrations/scripts/1771232439485-storage-version.ts +44 -0
  62. package/src/migrations/scripts/1771424826685-current-data-pending-deletes.ts +10 -0
  63. package/src/migrations/scripts/1771491856000-sync-plan.ts +21 -0
  64. package/src/storage/PostgresBucketStorageFactory.ts +69 -68
  65. package/src/storage/PostgresCompactor.ts +63 -72
  66. package/src/storage/PostgresSyncRulesStorage.ts +30 -17
  67. package/src/storage/batch/OperationBatch.ts +4 -3
  68. package/src/storage/batch/PostgresBucketBatch.ts +306 -238
  69. package/src/storage/batch/PostgresPersistedBatch.ts +92 -84
  70. package/src/storage/current-data-store.ts +326 -0
  71. package/src/storage/current-data-table.ts +26 -0
  72. package/src/storage/sync-rules/PostgresPersistedSyncRulesContent.ts +13 -33
  73. package/src/storage/table-id.ts +9 -0
  74. package/src/types/models/CurrentData.ts +17 -4
  75. package/src/types/models/SyncRules.ts +16 -1
  76. package/src/types/models/json.ts +26 -0
  77. package/src/utils/bson.ts +1 -1
  78. package/src/utils/db.ts +47 -0
  79. package/src/utils/test-utils.ts +42 -15
  80. package/test/src/__snapshots__/storage.test.ts.snap +148 -6
  81. package/test/src/__snapshots__/storage_compacting.test.ts.snap +17 -0
  82. package/test/src/__snapshots__/storage_sync.test.ts.snap +2211 -21
  83. package/test/src/migrations.test.ts +9 -2
  84. package/test/src/storage.test.ts +137 -131
  85. package/test/src/storage_compacting.test.ts +113 -2
  86. package/test/src/storage_sync.test.ts +148 -4
  87. package/test/src/util.ts +5 -2
@@ -1,10 +1,12 @@
1
1
  import * as lib_postgres from '@powersync/lib-service-postgres';
2
2
  import { logger } from '@powersync/lib-services-framework';
3
- import { storage, utils } from '@powersync/service-core';
3
+ import { bson, storage, utils } from '@powersync/service-core';
4
4
  import { JSONBig } from '@powersync/service-jsonbig';
5
5
  import * as sync_rules from '@powersync/service-sync-rules';
6
6
  import { models, RequiredOperationBatchLimits } from '../../types/types.js';
7
7
  import { replicaIdToSubkey } from '../../utils/bson.js';
8
+ import { PostgresCurrentDataStore } from '../current-data-store.js';
9
+ import { postgresTableId } from '../table-id.js';
8
10
 
9
11
  export type SaveBucketDataOptions = {
10
12
  /**
@@ -24,7 +26,7 @@ export type SaveParameterDataOptions = {
24
26
  };
25
27
 
26
28
  export type DeleteCurrentDataOptions = {
27
- source_table_id: bigint;
29
+ source_table_id: string;
28
30
  /**
29
31
  * ReplicaID which needs to be serialized in order to be queried
30
32
  * or inserted into the DB
@@ -34,14 +36,24 @@ export type DeleteCurrentDataOptions = {
34
36
  * Optionally provide the serialized source key directly
35
37
  */
36
38
  serialized_source_key?: Buffer;
39
+
40
+ /**
41
+ * Streaming replication needs soft deletes, while truncating tables can use a hard delete directly.
42
+ */
43
+ soft: boolean;
37
44
  };
38
45
 
39
46
  export type PostgresPersistedBatchOptions = RequiredOperationBatchLimits & {
40
47
  group_id: number;
48
+ storageConfig: storage.StorageVersionConfig;
41
49
  };
42
50
 
51
+ const EMPTY_DATA = Buffer.from(bson.serialize({}));
52
+
43
53
  export class PostgresPersistedBatch {
44
54
  group_id: number;
55
+ private readonly storageConfig: storage.StorageVersionConfig;
56
+ private readonly currentDataStore: PostgresCurrentDataStore;
45
57
 
46
58
  /**
47
59
  * Very rough estimate of current operations size in bytes
@@ -56,22 +68,26 @@ export class PostgresPersistedBatch {
56
68
  */
57
69
  protected bucketDataInserts: models.BucketData[];
58
70
  protected parameterDataInserts: models.BucketParameters[];
59
- protected currentDataDeletes: Pick<models.CurrentData, 'group_id' | 'source_key' | 'source_table'>[];
60
71
  /**
61
- * This is stored as a map to avoid multiple inserts (or conflicts) for the same key
72
+ * This is stored as a map to avoid multiple inserts (or conflicts) for the same key.
73
+ *
74
+ * Each key may only occur in one of these two maps.
62
75
  */
63
- protected currentDataInserts: Map<string, models.CurrentData>;
76
+ protected currentDataInserts: Map<string, models.V3CurrentData>;
77
+ protected currentDataDeletes: Map<string, { source_key_hex: string; source_table: string }>;
64
78
 
65
79
  constructor(options: PostgresPersistedBatchOptions) {
66
80
  this.group_id = options.group_id;
81
+ this.storageConfig = options.storageConfig;
82
+ this.currentDataStore = new PostgresCurrentDataStore(this.storageConfig);
67
83
 
68
84
  this.maxTransactionBatchSize = options.max_estimated_size;
69
85
  this.maxTransactionDocCount = options.max_record_count;
70
86
 
71
87
  this.bucketDataInserts = [];
72
88
  this.parameterDataInserts = [];
73
- this.currentDataDeletes = [];
74
89
  this.currentDataInserts = new Map();
90
+ this.currentDataDeletes = new Map();
75
91
  this.currentSize = 0;
76
92
  }
77
93
 
@@ -98,7 +114,7 @@ export class PostgresPersistedBatch {
98
114
  group_id: this.group_id,
99
115
  bucket_name: k.bucket,
100
116
  op: models.OpType.PUT,
101
- source_table: options.table.id,
117
+ source_table: postgresTableId(options.table.id),
102
118
  source_key: hexSourceKey,
103
119
  table_name: k.table,
104
120
  row_id: k.id,
@@ -117,7 +133,7 @@ export class PostgresPersistedBatch {
117
133
  group_id: this.group_id,
118
134
  bucket_name: bd.bucket,
119
135
  op: models.OpType.REMOVE,
120
- source_table: options.table.id,
136
+ source_table: postgresTableId(options.table.id),
121
137
  source_key: hexSourceKey,
122
138
  table_name: bd.table,
123
139
  row_id: bd.id,
@@ -155,7 +171,7 @@ export class PostgresPersistedBatch {
155
171
  const serializedBucketParameters = JSONBig.stringify(result.bucketParameters);
156
172
  this.parameterDataInserts.push({
157
173
  group_id: this.group_id,
158
- source_table: table.id,
174
+ source_table: postgresTableId(table.id),
159
175
  source_key: hexSourceKey,
160
176
  bucket_parameters: serializedBucketParameters,
161
177
  id: 0, // auto incrementing id
@@ -169,7 +185,7 @@ export class PostgresPersistedBatch {
169
185
  const hexLookup = lookup.toString('hex');
170
186
  this.parameterDataInserts.push({
171
187
  group_id: this.group_id,
172
- source_table: table.id,
188
+ source_table: postgresTableId(table.id),
173
189
  source_key: hexSourceKey,
174
190
  bucket_parameters: JSON.stringify([]),
175
191
  id: 0, // auto incrementing id
@@ -180,19 +196,37 @@ export class PostgresPersistedBatch {
180
196
  }
181
197
 
182
198
  deleteCurrentData(options: DeleteCurrentDataOptions) {
183
- const serializedReplicaId = options.serialized_source_key ?? storage.serializeReplicaId(options.source_key);
184
- this.currentDataDeletes.push({
185
- group_id: this.group_id,
186
- source_table: options.source_table_id.toString(),
187
- source_key: serializedReplicaId.toString('hex')
188
- });
189
- this.currentSize += serializedReplicaId.byteLength + 100;
199
+ if (options.soft && this.storageConfig.softDeleteCurrentData) {
200
+ return this.upsertCurrentData(
201
+ {
202
+ group_id: this.group_id,
203
+ source_table: options.source_table_id,
204
+ source_key: options.source_key,
205
+ buckets: [],
206
+ data: EMPTY_DATA,
207
+ lookups: [],
208
+ pending_delete: 1n // converted to nextval('op_id_sequence') in the query
209
+ },
210
+ options.serialized_source_key
211
+ );
212
+ } else {
213
+ const serializedReplicaId = options.serialized_source_key ?? storage.serializeReplicaId(options.source_key);
214
+ const hexReplicaId = serializedReplicaId.toString('hex');
215
+ const source_table = options.source_table_id;
216
+ const key = `${this.group_id}-${source_table}-${hexReplicaId}`;
217
+ this.currentDataInserts.delete(key);
218
+ this.currentDataDeletes.set(key, {
219
+ source_key_hex: hexReplicaId,
220
+ source_table: source_table
221
+ });
222
+ this.currentSize += serializedReplicaId.byteLength + 100;
223
+ }
190
224
  }
191
225
 
192
- upsertCurrentData(options: models.CurrentDataDecoded) {
226
+ upsertCurrentData(options: models.V3CurrentDataDecoded, serialized_source_key?: Buffer) {
193
227
  const { source_table, source_key, buckets } = options;
194
228
 
195
- const serializedReplicaId = storage.serializeReplicaId(source_key);
229
+ const serializedReplicaId = serialized_source_key ?? storage.serializeReplicaId(source_key);
196
230
  const hexReplicaId = serializedReplicaId.toString('hex');
197
231
  const serializedBuckets = JSONBig.stringify(options.buckets);
198
232
 
@@ -206,13 +240,15 @@ export class PostgresPersistedBatch {
206
240
  */
207
241
  const key = `${this.group_id}-${source_table}-${hexReplicaId}`;
208
242
 
243
+ this.currentDataDeletes.delete(key);
209
244
  this.currentDataInserts.set(key, {
210
245
  group_id: this.group_id,
211
246
  source_table: source_table,
212
247
  source_key: hexReplicaId,
213
248
  buckets: serializedBuckets,
214
249
  data: options.data.toString('hex'),
215
- lookups: options.lookups.map((l) => l.toString('hex'))
250
+ lookups: options.lookups.map((l) => l.toString('hex')),
251
+ pending_delete: options.pending_delete?.toString() ?? null
216
252
  });
217
253
 
218
254
  this.currentSize +=
@@ -230,7 +266,7 @@ export class PostgresPersistedBatch {
230
266
  this.currentSize >= this.maxTransactionBatchSize ||
231
267
  this.bucketDataInserts.length >= this.maxTransactionDocCount ||
232
268
  this.currentDataInserts.size >= this.maxTransactionDocCount ||
233
- this.currentDataDeletes.length >= this.maxTransactionDocCount ||
269
+ this.currentDataDeletes.size >= this.maxTransactionDocCount ||
234
270
  this.parameterDataInserts.length >= this.maxTransactionDocCount
235
271
  );
236
272
  }
@@ -239,24 +275,26 @@ export class PostgresPersistedBatch {
239
275
  const stats = {
240
276
  bucketDataCount: this.bucketDataInserts.length,
241
277
  parameterDataCount: this.parameterDataInserts.length,
242
- currentDataCount: this.currentDataInserts.size + this.currentDataDeletes.length
278
+ currentDataCount: this.currentDataInserts.size + this.currentDataDeletes.size
243
279
  };
244
280
  const flushedAny = stats.bucketDataCount > 0 || stats.parameterDataCount > 0 || stats.currentDataCount > 0;
245
281
 
246
282
  logger.info(
247
283
  `powersync_${this.group_id} Flushed ${this.bucketDataInserts.length} + ${this.parameterDataInserts.length} + ${
248
- this.currentDataInserts.size + this.currentDataDeletes.length
284
+ this.currentDataInserts.size
249
285
  } updates, ${Math.round(this.currentSize / 1024)}kb.`
250
286
  );
251
287
 
252
- await this.flushBucketData(db);
253
- await this.flushParameterData(db);
288
+ // Flush current_data first, since this is where lock errors are most likely to occur, and we
289
+ // want to detect those as soon as possible.
254
290
  await this.flushCurrentData(db);
255
291
 
292
+ await this.flushBucketData(db);
293
+ await this.flushParameterData(db);
256
294
  this.bucketDataInserts = [];
257
295
  this.parameterDataInserts = [];
258
- this.currentDataDeletes = [];
259
296
  this.currentDataInserts = new Map();
297
+ this.currentDataDeletes = new Map();
260
298
  this.currentSize = 0;
261
299
 
262
300
  return {
@@ -342,66 +380,36 @@ export class PostgresPersistedBatch {
342
380
 
343
381
  protected async flushCurrentData(db: lib_postgres.WrappedConnection) {
344
382
  if (this.currentDataInserts.size > 0) {
345
- await db.sql`
346
- INSERT INTO
347
- current_data (
348
- group_id,
349
- source_table,
350
- source_key,
351
- buckets,
352
- data,
353
- lookups
354
- )
355
- SELECT
356
- group_id,
357
- source_table,
358
- decode(source_key, 'hex') AS source_key, -- Decode hex to bytea
359
- buckets::jsonb AS buckets,
360
- decode(data, 'hex') AS data, -- Decode hex to bytea
361
- array(
362
- SELECT
363
- decode(element, 'hex')
364
- FROM
365
- unnest(lookups) AS element
366
- ) AS lookups
367
- FROM
368
- json_to_recordset(${{ type: 'json', value: Array.from(this.currentDataInserts.values()) }}::json) AS t (
369
- group_id integer,
370
- source_table text,
371
- source_key text, -- Input as hex string
372
- buckets text,
373
- data text, -- Input as hex string
374
- lookups TEXT[] -- Input as stringified JSONB array of hex strings
375
- )
376
- ON CONFLICT (group_id, source_table, source_key) DO UPDATE
377
- SET
378
- buckets = EXCLUDED.buckets,
379
- data = EXCLUDED.data,
380
- lookups = EXCLUDED.lookups;
381
- `.execute();
383
+ const updates = Array.from(this.currentDataInserts.values());
384
+ // Sort by source_table, source_key to ensure consistent order.
385
+ // While the order of updates doesn't directly matter, using a consistent order helps to reduce 40P01 deadlock errors.
386
+ // We may still have deadlocks between deletes and inserts, but those should be less frequent.
387
+ updates.sort((a, b) => {
388
+ if (a.source_table < b.source_table) return -1;
389
+ if (a.source_table > b.source_table) return 1;
390
+ if (a.source_key < b.source_key) return -1;
391
+ if (a.source_key > b.source_key) return 1;
392
+ return 0;
393
+ });
394
+
395
+ await this.currentDataStore.flushUpserts(db, updates);
382
396
  }
383
397
 
384
- if (this.currentDataDeletes.length > 0) {
385
- await db.sql`
386
- WITH
387
- conditions AS (
388
- SELECT
389
- group_id,
390
- source_table,
391
- decode(source_key, 'hex') AS source_key -- Decode hex to bytea
392
- FROM
393
- jsonb_to_recordset(${{ type: 'jsonb', value: this.currentDataDeletes }}::jsonb) AS t (
394
- group_id integer,
395
- source_table text,
396
- source_key text -- Input as hex string
397
- )
398
- )
399
- DELETE FROM current_data USING conditions
400
- WHERE
401
- current_data.group_id = conditions.group_id
402
- AND current_data.source_table = conditions.source_table
403
- AND current_data.source_key = conditions.source_key;
404
- `.execute();
398
+ if (this.currentDataDeletes.size > 0) {
399
+ const deletes = Array.from(this.currentDataDeletes.values());
400
+ // Same sorting as for inserts
401
+ deletes.sort((a, b) => {
402
+ if (a.source_table < b.source_table) return -1;
403
+ if (a.source_table > b.source_table) return 1;
404
+ if (a.source_key_hex < b.source_key_hex) return -1;
405
+ if (a.source_key_hex > b.source_key_hex) return 1;
406
+ return 0;
407
+ });
408
+
409
+ await this.currentDataStore.flushDeletes(db, {
410
+ groupId: this.group_id,
411
+ deletes
412
+ });
405
413
  }
406
414
  }
407
415
  }
// package/src/storage/current-data-store.ts (new file introduced in 0.13.0)
import * as lib_postgres from '@powersync/lib-service-postgres';
import { storage } from '@powersync/service-core';
import * as t from 'ts-codec';
import { pick } from '../utils/ts-codec.js';
import * as models from '../types/models/CurrentData.js';

// Minimal connection surface required by this store. Accepts either a full
// DatabaseClient or any wrapped/transactional connection exposing the same members.
type Queryable = Pick<lib_postgres.DatabaseClient, 'sql' | 'streamRows'>;

// Column subsets for queries that only need part of a row.
const TruncateCurrentDataCodec = pick(models.V1CurrentData, ['buckets', 'lookups', 'source_key']);
const LookupKeyCodec = pick(models.V1CurrentData, ['source_key', 'source_table']);

export type TruncateCurrentDataRow = t.Decoded<typeof TruncateCurrentDataCodec>;
export type CurrentDataLookupRow =
  | t.Decoded<typeof LookupKeyCodec>
  | t.Decoded<typeof models.V1CurrentData>
  | t.Decoded<typeof models.V3CurrentData>;

export const V1_CURRENT_DATA_TABLE = 'current_data';
export const V3_CURRENT_DATA_TABLE = 'v3_current_data';

/**
 * Data-access layer for the current_data / v3_current_data tables.
 *
 * The active table is chosen once, from the storage version config:
 * with `softDeleteCurrentData` enabled all queries target `v3_current_data`,
 * which carries an extra `pending_delete` column (an op-id from
 * `op_id_sequence`) marking rows that are soft-deleted but not yet cleaned up.
 */
export class PostgresCurrentDataStore {
  // Table name interpolated into the streaming queries below.
  readonly table: string;
  readonly softDeleteEnabled: boolean;

  constructor(storageConfig: storage.StorageVersionConfig) {
    this.softDeleteEnabled = storageConfig.softDeleteCurrentData;
    this.table = storageConfig.softDeleteCurrentData ? V3_CURRENT_DATA_TABLE : V1_CURRENT_DATA_TABLE;
  }

  /**
   * Stream up to `limit` live rows for one source table, for truncate processing.
   *
   * Soft-deleted rows (non-NULL pending_delete) are excluded when soft deletes
   * are enabled. Rows are locked with FOR NO KEY UPDATE so concurrent writers
   * to the same rows block until the enclosing transaction completes.
   */
  streamTruncateRows(
    db: Queryable,
    options: {
      groupId: number;
      sourceTableId: string;
      limit: number;
    }
  ) {
    return db.streamRows<t.Encoded<typeof TruncateCurrentDataCodec>>({
      statement: `
      SELECT
        buckets,
        lookups,
        source_key
      FROM
        ${this.table}
      WHERE
        group_id = $1
        AND source_table = $2
        ${this.wherePendingDelete({ onlyLiveRows: true })}
      LIMIT
        $3
      FOR NO KEY UPDATE
      `,
      params: [
        { type: 'int4', value: options.groupId },
        { type: 'varchar', value: options.sourceTableId },
        { type: 'int4', value: options.limit }
      ]
    });
  }

  /** Decode a raw truncate-row as streamed by {@link streamTruncateRows}. */
  decodeTruncateRow(row: t.Encoded<typeof TruncateCurrentDataCodec>): TruncateCurrentDataRow {
    return TruncateCurrentDataCodec.decode(row);
  }

  /**
   * Stream the stored data size (octet_length of `data`) for each matching
   * (source_table, source_key) pair in `lookups`.
   *
   * `source_key` in each lookup is hex-encoded; it is decoded to bytea inside
   * the query. Matched rows are locked with FOR NO KEY UPDATE.
   */
  streamSizeRows(
    db: Queryable,
    options: {
      groupId: number;
      lookups: { source_table: string; source_key: string }[];
    }
  ) {
    return db.streamRows<{
      source_table: string;
      source_key: storage.ReplicaId;
      data_size: number;
    }>({
      statement: `
      WITH
        filter_data AS (
          SELECT
            decode(FILTER ->> 'source_key', 'hex') AS source_key,
            (FILTER ->> 'source_table') AS source_table_id
          FROM
            jsonb_array_elements($1::jsonb) AS FILTER
        )
      SELECT
        octet_length(c.data) AS data_size,
        c.source_table,
        c.source_key
      FROM
        ${this.table} c
        JOIN filter_data f ON c.source_table = f.source_table_id
        AND c.source_key = f.source_key
      WHERE
        c.group_id = $2
      FOR NO KEY UPDATE
      `,
      params: [
        { type: 'jsonb', value: options.lookups },
        { type: 'int4', value: options.groupId }
      ]
    });
  }

  /**
   * Stream rows matching the given (source_table, hex source_key) lookups.
   *
   * With `skipExistingRows` only the key columns are selected (enough to know
   * the row exists); otherwise full rows are returned. Matched rows are locked
   * with FOR NO KEY UPDATE. Decode results with {@link decodeLookupRow}.
   */
  streamLookupRows(
    db: Queryable,
    options: {
      groupId: number;
      lookups: { source_table: string; source_key: string }[];
      skipExistingRows: boolean;
    }
  ) {
    const selectColumns = options.skipExistingRows ? `c.source_table, c.source_key` : `c.*`;
    return db.streamRows<any>({
      statement: `
      SELECT
        ${selectColumns}
      FROM
        ${this.table} c
        JOIN (
          SELECT
            decode(FILTER ->> 'source_key', 'hex') AS source_key,
            FILTER ->> 'source_table' AS source_table_id
          FROM
            jsonb_array_elements($1::jsonb) AS FILTER
        ) f ON c.source_table = f.source_table_id
        AND c.source_key = f.source_key
      WHERE
        c.group_id = $2
      FOR NO KEY UPDATE;
      `,
      params: [
        { type: 'jsonb', value: options.lookups },
        { type: 'int4', value: options.groupId }
      ]
    });
  }

  /**
   * Decode a row from {@link streamLookupRows}, choosing the codec based on
   * whether only key columns were selected and on the active storage version.
   */
  decodeLookupRow(row: any, skipExistingRows: boolean): CurrentDataLookupRow {
    if (skipExistingRows) {
      return LookupKeyCodec.decode(row);
    }
    return this.softDeleteEnabled ? models.V3CurrentData.decode(row) : models.V1CurrentData.decode(row);
  }

  /**
   * Bulk-upsert rows via json_to_recordset, keyed on (group_id, source_table, source_key).
   *
   * Hex-encoded fields (source_key, data, lookups) are decoded to bytea in SQL.
   * In soft-delete mode, a non-NULL incoming `pending_delete` is replaced with a
   * fresh op-id from `op_id_sequence` (callers pass a sentinel; see the batch
   * writer), so a soft delete records the op-id at which it became invisible.
   */
  async flushUpserts(db: Queryable, updates: models.V3CurrentData[]) {
    if (updates.length == 0) {
      return;
    }

    if (this.softDeleteEnabled) {
      await db.sql`
        INSERT INTO
          v3_current_data (
            group_id,
            source_table,
            source_key,
            buckets,
            data,
            lookups,
            pending_delete
          )
        SELECT
          group_id,
          source_table,
          decode(source_key, 'hex') AS source_key,
          buckets::jsonb AS buckets,
          decode(data, 'hex') AS data,
          array(
            SELECT
              decode(element, 'hex')
            FROM
              unnest(lookups) AS element
          ) AS lookups,
          CASE
            WHEN pending_delete IS NOT NULL THEN nextval('op_id_sequence')
            ELSE NULL
          END AS pending_delete
        FROM
          json_to_recordset(${{ type: 'json', value: updates }}::json) AS t (
            group_id integer,
            source_table text,
            source_key text,
            buckets text,
            data text,
            lookups TEXT[],
            pending_delete bigint
          )
        ON CONFLICT (group_id, source_table, source_key) DO UPDATE
        SET
          buckets = EXCLUDED.buckets,
          data = EXCLUDED.data,
          lookups = EXCLUDED.lookups,
          pending_delete = EXCLUDED.pending_delete;
      `.execute();
      return;
    }

    // Legacy (v1) storage: same upsert, without the pending_delete column.
    await db.sql`
      INSERT INTO
        current_data (
          group_id,
          source_table,
          source_key,
          buckets,
          data,
          lookups
        )
      SELECT
        group_id,
        source_table,
        decode(source_key, 'hex') AS source_key,
        buckets::jsonb AS buckets,
        decode(data, 'hex') AS data,
        array(
          SELECT
            decode(element, 'hex')
          FROM
            unnest(lookups) AS element
        ) AS lookups
      FROM
        json_to_recordset(${{ type: 'json', value: updates }}::json) AS t (
          group_id integer,
          source_table text,
          source_key text,
          buckets text,
          data text,
          lookups TEXT[]
        )
      ON CONFLICT (group_id, source_table, source_key) DO UPDATE
      SET
        buckets = EXCLUDED.buckets,
        data = EXCLUDED.data,
        lookups = EXCLUDED.lookups;
    `.execute();
  }

  /**
   * Bulk hard-delete rows matching the given (source_table, hex source_key) pairs.
   *
   * NOTE(review): this deletes rows directly in both modes — soft deletes are
   * created via {@link flushUpserts} with a pending_delete sentinel, not here.
   */
  async flushDeletes(
    db: Queryable,
    options: {
      groupId: number;
      deletes: { source_key_hex: string; source_table: string }[];
    }
  ) {
    if (options.deletes.length == 0) {
      return;
    }
    if (this.softDeleteEnabled) {
      await db.sql`
        WITH
          conditions AS (
            SELECT
              source_table,
              decode(source_key_hex, 'hex') AS source_key
            FROM
              jsonb_to_recordset(${{
                type: 'jsonb',
                value: options.deletes
              }}::jsonb) AS t (source_table text, source_key_hex text)
          )
        DELETE FROM v3_current_data USING conditions
        WHERE
          v3_current_data.group_id = ${{ type: 'int4', value: options.groupId }}
          AND v3_current_data.source_table = conditions.source_table
          AND v3_current_data.source_key = conditions.source_key;
      `.execute();
      return;
    }

    await db.sql`
      WITH
        conditions AS (
          SELECT
            source_table,
            decode(source_key_hex, 'hex') AS source_key
          FROM
            jsonb_to_recordset(${{
              type: 'jsonb',
              value: options.deletes
            }}::jsonb) AS t (source_table text, source_key_hex text)
        )
      DELETE FROM current_data USING conditions
      WHERE
        current_data.group_id = ${{ type: 'int4', value: options.groupId }}
        AND current_data.source_table = conditions.source_table
        AND current_data.source_key = conditions.source_key;
    `.execute();
  }

  /**
   * Physically remove soft-deleted rows whose pending_delete op-id is at or
   * below `lastCheckpoint`. No-op unless soft deletes are enabled.
   */
  async cleanupPendingDeletes(db: Queryable, options: { groupId: number; lastCheckpoint: bigint }) {
    if (!this.softDeleteEnabled) {
      return;
    }
    await db.sql`
      DELETE FROM v3_current_data
      WHERE
        group_id = ${{ type: 'int4', value: options.groupId }}
        AND pending_delete IS NOT NULL
        AND pending_delete <= ${{ type: 'int8', value: options.lastCheckpoint }}
    `.execute();
  }

  /** Delete every row belonging to a sync-rules group (both storage versions). */
  async deleteGroupRows(db: Queryable, options: { groupId: number }) {
    if (this.softDeleteEnabled) {
      await db.sql`
        DELETE FROM v3_current_data
        WHERE
          group_id = ${{ type: 'int4', value: options.groupId }}
      `.execute();
    } else {
      await db.sql`
        DELETE FROM current_data
        WHERE
          group_id = ${{ type: 'int4', value: options.groupId }}
      `.execute();
    }
  }

  /**
   * Extra WHERE clause excluding soft-deleted rows; empty when soft deletes
   * are disabled or live-row filtering is not requested.
   */
  private wherePendingDelete(options: { onlyLiveRows: boolean }) {
    if (this.softDeleteEnabled && options.onlyLiveRows) {
      return `AND pending_delete IS NULL`;
    }
    return ``;
  }
}
@@ -0,0 +1,26 @@
1
+ import { ServiceAssertionError } from '@powersync/lib-services-framework';
2
+ import { storage } from '@powersync/service-core';
3
+
4
+ export const V1_CURRENT_DATA_TABLE = 'current_data';
5
+ export const V3_CURRENT_DATA_TABLE = 'v3_current_data';
6
+
7
+ /**
8
+ * The table used by a specific storage version for general current_data access.
9
+ */
10
+ export function getCommonCurrentDataTable(storageConfig: storage.StorageVersionConfig) {
11
+ return storageConfig.softDeleteCurrentData ? V3_CURRENT_DATA_TABLE : V1_CURRENT_DATA_TABLE;
12
+ }
13
+
14
+ export function getV1CurrentDataTable(storageConfig: storage.StorageVersionConfig) {
15
+ if (storageConfig.softDeleteCurrentData) {
16
+ throw new ServiceAssertionError('current_data table cannot be used when softDeleteCurrentData is enabled');
17
+ }
18
+ return V1_CURRENT_DATA_TABLE;
19
+ }
20
+
21
+ export function getV3CurrentDataTable(storageConfig: storage.StorageVersionConfig) {
22
+ if (!storageConfig.softDeleteCurrentData) {
23
+ throw new ServiceAssertionError('v3_current_data table cannot be used when softDeleteCurrentData is disabled');
24
+ }
25
+ return V3_CURRENT_DATA_TABLE;
26
+ }