@powersync/service-module-postgres-storage 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (157)
  1. package/CHANGELOG.md +17 -0
  2. package/LICENSE +67 -0
  3. package/README.md +67 -0
  4. package/dist/.tsbuildinfo +1 -0
  5. package/dist/@types/index.d.ts +7 -0
  6. package/dist/@types/migrations/PostgresMigrationAgent.d.ts +12 -0
  7. package/dist/@types/migrations/PostgresMigrationStore.d.ts +14 -0
  8. package/dist/@types/migrations/migration-utils.d.ts +3 -0
  9. package/dist/@types/migrations/scripts/1684951997326-init.d.ts +3 -0
  10. package/dist/@types/module/PostgresStorageModule.d.ts +6 -0
  11. package/dist/@types/storage/PostgresBucketStorageFactory.d.ts +42 -0
  12. package/dist/@types/storage/PostgresCompactor.d.ts +40 -0
  13. package/dist/@types/storage/PostgresStorageProvider.d.ts +5 -0
  14. package/dist/@types/storage/PostgresSyncRulesStorage.d.ts +46 -0
  15. package/dist/@types/storage/PostgresTestStorageFactoryGenerator.d.ts +13 -0
  16. package/dist/@types/storage/batch/OperationBatch.d.ts +47 -0
  17. package/dist/@types/storage/batch/PostgresBucketBatch.d.ts +90 -0
  18. package/dist/@types/storage/batch/PostgresPersistedBatch.d.ts +64 -0
  19. package/dist/@types/storage/checkpoints/PostgresWriteCheckpointAPI.d.ts +20 -0
  20. package/dist/@types/storage/storage-index.d.ts +5 -0
  21. package/dist/@types/storage/sync-rules/PostgresPersistedSyncRulesContent.d.ts +17 -0
  22. package/dist/@types/types/codecs.d.ts +61 -0
  23. package/dist/@types/types/models/ActiveCheckpoint.d.ts +12 -0
  24. package/dist/@types/types/models/ActiveCheckpointNotification.d.ts +19 -0
  25. package/dist/@types/types/models/BucketData.d.ts +22 -0
  26. package/dist/@types/types/models/BucketParameters.d.ts +11 -0
  27. package/dist/@types/types/models/CurrentData.d.ts +22 -0
  28. package/dist/@types/types/models/Instance.d.ts +6 -0
  29. package/dist/@types/types/models/Migration.d.ts +12 -0
  30. package/dist/@types/types/models/SourceTable.d.ts +31 -0
  31. package/dist/@types/types/models/SyncRules.d.ts +47 -0
  32. package/dist/@types/types/models/WriteCheckpoint.d.ts +15 -0
  33. package/dist/@types/types/models/models-index.d.ts +10 -0
  34. package/dist/@types/types/types.d.ts +94 -0
  35. package/dist/@types/utils/bson.d.ts +6 -0
  36. package/dist/@types/utils/bucket-data.d.ts +18 -0
  37. package/dist/@types/utils/db.d.ts +8 -0
  38. package/dist/@types/utils/ts-codec.d.ts +5 -0
  39. package/dist/@types/utils/utils-index.d.ts +4 -0
  40. package/dist/index.js +8 -0
  41. package/dist/index.js.map +1 -0
  42. package/dist/migrations/PostgresMigrationAgent.js +36 -0
  43. package/dist/migrations/PostgresMigrationAgent.js.map +1 -0
  44. package/dist/migrations/PostgresMigrationStore.js +60 -0
  45. package/dist/migrations/PostgresMigrationStore.js.map +1 -0
  46. package/dist/migrations/migration-utils.js +13 -0
  47. package/dist/migrations/migration-utils.js.map +1 -0
  48. package/dist/migrations/scripts/1684951997326-init.js +196 -0
  49. package/dist/migrations/scripts/1684951997326-init.js.map +1 -0
  50. package/dist/module/PostgresStorageModule.js +23 -0
  51. package/dist/module/PostgresStorageModule.js.map +1 -0
  52. package/dist/storage/PostgresBucketStorageFactory.js +433 -0
  53. package/dist/storage/PostgresBucketStorageFactory.js.map +1 -0
  54. package/dist/storage/PostgresCompactor.js +298 -0
  55. package/dist/storage/PostgresCompactor.js.map +1 -0
  56. package/dist/storage/PostgresStorageProvider.js +35 -0
  57. package/dist/storage/PostgresStorageProvider.js.map +1 -0
  58. package/dist/storage/PostgresSyncRulesStorage.js +619 -0
  59. package/dist/storage/PostgresSyncRulesStorage.js.map +1 -0
  60. package/dist/storage/PostgresTestStorageFactoryGenerator.js +110 -0
  61. package/dist/storage/PostgresTestStorageFactoryGenerator.js.map +1 -0
  62. package/dist/storage/batch/OperationBatch.js +93 -0
  63. package/dist/storage/batch/OperationBatch.js.map +1 -0
  64. package/dist/storage/batch/PostgresBucketBatch.js +732 -0
  65. package/dist/storage/batch/PostgresBucketBatch.js.map +1 -0
  66. package/dist/storage/batch/PostgresPersistedBatch.js +367 -0
  67. package/dist/storage/batch/PostgresPersistedBatch.js.map +1 -0
  68. package/dist/storage/checkpoints/PostgresWriteCheckpointAPI.js +148 -0
  69. package/dist/storage/checkpoints/PostgresWriteCheckpointAPI.js.map +1 -0
  70. package/dist/storage/storage-index.js +6 -0
  71. package/dist/storage/storage-index.js.map +1 -0
  72. package/dist/storage/sync-rules/PostgresPersistedSyncRulesContent.js +58 -0
  73. package/dist/storage/sync-rules/PostgresPersistedSyncRulesContent.js.map +1 -0
  74. package/dist/types/codecs.js +97 -0
  75. package/dist/types/codecs.js.map +1 -0
  76. package/dist/types/models/ActiveCheckpoint.js +12 -0
  77. package/dist/types/models/ActiveCheckpoint.js.map +1 -0
  78. package/dist/types/models/ActiveCheckpointNotification.js +8 -0
  79. package/dist/types/models/ActiveCheckpointNotification.js.map +1 -0
  80. package/dist/types/models/BucketData.js +23 -0
  81. package/dist/types/models/BucketData.js.map +1 -0
  82. package/dist/types/models/BucketParameters.js +11 -0
  83. package/dist/types/models/BucketParameters.js.map +1 -0
  84. package/dist/types/models/CurrentData.js +16 -0
  85. package/dist/types/models/CurrentData.js.map +1 -0
  86. package/dist/types/models/Instance.js +5 -0
  87. package/dist/types/models/Instance.js.map +1 -0
  88. package/dist/types/models/Migration.js +12 -0
  89. package/dist/types/models/Migration.js.map +1 -0
  90. package/dist/types/models/SourceTable.js +24 -0
  91. package/dist/types/models/SourceTable.js.map +1 -0
  92. package/dist/types/models/SyncRules.js +47 -0
  93. package/dist/types/models/SyncRules.js.map +1 -0
  94. package/dist/types/models/WriteCheckpoint.js +13 -0
  95. package/dist/types/models/WriteCheckpoint.js.map +1 -0
  96. package/dist/types/models/models-index.js +11 -0
  97. package/dist/types/models/models-index.js.map +1 -0
  98. package/dist/types/types.js +46 -0
  99. package/dist/types/types.js.map +1 -0
  100. package/dist/utils/bson.js +16 -0
  101. package/dist/utils/bson.js.map +1 -0
  102. package/dist/utils/bucket-data.js +25 -0
  103. package/dist/utils/bucket-data.js.map +1 -0
  104. package/dist/utils/db.js +24 -0
  105. package/dist/utils/db.js.map +1 -0
  106. package/dist/utils/ts-codec.js +11 -0
  107. package/dist/utils/ts-codec.js.map +1 -0
  108. package/dist/utils/utils-index.js +5 -0
  109. package/dist/utils/utils-index.js.map +1 -0
  110. package/package.json +50 -0
  111. package/src/index.ts +10 -0
  112. package/src/migrations/PostgresMigrationAgent.ts +46 -0
  113. package/src/migrations/PostgresMigrationStore.ts +70 -0
  114. package/src/migrations/migration-utils.ts +14 -0
  115. package/src/migrations/scripts/1684951997326-init.ts +141 -0
  116. package/src/module/PostgresStorageModule.ts +30 -0
  117. package/src/storage/PostgresBucketStorageFactory.ts +496 -0
  118. package/src/storage/PostgresCompactor.ts +366 -0
  119. package/src/storage/PostgresStorageProvider.ts +42 -0
  120. package/src/storage/PostgresSyncRulesStorage.ts +666 -0
  121. package/src/storage/PostgresTestStorageFactoryGenerator.ts +61 -0
  122. package/src/storage/batch/OperationBatch.ts +101 -0
  123. package/src/storage/batch/PostgresBucketBatch.ts +885 -0
  124. package/src/storage/batch/PostgresPersistedBatch.ts +441 -0
  125. package/src/storage/checkpoints/PostgresWriteCheckpointAPI.ts +176 -0
  126. package/src/storage/storage-index.ts +5 -0
  127. package/src/storage/sync-rules/PostgresPersistedSyncRulesContent.ts +67 -0
  128. package/src/types/codecs.ts +136 -0
  129. package/src/types/models/ActiveCheckpoint.ts +15 -0
  130. package/src/types/models/ActiveCheckpointNotification.ts +14 -0
  131. package/src/types/models/BucketData.ts +26 -0
  132. package/src/types/models/BucketParameters.ts +14 -0
  133. package/src/types/models/CurrentData.ts +23 -0
  134. package/src/types/models/Instance.ts +8 -0
  135. package/src/types/models/Migration.ts +19 -0
  136. package/src/types/models/SourceTable.ts +32 -0
  137. package/src/types/models/SyncRules.ts +50 -0
  138. package/src/types/models/WriteCheckpoint.ts +20 -0
  139. package/src/types/models/models-index.ts +10 -0
  140. package/src/types/types.ts +73 -0
  141. package/src/utils/bson.ts +17 -0
  142. package/src/utils/bucket-data.ts +25 -0
  143. package/src/utils/db.ts +27 -0
  144. package/src/utils/ts-codec.ts +14 -0
  145. package/src/utils/utils-index.ts +4 -0
  146. package/test/src/__snapshots__/storage.test.ts.snap +9 -0
  147. package/test/src/__snapshots__/storage_sync.test.ts.snap +332 -0
  148. package/test/src/env.ts +6 -0
  149. package/test/src/migrations.test.ts +34 -0
  150. package/test/src/setup.ts +16 -0
  151. package/test/src/storage.test.ts +131 -0
  152. package/test/src/storage_compacting.test.ts +5 -0
  153. package/test/src/storage_sync.test.ts +12 -0
  154. package/test/src/util.ts +34 -0
  155. package/test/tsconfig.json +20 -0
  156. package/tsconfig.json +36 -0
  157. package/vitest.config.ts +13 -0
package/src/storage/batch/PostgresPersistedBatch.ts
@@ -0,0 +1,441 @@
+ import * as lib_postgres from '@powersync/lib-service-postgres';
+ import { logger } from '@powersync/lib-services-framework';
+ import { storage, utils } from '@powersync/service-core';
+ import { JSONBig } from '@powersync/service-jsonbig';
+ import * as sync_rules from '@powersync/service-sync-rules';
+ import { models, RequiredOperationBatchLimits } from '../../types/types.js';
+ import { replicaIdToSubkey } from '../../utils/bson.js';
+
+ export type SaveBucketDataOptions = {
+   /**
+    * This value will be serialized into a BSON byte array for storage.
+    */
+   source_key: storage.ReplicaId;
+   table: storage.SourceTable;
+   before_buckets: models.CurrentBucket[];
+   evaluated: sync_rules.EvaluatedRow[];
+ };
+
+ export type SaveParameterDataOptions = {
+   source_key: storage.ReplicaId;
+   table: storage.SourceTable;
+   evaluated: sync_rules.EvaluatedParameters[];
+   existing_lookups: Buffer[];
+ };
+
+ export type DeleteCurrentDataOptions = {
+   source_table_id: bigint;
+   /**
+    * ReplicaId which needs to be serialized in order to be queried
+    * or inserted into the DB.
+    */
+   source_key?: storage.ReplicaId;
+   /**
+    * Optionally provide the serialized source key directly.
+    */
+   serialized_source_key?: Buffer;
+ };
+
+ export type PostgresPersistedBatchOptions = RequiredOperationBatchLimits & {
+   group_id: number;
+ };
+
+ export class PostgresPersistedBatch {
+   group_id: number;
+
+   /**
+    * Very rough estimate of the current operations' size in bytes.
+    */
+   currentSize: number;
+
+   readonly maxTransactionBatchSize: number;
+   readonly maxTransactionDocCount: number;
+
+   /**
+    * Ordered set of bucket_data insert operation parameters.
+    */
+   protected bucketDataInserts: models.BucketData[];
+   protected parameterDataInserts: models.BucketParameters[];
+   protected currentDataDeletes: Pick<models.CurrentData, 'group_id' | 'source_key' | 'source_table'>[];
+   /**
+    * This is stored as a map to avoid multiple inserts (or conflicts) for the same key.
+    */
+   protected currentDataInserts: Map<string, models.CurrentData>;
+
+   constructor(options: PostgresPersistedBatchOptions) {
+     this.group_id = options.group_id;
+
+     this.maxTransactionBatchSize = options.max_estimated_size;
+     this.maxTransactionDocCount = options.max_record_count;
+
+     this.bucketDataInserts = [];
+     this.parameterDataInserts = [];
+     this.currentDataDeletes = [];
+     this.currentDataInserts = new Map();
+     this.currentSize = 0;
+   }
+
+   saveBucketData(options: SaveBucketDataOptions) {
+     const remaining_buckets = new Map<string, models.CurrentBucket>();
+     for (const b of options.before_buckets) {
+       const key = currentBucketKey(b);
+       remaining_buckets.set(key, b);
+     }
+
+     const dchecksum = utils.hashDelete(replicaIdToSubkey(options.table.id, options.source_key));
+
+     const serializedSourceKey = storage.serializeReplicaId(options.source_key);
+     const hexSourceKey = serializedSourceKey.toString('hex');
+
+     for (const k of options.evaluated) {
+       const key = currentBucketKey(k);
+       remaining_buckets.delete(key);
+
+       const data = JSONBig.stringify(k.data);
+       const checksum = utils.hashData(k.table, k.id, data);
+
+       this.bucketDataInserts.push({
+         group_id: this.group_id,
+         bucket_name: k.bucket,
+         op: models.OpType.PUT,
+         source_table: options.table.id,
+         source_key: hexSourceKey,
+         table_name: k.table,
+         row_id: k.id,
+         checksum,
+         data,
+         op_id: 0, // Will use nextval of sequence
+         target_op: null
+       });
+
+       this.currentSize += k.bucket.length + data.length + hexSourceKey.length + 100;
+     }
+
+     for (const bd of remaining_buckets.values()) {
+       // REMOVE operation
+       this.bucketDataInserts.push({
+         group_id: this.group_id,
+         bucket_name: bd.bucket,
+         op: models.OpType.REMOVE,
+         source_table: options.table.id,
+         source_key: hexSourceKey,
+         table_name: bd.table,
+         row_id: bd.id,
+         checksum: dchecksum,
+         op_id: 0, // Will use nextval of sequence
+         target_op: null,
+         data: null
+       });
+       this.currentSize += bd.bucket.length + hexSourceKey.length + 100;
+     }
+   }
+
+   saveParameterData(options: SaveParameterDataOptions) {
+     // This is similar to saving bucket data, with two key differences:
+     // 1. Instead of storing per bucket id, we store per "lookup" - nothing is tracked
+     //    per-bucket, so the entire record is either persisted or removed.
+     // 2. We don't need to keep the full history intact - only enough recent history
+     //    to get consistent data for any specific checkpoint.
+     const { source_key, table, evaluated, existing_lookups } = options;
+     const serializedSourceKey = storage.serializeReplicaId(source_key);
+     const hexSourceKey = serializedSourceKey.toString('hex');
+     const remaining_lookups = new Map<string, Buffer>();
+     for (const l of existing_lookups) {
+       remaining_lookups.set(l.toString('base64'), l);
+     }
+
+     // 1. Insert new entries
+     for (const result of evaluated) {
+       const binLookup = storage.serializeLookupBuffer(result.lookup);
+       const base64 = binLookup.toString('base64');
+       remaining_lookups.delete(base64);
+       const hexLookup = binLookup.toString('hex');
+       const serializedBucketParameters = JSONBig.stringify(result.bucket_parameters);
+       this.parameterDataInserts.push({
+         group_id: this.group_id,
+         source_table: table.id,
+         source_key: hexSourceKey,
+         bucket_parameters: serializedBucketParameters,
+         id: 0, // auto incrementing id
+         lookup: hexLookup
+       });
+       this.currentSize += hexLookup.length + serializedBucketParameters.length + hexSourceKey.length + 100;
+     }
+
+     // 2. "REMOVE" entries for any lookup not touched.
+     for (const lookup of remaining_lookups.values()) {
+       const hexLookup = lookup.toString('hex');
+       this.parameterDataInserts.push({
+         group_id: this.group_id,
+         source_table: table.id,
+         source_key: hexSourceKey,
+         bucket_parameters: JSON.stringify([]),
+         id: 0, // auto incrementing id
+         lookup: hexLookup
+       });
+       this.currentSize += hexLookup.length + hexSourceKey.length + 100;
+     }
+   }
+
+   deleteCurrentData(options: DeleteCurrentDataOptions) {
+     // One of source_key or serialized_source_key must be provided.
+     const serializedReplicaId = options.serialized_source_key ?? storage.serializeReplicaId(options.source_key!);
+     this.currentDataDeletes.push({
+       group_id: this.group_id,
+       source_table: options.source_table_id.toString(),
+       source_key: serializedReplicaId.toString('hex')
+     });
+     this.currentSize += serializedReplicaId.byteLength + 100;
+   }
+
+   upsertCurrentData(options: models.CurrentDataDecoded) {
+     const { source_table, source_key, buckets } = options;
+
+     const serializedReplicaId = storage.serializeReplicaId(source_key);
+     const hexReplicaId = serializedReplicaId.toString('hex');
+     const serializedBuckets = JSONBig.stringify(options.buckets);
+
+     /**
+      * Only track the last unique ID for this current_data record.
+      * Applying multiple items in the flush method could cause an
+      * "ON CONFLICT DO UPDATE command cannot affect row a second time" error.
+      */
+     const key = `${this.group_id}-${source_table}-${hexReplicaId}`;
+
+     this.currentDataInserts.set(key, {
+       group_id: this.group_id,
+       source_table: source_table,
+       source_key: hexReplicaId,
+       buckets: serializedBuckets,
+       data: options.data.toString('hex'),
+       lookups: options.lookups.map((l) => l.toString('hex'))
+     });
+
+     this.currentSize +=
+       (options.data?.byteLength ?? 0) +
+       serializedReplicaId.byteLength +
+       buckets.length +
+       options.lookups.reduce((total, l) => {
+         return total + l.byteLength;
+       }, 0) +
+       100;
+   }
+
+   shouldFlushTransaction() {
+     return (
+       this.currentSize >= this.maxTransactionBatchSize ||
+       this.bucketDataInserts.length >= this.maxTransactionDocCount ||
+       this.currentDataInserts.size >= this.maxTransactionDocCount ||
+       this.currentDataDeletes.length >= this.maxTransactionDocCount ||
+       this.parameterDataInserts.length >= this.maxTransactionDocCount
+     );
+   }
+
+   async flush(db: lib_postgres.WrappedConnection) {
+     logger.info(
+       `powersync_${this.group_id} Flushed ${this.bucketDataInserts.length} + ${this.parameterDataInserts.length} + ${
+         this.currentDataInserts.size + this.currentDataDeletes.length
+       } updates, ${Math.round(this.currentSize / 1024)}kb.`
+     );
+
+     await this.flushBucketData(db);
+     await this.flushParameterData(db);
+     await this.flushCurrentData(db);
+
+     this.bucketDataInserts = [];
+     this.parameterDataInserts = [];
+     this.currentDataDeletes = [];
+     this.currentDataInserts = new Map();
+     this.currentSize = 0;
+   }
+
+   protected async flushBucketData(db: lib_postgres.WrappedConnection) {
+     if (this.bucketDataInserts.length > 0) {
+       await db.sql`
+         WITH
+           parsed_data AS (
+             SELECT
+               group_id,
+               bucket_name,
+               source_table,
+               decode(source_key, 'hex') AS source_key, -- Decode hex to bytea
+               table_name,
+               op,
+               row_id,
+               checksum,
+               data,
+               target_op
+             FROM
+               jsonb_to_recordset(${{ type: 'jsonb', value: this.bucketDataInserts }}::jsonb) AS t (
+                 group_id integer,
+                 bucket_name text,
+                 source_table text,
+                 source_key text, -- Input as hex string
+                 table_name text,
+                 op text,
+                 row_id text,
+                 checksum bigint,
+                 data text,
+                 target_op bigint
+               )
+           )
+         INSERT INTO
+           bucket_data (
+             group_id,
+             bucket_name,
+             op_id,
+             op,
+             source_table,
+             source_key,
+             table_name,
+             row_id,
+             checksum,
+             data,
+             target_op
+           )
+         SELECT
+           group_id,
+           bucket_name,
+           nextval('op_id_sequence'),
+           op,
+           source_table,
+           source_key, -- Already decoded
+           table_name,
+           row_id,
+           checksum,
+           data,
+           target_op
+         FROM
+           parsed_data;
+       `.execute();
+     }
+   }
+
+   protected async flushParameterData(db: lib_postgres.WrappedConnection) {
+     if (this.parameterDataInserts.length > 0) {
+       await db.sql`
+         WITH
+           parsed_data AS (
+             SELECT
+               group_id,
+               source_table,
+               decode(source_key, 'hex') AS source_key, -- Decode hex to bytea
+               decode(lookup, 'hex') AS lookup, -- Decode hex to bytea
+               bucket_parameters
+             FROM
+               jsonb_to_recordset(${{ type: 'jsonb', value: this.parameterDataInserts }}::jsonb) AS t (
+                 group_id integer,
+                 source_table text,
+                 source_key text, -- Input as hex string
+                 lookup text, -- Input as hex string
+                 bucket_parameters text -- Input as stringified JSON
+               )
+           )
+         INSERT INTO
+           bucket_parameters (
+             group_id,
+             source_table,
+             source_key,
+             lookup,
+             bucket_parameters
+           )
+         SELECT
+           group_id,
+           source_table,
+           source_key, -- Already decoded
+           lookup, -- Already decoded
+           bucket_parameters
+         FROM
+           parsed_data;
+       `.execute();
+     }
+   }
+
+   protected async flushCurrentData(db: lib_postgres.WrappedConnection) {
+     if (this.currentDataInserts.size > 0) {
+       await db.sql`
+         WITH
+           parsed_data AS (
+             SELECT
+               group_id,
+               source_table,
+               decode(source_key, 'hex') AS source_key, -- Decode hex to bytea
+               buckets::jsonb AS buckets,
+               decode(data, 'hex') AS data, -- Decode hex to bytea
+               ARRAY(
+                 SELECT
+                   decode((value ->> 0)::TEXT, 'hex')
+                 FROM
+                   jsonb_array_elements(lookups::jsonb) AS value
+               ) AS lookups -- Decode array of hex strings to bytea[]
+             FROM
+               jsonb_to_recordset(${{
+                 type: 'jsonb',
+                 value: Array.from(this.currentDataInserts.values())
+               }}::jsonb) AS t (
+                 group_id integer,
+                 source_table text,
+                 source_key text, -- Input as hex string
+                 buckets text,
+                 data text, -- Input as hex string
+                 lookups text -- Input as stringified JSONB array of hex strings
+               )
+           )
+         INSERT INTO
+           current_data (
+             group_id,
+             source_table,
+             source_key,
+             buckets,
+             data,
+             lookups
+           )
+         SELECT
+           group_id,
+           source_table,
+           source_key, -- Already decoded
+           buckets,
+           data, -- Already decoded
+           lookups -- Already decoded
+         FROM
+           parsed_data
+         ON CONFLICT (group_id, source_table, source_key) DO UPDATE
+         SET
+           buckets = EXCLUDED.buckets,
+           data = EXCLUDED.data,
+           lookups = EXCLUDED.lookups;
+       `.execute();
+     }
+
+     if (this.currentDataDeletes.length > 0) {
+       await db.sql`
+         WITH
+           conditions AS (
+             SELECT
+               group_id,
+               source_table,
+               decode(source_key, 'hex') AS source_key -- Decode hex to bytea
+             FROM
+               jsonb_to_recordset(${{ type: 'jsonb', value: this.currentDataDeletes }}::jsonb) AS t (
+                 group_id integer,
+                 source_table text,
+                 source_key text -- Input as hex string
+               )
+           )
+         DELETE FROM current_data USING conditions
+         WHERE
+           current_data.group_id = conditions.group_id
+           AND current_data.source_table = conditions.source_table
+           AND current_data.source_key = conditions.source_key;
+       `.execute();
+     }
+   }
+ }
+
+ export function currentBucketKey(b: models.CurrentBucket) {
+   return `${b.bucket}/${b.table}/${b.id}`;
+ }
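
The class above follows an accumulate-then-flush pattern: operations are buffered in memory as JSON-friendly rows (binary replica IDs and lookups are hex-encoded), then written with a few set-based INSERTs via jsonb_to_recordset rather than row by row. A minimal usage sketch; the import path is illustrative (this class is not re-exported through storage-index.ts) and the limit values are made up:

import * as lib_postgres from '@powersync/lib-service-postgres';
import { PostgresPersistedBatch, SaveBucketDataOptions } from './storage/batch/PostgresPersistedBatch.js';

async function persistChanges(db: lib_postgres.WrappedConnection, changes: SaveBucketDataOptions[]) {
  const batch = new PostgresPersistedBatch({
    group_id: 1, // sync rules group these operations belong to
    max_estimated_size: 5 * 1024 * 1024, // illustrative RequiredOperationBatchLimits values
    max_record_count: 2000
  });

  for (const change of changes) {
    batch.saveBucketData(change);
    // Flush before the buffered operations exceed the configured transaction limits.
    if (batch.shouldFlushTransaction()) {
      await batch.flush(db);
    }
  }
  // Flush whatever remains; the per-table flush methods are no-ops when their buffers are empty.
  await batch.flush(db);
}

The hex round trip (Buffer.prototype.toString('hex') on the client, decode(col, 'hex') in SQL) is what lets bytea values travel inside a single jsonb parameter.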
package/src/storage/checkpoints/PostgresWriteCheckpointAPI.ts
@@ -0,0 +1,176 @@
+ import * as lib_postgres from '@powersync/lib-service-postgres';
+ import * as framework from '@powersync/lib-services-framework';
+ import { storage } from '@powersync/service-core';
+ import { JSONBig } from '@powersync/service-jsonbig';
+ import { models } from '../../types/types.js';
+
+ export type PostgresCheckpointAPIOptions = {
+   db: lib_postgres.DatabaseClient;
+   mode: storage.WriteCheckpointMode;
+ };
+
+ export class PostgresWriteCheckpointAPI implements storage.WriteCheckpointAPI {
+   readonly db: lib_postgres.DatabaseClient;
+   private _mode: storage.WriteCheckpointMode;
+
+   constructor(options: PostgresCheckpointAPIOptions) {
+     this.db = options.db;
+     this._mode = options.mode;
+   }
+
+   get writeCheckpointMode() {
+     return this._mode;
+   }
+
+   setWriteCheckpointMode(mode: storage.WriteCheckpointMode): void {
+     this._mode = mode;
+   }
+
+   async batchCreateCustomWriteCheckpoints(checkpoints: storage.CustomWriteCheckpointOptions[]): Promise<void> {
+     return batchCreateCustomWriteCheckpoints(this.db, checkpoints);
+   }
+
+   async createCustomWriteCheckpoint(options: storage.CustomWriteCheckpointOptions): Promise<bigint> {
+     if (this.writeCheckpointMode !== storage.WriteCheckpointMode.CUSTOM) {
+       throw new framework.errors.ValidationError(
+         `Attempting to create a custom Write Checkpoint when the current Write Checkpoint mode is set to "${this.writeCheckpointMode}"`
+       );
+     }
+
+     const { checkpoint, user_id, sync_rules_id } = options;
+     const row = await this.db.sql`
+       INSERT INTO
+         custom_write_checkpoints (user_id, write_checkpoint, sync_rules_id)
+       VALUES
+         (
+           ${{ type: 'varchar', value: user_id }},
+           ${{ type: 'int8', value: checkpoint }},
+           ${{ type: 'int4', value: sync_rules_id }}
+         )
+       ON CONFLICT (user_id, sync_rules_id) DO UPDATE
+       SET
+         write_checkpoint = EXCLUDED.write_checkpoint
+       RETURNING
+         *;
+     `
+       .decoded(models.CustomWriteCheckpoint)
+       .first();
+     return row!.write_checkpoint;
+   }
+
+   async createManagedWriteCheckpoint(checkpoint: storage.ManagedWriteCheckpointOptions): Promise<bigint> {
+     if (this.writeCheckpointMode !== storage.WriteCheckpointMode.MANAGED) {
+       throw new framework.errors.ValidationError(
+         `Attempting to create a managed Write Checkpoint when the current Write Checkpoint mode is set to "${this.writeCheckpointMode}"`
+       );
+     }
+
+     const row = await this.db.sql`
+       INSERT INTO
+         write_checkpoints (user_id, lsns, write_checkpoint)
+       VALUES
+         (
+           ${{ type: 'varchar', value: checkpoint.user_id }},
+           ${{ type: 'jsonb', value: checkpoint.heads }},
+           ${{ type: 'int8', value: 1 }}
+         )
+       ON CONFLICT (user_id) DO UPDATE
+       SET
+         write_checkpoint = write_checkpoints.write_checkpoint + 1,
+         lsns = EXCLUDED.lsns
+       RETURNING
+         *;
+     `
+       .decoded(models.WriteCheckpoint)
+       .first();
+     return row!.write_checkpoint;
+   }
+
+   async lastWriteCheckpoint(filters: storage.LastWriteCheckpointFilters): Promise<bigint | null> {
+     switch (this.writeCheckpointMode) {
+       case storage.WriteCheckpointMode.CUSTOM:
+         if (!('sync_rules_id' in filters)) {
+           throw new framework.errors.ValidationError(`Sync rules ID is required for custom Write Checkpoint filtering`);
+         }
+         return this.lastCustomWriteCheckpoint(filters as storage.CustomWriteCheckpointFilters);
+       case storage.WriteCheckpointMode.MANAGED:
+         if (!('heads' in filters)) {
+           throw new framework.errors.ValidationError(
+             `Replication HEAD is required for managed Write Checkpoint filtering`
+           );
+         }
+         return this.lastManagedWriteCheckpoint(filters as storage.ManagedWriteCheckpointFilters);
+     }
+   }
+
+   protected async lastCustomWriteCheckpoint(filters: storage.CustomWriteCheckpointFilters) {
+     const { user_id, sync_rules_id } = filters;
+     const row = await this.db.sql`
+       SELECT
+         *
+       FROM
+         custom_write_checkpoints
+       WHERE
+         user_id = ${{ type: 'varchar', value: user_id }}
+         AND sync_rules_id = ${{ type: 'int4', value: sync_rules_id }}
+     `
+       .decoded(models.CustomWriteCheckpoint)
+       .first();
+     return row?.write_checkpoint ?? null;
+   }
+
+   protected async lastManagedWriteCheckpoint(filters: storage.ManagedWriteCheckpointFilters) {
+     const { user_id, heads } = filters;
+     // TODO: support multiple heads when we need to support multiple connections
+     const lsn = heads['1'];
+     if (lsn == null) {
+       // Can happen if we haven't replicated anything yet.
+       return null;
+     }
+     const row = await this.db.sql`
+       SELECT
+         *
+       FROM
+         write_checkpoints
+       WHERE
+         user_id = ${{ type: 'varchar', value: user_id }}
+         AND lsns ->> '1' <= ${{ type: 'varchar', value: lsn }};
+     `
+       .decoded(models.WriteCheckpoint)
+       .first();
+     return row?.write_checkpoint ?? null;
+   }
+ }
+
+ export async function batchCreateCustomWriteCheckpoints(
+   db: lib_postgres.DatabaseClient,
+   checkpoints: storage.CustomWriteCheckpointOptions[]
+ ): Promise<void> {
+   if (!checkpoints.length) {
+     return;
+   }
+
+   await db.sql`
+     WITH
+       json_data AS (
+         SELECT
+           jsonb_array_elements(${{ type: 'jsonb', value: JSONBig.stringify(checkpoints) }}) AS
+           CHECKPOINT
+       )
+     INSERT INTO
+       custom_write_checkpoints (user_id, write_checkpoint, sync_rules_id)
+     SELECT
+       CHECKPOINT ->> 'user_id'::varchar,
+       (
+         CHECKPOINT ->> 'checkpoint'
+       )::int8,
+       (
+         CHECKPOINT ->> 'sync_rules_id'
+       )::int4
+     FROM
+       json_data
+     ON CONFLICT (user_id, sync_rules_id) DO UPDATE
+     SET
+       write_checkpoint = EXCLUDED.write_checkpoint;
+   `.execute();
+ }
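
In managed mode the two statements above work as a pair: createManagedWriteCheckpoint upserts a per-user counter together with the replication HEAD it was issued at, and lastWriteCheckpoint only returns that counter once the stored LSN has been passed. A rough sketch of that flow; the import path and LSN values are illustrative, and the wrapper function is hypothetical:

import * as lib_postgres from '@powersync/lib-service-postgres';
import { storage } from '@powersync/service-core';
import { PostgresWriteCheckpointAPI } from './storage/checkpoints/PostgresWriteCheckpointAPI.js';

async function checkpointExample(db: lib_postgres.DatabaseClient) {
  const api = new PostgresWriteCheckpointAPI({ db, mode: storage.WriteCheckpointMode.MANAGED });

  // Issue a write checkpoint for a user at the current replication HEAD.
  // Only connection '1' is consulted for now (see the TODO above).
  const checkpoint = await api.createManagedWriteCheckpoint({
    user_id: 'user-1',
    heads: { '1': '0/1A2B3C4D' }
  });

  // Later, with a newer replicated head: returns the stored counter once
  // lsns ->> '1' <= the queried LSN, or null if replication has not caught up.
  const last = await api.lastWriteCheckpoint({
    user_id: 'user-1',
    heads: { '1': '0/2B3C4D5E' }
  });
  console.log({ checkpoint, last });
}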
package/src/storage/storage-index.ts
@@ -0,0 +1,5 @@
+ export * from './PostgresBucketStorageFactory.js';
+ export * from './PostgresCompactor.js';
+ export * from './PostgresStorageProvider.js';
+ export * from './PostgresSyncRulesStorage.js';
+ export * from './PostgresTestStorageFactoryGenerator.js';
package/src/storage/sync-rules/PostgresPersistedSyncRulesContent.ts
@@ -0,0 +1,67 @@
+ import * as lib_postgres from '@powersync/lib-service-postgres';
+ import { logger } from '@powersync/lib-services-framework';
+ import { storage } from '@powersync/service-core';
+ import { SqlSyncRules } from '@powersync/service-sync-rules';
+
+ import { models } from '../../types/types.js';
+
+ export class PostgresPersistedSyncRulesContent implements storage.PersistedSyncRulesContent {
+   public readonly slot_name: string;
+
+   public readonly id: number;
+   public readonly sync_rules_content: string;
+   public readonly last_checkpoint_lsn: string | null;
+   public readonly last_fatal_error: string | null;
+   public readonly last_keepalive_ts: Date | null;
+   public readonly last_checkpoint_ts: Date | null;
+   current_lock: storage.ReplicationLock | null = null;
+
+   constructor(
+     private db: lib_postgres.DatabaseClient,
+     row: models.SyncRulesDecoded
+   ) {
+     this.id = Number(row.id);
+     this.sync_rules_content = row.content;
+     this.last_checkpoint_lsn = row.last_checkpoint_lsn;
+     this.slot_name = row.slot_name;
+     this.last_fatal_error = row.last_fatal_error;
+     this.last_checkpoint_ts = row.last_checkpoint_ts ? new Date(row.last_checkpoint_ts) : null;
+     this.last_keepalive_ts = row.last_keepalive_ts ? new Date(row.last_keepalive_ts) : null;
+   }
+
+   parsed(options: storage.ParseSyncRulesOptions): storage.PersistedSyncRules {
+     return {
+       id: this.id,
+       slot_name: this.slot_name,
+       sync_rules: SqlSyncRules.fromYaml(this.sync_rules_content, options)
+     };
+   }
+
+   async lock(): Promise<storage.ReplicationLock> {
+     const manager = new lib_postgres.PostgresLockManager({
+       db: this.db,
+       name: `sync_rules_${this.id}_${this.slot_name}`
+     });
+     const lockHandle = await manager.acquire();
+     if (!lockHandle) {
+       throw new Error(`Sync rules ${this.id} have already been locked by another process for replication.`);
+     }
+
+     const interval = setInterval(async () => {
+       try {
+         await lockHandle.refresh();
+       } catch (e) {
+         logger.error('Failed to refresh lock', e);
+         clearInterval(interval);
+       }
+     }, 30_130);
+
+     return (this.current_lock = {
+       sync_rules_id: this.id,
+       release: async () => {
+         clearInterval(interval);
+         return lockHandle.release();
+       }
+     });
+   }
+ }
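
The lock acquired above is kept alive by a background refresh roughly every 30 seconds, so the holder is responsible for releasing it to stop the timer. A brief usage sketch, assuming content is an instance obtained from the storage factory:

import { storage } from '@powersync/service-core';

async function replicateWithLock(content: storage.PersistedSyncRulesContent) {
  const lock = await content.lock(); // throws if another process already holds it
  try {
    // ... run replication while holding the lock; it is refreshed in the background
  } finally {
    await lock.release(); // clears the refresh interval and releases the lock
  }
}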