@powersync/service-core 0.18.0 → 1.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105)
  1. package/CHANGELOG.md +22 -0
  2. package/dist/api/RouteAPI.d.ts +9 -1
  3. package/dist/api/diagnostics.js +107 -169
  4. package/dist/api/diagnostics.js.map +1 -1
  5. package/dist/entry/commands/compact-action.js +10 -73
  6. package/dist/entry/commands/compact-action.js.map +1 -1
  7. package/dist/modules/AbstractModule.d.ts +1 -1
  8. package/dist/replication/AbstractReplicator.js +2 -65
  9. package/dist/replication/AbstractReplicator.js.map +1 -1
  10. package/dist/routes/configure-fastify.d.ts +3 -3
  11. package/dist/routes/endpoints/admin.d.ts +6 -6
  12. package/dist/routes/endpoints/checkpointing.js +16 -85
  13. package/dist/routes/endpoints/checkpointing.js.map +1 -1
  14. package/dist/routes/endpoints/socket-route.js +5 -5
  15. package/dist/routes/endpoints/socket-route.js.map +1 -1
  16. package/dist/routes/endpoints/sync-stream.js +5 -5
  17. package/dist/routes/endpoints/sync-stream.js.map +1 -1
  18. package/dist/runner/teardown.js +3 -65
  19. package/dist/runner/teardown.js.map +1 -1
  20. package/dist/storage/BucketStorage.d.ts +1 -442
  21. package/dist/storage/BucketStorage.js +0 -9
  22. package/dist/storage/BucketStorage.js.map +1 -1
  23. package/dist/storage/BucketStorageBatch.d.ts +130 -0
  24. package/dist/storage/BucketStorageBatch.js +10 -0
  25. package/dist/storage/BucketStorageBatch.js.map +1 -0
  26. package/dist/storage/BucketStorageFactory.d.ts +136 -0
  27. package/dist/storage/BucketStorageFactory.js +2 -0
  28. package/dist/storage/BucketStorageFactory.js.map +1 -0
  29. package/dist/storage/ChecksumCache.js.map +1 -1
  30. package/dist/storage/PersistedSyncRulesContent.d.ts +20 -0
  31. package/dist/storage/PersistedSyncRulesContent.js +2 -0
  32. package/dist/storage/PersistedSyncRulesContent.js.map +1 -0
  33. package/dist/storage/ReplicationEventPayload.d.ts +1 -1
  34. package/dist/storage/ReplicationLock.d.ts +4 -0
  35. package/dist/storage/ReplicationLock.js +2 -0
  36. package/dist/storage/ReplicationLock.js.map +1 -0
  37. package/dist/storage/StorageEngine.d.ts +4 -4
  38. package/dist/storage/StorageEngine.js +2 -2
  39. package/dist/storage/StorageEngine.js.map +1 -1
  40. package/dist/storage/StorageProvider.d.ts +4 -1
  41. package/dist/storage/SyncRulesBucketStorage.d.ts +201 -0
  42. package/dist/storage/SyncRulesBucketStorage.js +7 -0
  43. package/dist/storage/SyncRulesBucketStorage.js.map +1 -0
  44. package/dist/storage/bson.d.ts +11 -3
  45. package/dist/storage/bson.js +24 -2
  46. package/dist/storage/bson.js.map +1 -1
  47. package/dist/storage/storage-index.d.ts +5 -0
  48. package/dist/storage/storage-index.js +5 -0
  49. package/dist/storage/storage-index.js.map +1 -1
  50. package/dist/sync/BucketChecksumState.d.ts +91 -0
  51. package/dist/sync/BucketChecksumState.js +313 -0
  52. package/dist/sync/BucketChecksumState.js.map +1 -0
  53. package/dist/sync/sync-index.d.ts +1 -0
  54. package/dist/sync/sync-index.js +1 -0
  55. package/dist/sync/sync-index.js.map +1 -1
  56. package/dist/sync/sync.d.ts +7 -3
  57. package/dist/sync/sync.js +131 -135
  58. package/dist/sync/sync.js.map +1 -1
  59. package/dist/sync/util.d.ts +9 -0
  60. package/dist/sync/util.js +44 -0
  61. package/dist/sync/util.js.map +1 -1
  62. package/dist/util/checkpointing.d.ts +13 -0
  63. package/dist/util/checkpointing.js +29 -0
  64. package/dist/util/checkpointing.js.map +1 -0
  65. package/dist/util/protocol-types.d.ts +13 -4
  66. package/dist/util/util-index.d.ts +1 -0
  67. package/dist/util/util-index.js +1 -0
  68. package/dist/util/util-index.js.map +1 -1
  69. package/dist/util/utils.d.ts +0 -1
  70. package/dist/util/utils.js +0 -9
  71. package/dist/util/utils.js.map +1 -1
  72. package/package.json +4 -4
  73. package/src/api/RouteAPI.ts +11 -1
  74. package/src/api/diagnostics.ts +1 -1
  75. package/src/entry/commands/compact-action.ts +2 -3
  76. package/src/modules/AbstractModule.ts +1 -1
  77. package/src/replication/AbstractReplicator.ts +1 -2
  78. package/src/routes/endpoints/checkpointing.ts +11 -22
  79. package/src/routes/endpoints/socket-route.ts +7 -5
  80. package/src/routes/endpoints/sync-stream.ts +8 -5
  81. package/src/runner/teardown.ts +1 -1
  82. package/src/storage/BucketStorage.ts +1 -552
  83. package/src/storage/BucketStorageBatch.ts +158 -0
  84. package/src/storage/BucketStorageFactory.ts +156 -0
  85. package/src/storage/ChecksumCache.ts +1 -0
  86. package/src/storage/PersistedSyncRulesContent.ts +26 -0
  87. package/src/storage/ReplicationEventPayload.ts +1 -1
  88. package/src/storage/ReplicationLock.ts +5 -0
  89. package/src/storage/StorageEngine.ts +4 -4
  90. package/src/storage/StorageProvider.ts +4 -1
  91. package/src/storage/SyncRulesBucketStorage.ts +256 -0
  92. package/src/storage/bson.ts +28 -4
  93. package/src/storage/storage-index.ts +5 -0
  94. package/src/sync/BucketChecksumState.ts +392 -0
  95. package/src/sync/sync-index.ts +1 -0
  96. package/src/sync/sync.ts +173 -157
  97. package/src/sync/util.ts +54 -0
  98. package/src/util/checkpointing.ts +41 -0
  99. package/src/util/protocol-types.ts +16 -4
  100. package/src/util/util-index.ts +1 -0
  101. package/src/util/utils.ts +0 -10
  102. package/test/src/auth.test.ts +5 -5
  103. package/test/src/sync/BucketChecksumState.test.ts +565 -0
  104. package/test/src/sync/util.test.ts +34 -0
  105. package/tsconfig.tsbuildinfo +1 -1
package/src/sync/sync.ts CHANGED
@@ -1,5 +1,5 @@
  import { JSONBig, JsonContainer } from '@powersync/service-jsonbig';
- import { RequestParameters } from '@powersync/service-sync-rules';
+ import { BucketDescription, BucketPriority, RequestParameters, SqlSyncRules } from '@powersync/service-sync-rules';
  import { Semaphore, withTimeout } from 'async-mutex';

  import { AbortError } from 'ix/aborterror.js';
@@ -9,9 +9,10 @@ import * as storage from '../storage/storage-index.js';
  import * as util from '../util/util-index.js';

  import { logger } from '@powersync/lib-services-framework';
+ import { BucketChecksumState } from './BucketChecksumState.js';
  import { mergeAsyncIterables } from './merge.js';
  import { RequestTracker } from './RequestTracker.js';
- import { TokenStreamOptions, tokenStream } from './util.js';
+ import { acquireSemaphoreAbortable, settledPromise, tokenStream, TokenStreamOptions } from './util.js';

  /**
  * Maximum number of connections actively fetching data.
@@ -32,11 +33,11 @@ const syncSemaphore = withTimeout(
  );

  export interface SyncStreamParameters {
- storage: storage.BucketStorageFactory;
+ bucketStorage: storage.SyncRulesBucketStorage;
+ syncRules: SqlSyncRules;
  params: util.StreamingSyncRequest;
  syncParams: RequestParameters;
  token: auth.JwtPayload;
- parseOptions: storage.ParseSyncRulesOptions;
  /**
  * If this signal is aborted, the stream response ends as soon as possible, without error.
  */
@@ -49,7 +50,7 @@ export interface SyncStreamParameters {
  export async function* streamResponse(
  options: SyncStreamParameters
  ): AsyncIterable<util.StreamingSyncLine | string | null> {
- const { storage, params, syncParams, token, tokenStreamOptions, tracker, signal, parseOptions } = options;
+ const { bucketStorage, syncRules, params, syncParams, token, tokenStreamOptions, tracker, signal } = options;
  // We also need to be able to abort, so we create our own controller.
  const controller = new AbortController();
  if (signal) {
@@ -65,7 +66,7 @@ export async function* streamResponse(
  }
  }
  const ki = tokenStream(token, controller.signal, tokenStreamOptions);
- const stream = streamResponseInner(storage, params, syncParams, tracker, parseOptions, controller.signal);
+ const stream = streamResponseInner(bucketStorage, syncRules, params, syncParams, tracker, controller.signal);
  // Merge the two streams, and abort as soon as one of the streams end.
  const merged = mergeAsyncIterables([stream, ki], controller.signal);

@@ -84,157 +85,155 @@
  }
  }

+ export type BucketSyncState = {
+ description?: BucketDescription; // Undefined if the bucket has not yet been resolved by us.
+ start_op_id: string;
+ };
+
  async function* streamResponseInner(
- storage: storage.BucketStorageFactory,
+ bucketStorage: storage.SyncRulesBucketStorage,
+ syncRules: SqlSyncRules,
  params: util.StreamingSyncRequest,
  syncParams: RequestParameters,
  tracker: RequestTracker,
- parseOptions: storage.ParseSyncRulesOptions,
  signal: AbortSignal
  ): AsyncGenerator<util.StreamingSyncLine | string | null> {
- // Bucket state of bucket id -> op_id.
- // This starts with the state from the client. May contain buckets that the user do not have access to (anymore).
- let dataBuckets = new Map<string, string>();
-
- let lastChecksums: util.ChecksumMap | null = null;
- let lastWriteCheckpoint: bigint | null = null;
-
  const { raw_data, binary_data } = params;

- if (params.buckets) {
- for (let { name, after: start } of params.buckets) {
- dataBuckets.set(name, start);
- }
- }
-
  const checkpointUserId = util.checkpointUserId(syncParams.token_parameters.user_id as string, params.client_id);
- const stream = storage.watchWriteCheckpoint(checkpointUserId, signal);
- for await (const next of stream) {
- const { base, writeCheckpoint } = next;
- const checkpoint = base.checkpoint;
-
- const storage = await base.getBucketStorage();
- if (storage == null) {
- // Sync rules deleted in the meantime - try again with the next checkpoint.
- continue;
- }
- const syncRules = storage.getParsedSyncRules(parseOptions);

- const allBuckets = await syncRules.queryBucketIds({
- getParameterSets(lookups) {
- return storage.getParameterSets(checkpoint, lookups);
- },
- parameters: syncParams
- });
-
- if (allBuckets.length > 1000) {
- logger.error(`Too many buckets`, {
- checkpoint,
- user_id: syncParams.user_id,
- buckets: allBuckets.length
- });
- // TODO: Limit number of buckets even before we get to this point
- throw new Error(`Too many buckets: ${allBuckets.length}`);
- }
+ const checksumState = new BucketChecksumState({
+ bucketStorage,
+ syncRules,
+ syncParams,
+ initialBucketPositions: params.buckets
+ });
+ const stream = bucketStorage.watchWriteCheckpoint({
+ user_id: checkpointUserId,
+ signal
+ });
+ const newCheckpoints = stream[Symbol.asyncIterator]();

- let dataBucketsNew = new Map<string, string>();
- for (let bucket of allBuckets) {
- dataBucketsNew.set(bucket, dataBuckets.get(bucket) ?? '0');
- }
- dataBuckets = dataBucketsNew;
-
- const bucketList = [...dataBuckets.keys()];
- const checksumMap = await storage.getChecksums(checkpoint, bucketList);
- // Subset of buckets for which there may be new data in this batch.
- let bucketsToFetch: string[];
-
- if (lastChecksums) {
- const diff = util.checksumsDiff(lastChecksums, checksumMap);
-
- if (
- lastWriteCheckpoint == writeCheckpoint &&
- diff.removedBuckets.length == 0 &&
- diff.updatedBuckets.length == 0
- ) {
- // No changes - don't send anything to the client
+ try {
+ let nextCheckpointPromise:
+ | Promise<PromiseSettledResult<IteratorResult<storage.StorageCheckpointUpdate>>>
+ | undefined;
+
+ do {
+ if (!nextCheckpointPromise) {
+ // Wrap in a settledPromise, so that abort errors after the parent stopped iterating
+ // does not result in uncaught errors.
+ nextCheckpointPromise = settledPromise(newCheckpoints.next());
+ }
+ const next = await nextCheckpointPromise;
+ nextCheckpointPromise = undefined;
+ if (next.status == 'rejected') {
+ throw next.reason;
+ }
+ if (next.value.done) {
+ break;
+ }
+ const line = await checksumState.buildNextCheckpointLine(next.value.value);
+ if (line == null) {
+ // No update to sync
  continue;
  }
- bucketsToFetch = diff.updatedBuckets.map((c) => c.bucket);
-
- let message = `Updated checkpoint: ${checkpoint} | `;
- message += `write: ${writeCheckpoint} | `;
- message += `buckets: ${allBuckets.length} | `;
- message += `updated: ${limitedBuckets(diff.updatedBuckets, 20)} | `;
- message += `removed: ${limitedBuckets(diff.removedBuckets, 20)}`;
- logger.info(message, {
- checkpoint,
- user_id: syncParams.user_id,
- buckets: allBuckets.length,
- updated: diff.updatedBuckets.length,
- removed: diff.removedBuckets.length
- });

- const checksum_line: util.StreamingSyncCheckpointDiff = {
- checkpoint_diff: {
- last_op_id: checkpoint,
- write_checkpoint: writeCheckpoint ? String(writeCheckpoint) : undefined,
- removed_buckets: diff.removedBuckets,
- updated_buckets: diff.updatedBuckets
+ const { checkpointLine, bucketsToFetch } = line;
+
+ yield checkpointLine;
+ // Start syncing data for buckets up to the checkpoint. As soon as we have completed at least one priority and
+ // at least 1000 operations, we also start listening for new checkpoints concurrently. When a new checkpoint comes
+ // in while we're still busy syncing data for lower priorities, interrupt the current operation and start syncing
+ // the new checkpoint.
+ const abortCheckpointController = new AbortController();
+ let syncedOperations = 0;
+
+ const abortCheckpointSignal = AbortSignal.any([abortCheckpointController.signal, signal]);
+
+ const bucketsByPriority = [...Map.groupBy(bucketsToFetch, (bucket) => bucket.priority).entries()];
+ bucketsByPriority.sort((a, b) => a[0] - b[0]); // Sort from high to lower priorities
+ const lowestPriority = bucketsByPriority.at(-1)?.[0];
+
+ function maybeRaceForNewCheckpoint() {
+ if (syncedOperations >= 1000 && nextCheckpointPromise === undefined) {
+ nextCheckpointPromise = (async () => {
+ const next = await settledPromise(newCheckpoints.next());
+ if (next.status == 'rejected') {
+ abortCheckpointController.abort();
+ } else if (!next.value.done) {
+ // Stop the running bucketDataInBatches() iterations, making the main flow reach the new checkpoint.
+ abortCheckpointController.abort();
+ }
+
+ return next;
+ })();
  }
- };
+ }

- yield checksum_line;
- } else {
- let message = `New checkpoint: ${checkpoint} | write: ${writeCheckpoint} | `;
- message += `buckets: ${allBuckets.length} ${limitedBuckets(allBuckets, 20)}`;
- logger.info(message, { checkpoint, user_id: syncParams.user_id, buckets: allBuckets.length });
- bucketsToFetch = allBuckets;
- const checksum_line: util.StreamingSyncCheckpoint = {
- checkpoint: {
- last_op_id: checkpoint,
- write_checkpoint: writeCheckpoint ? String(writeCheckpoint) : undefined,
- buckets: [...checksumMap.values()]
+ function markOperationsSent(operations: number) {
+ syncedOperations += operations;
+ tracker.addOperationsSynced(operations);
+ maybeRaceForNewCheckpoint();
+ }
+
+ // This incrementally updates dataBuckets with each individual bucket position.
+ // At the end of this, we can be sure that all buckets have data up to the checkpoint.
+ for (const [priority, buckets] of bucketsByPriority) {
+ const isLast = priority === lowestPriority;
+ if (abortCheckpointSignal.aborted) {
+ break;
  }
- };
- yield checksum_line;
- }
- lastChecksums = checksumMap;
- lastWriteCheckpoint = writeCheckpoint;
-
- // This incrementally updates dataBuckets with each individual bucket position.
- // At the end of this, we can be sure that all buckets have data up to the checkpoint.
- yield* bucketDataInBatches({
- storage,
- checkpoint,
- bucketsToFetch,
- dataBuckets,
- raw_data,
- binary_data,
- signal,
- tracker,
- user_id: syncParams.user_id
- });
-
- await new Promise((resolve) => setTimeout(resolve, 10));
+
+ yield* bucketDataInBatches({
+ bucketStorage: bucketStorage,
+ checkpoint: next.value.value.base.checkpoint,
+ bucketsToFetch: buckets,
+ checksumState,
+ raw_data,
+ binary_data,
+ onRowsSent: markOperationsSent,
+ abort_connection: signal,
+ abort_batch: abortCheckpointSignal,
+ user_id: syncParams.user_id,
+ // Passing undefined will emit a full sync complete message at the end. If we pass a priority, we'll emit a partial
+ // sync complete message.
+ forPriority: !isLast ? priority : undefined
+ });
+ }
+
+ if (!abortCheckpointSignal.aborted) {
+ await new Promise((resolve) => setTimeout(resolve, 10));
+ }
+ } while (!signal.aborted);
+ } finally {
+ await newCheckpoints.return?.();
  }
  }

  interface BucketDataRequest {
- storage: storage.SyncRulesBucketStorage;
+ bucketStorage: storage.SyncRulesBucketStorage;
  checkpoint: string;
- bucketsToFetch: string[];
- /** Bucket data position, modified by the request. */
- dataBuckets: Map<string, string>;
+ bucketsToFetch: BucketDescription[];
+ /** Contains current bucket state. Modified by the request as data is sent. */
+ checksumState: BucketChecksumState;
  raw_data: boolean | undefined;
  binary_data: boolean | undefined;
- tracker: RequestTracker;
- signal: AbortSignal;
+ /** Signals that the connection was aborted and that streaming should stop ASAP. */
+ abort_connection: AbortSignal;
+ /**
+ * Signals that higher-priority batches are available. The current batch can stop at a sensible point.
+ * This signal also fires when abort_connection fires.
+ */
+ abort_batch: AbortSignal;
  user_id?: string;
+ forPriority?: BucketPriority;
+ onRowsSent: (amount: number) => void;
  }

  async function* bucketDataInBatches(request: BucketDataRequest) {
  let isDone = false;
- while (!request.signal.aborted && !isDone) {
+ while (!request.abort_batch.aborted && !isDone) {
  // The code below is functionally the same as this for-await loop below.
  // However, the for-await loop appears to have a memory leak, so we avoid it.
  // for await (const { done, data } of bucketDataBatch(storage, checkpoint, dataBuckets, raw_data, signal)) {
@@ -273,7 +272,17 @@ interface BucketDataBatchResult {
  * Extracted as a separate internal function just to avoid memory leaks.
  */
  async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<BucketDataBatchResult, void> {
- const { storage, checkpoint, bucketsToFetch, dataBuckets, raw_data, binary_data, tracker, signal } = request;
+ const {
+ bucketStorage: storage,
+ checkpoint,
+ bucketsToFetch,
+ checksumState,
+ raw_data,
+ binary_data,
+ abort_connection,
+ abort_batch,
+ onRowsSent
+ } = request;

  const checkpointOp = BigInt(checkpoint);
  let checkpointInvalidated = false;
@@ -281,7 +290,12 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
  if (syncSemaphore.isLocked()) {
  logger.info('Sync concurrency limit reached, waiting for lock', { user_id: request.user_id });
  }
- const [value, release] = await syncSemaphore.acquire();
+ const acquired = await acquireSemaphoreAbortable(syncSemaphore, AbortSignal.any([abort_batch]));
+ if (acquired === 'aborted') {
+ return;
+ }
+
+ const [value, release] = acquired;
  try {
  if (value <= 3) {
  // This can be noisy, so we only log when we get close to the
@@ -293,13 +307,14 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
  }
  // Optimization: Only fetch buckets for which the checksums have changed since the last checkpoint
  // For the first batch, this will be all buckets.
- const filteredBuckets = new Map(bucketsToFetch.map((bucket) => [bucket, dataBuckets.get(bucket)!]));
- const data = storage.getBucketDataBatch(checkpoint, filteredBuckets);
+ const filteredBuckets = checksumState.getFilteredBucketPositions(bucketsToFetch);
+ const dataBatches = storage.getBucketDataBatch(checkpoint, filteredBuckets);

  let has_more = false;

- for await (let { batch: r, targetOp } of data) {
- if (signal.aborted) {
+ for await (let { batch: r, targetOp } of dataBatches) {
+ // Abort in current batch if the connection is closed
+ if (abort_connection.aborted) {
  return;
  }
  if (r.has_more) {
@@ -339,9 +354,15 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
  // iterator memory in case if large data sent.
  yield { data: null, done: false };
  }
- tracker.addOperationsSynced(r.data.length);
+ onRowsSent(r.data.length);
+
+ checksumState.updateBucketPosition({ bucket: r.bucket, nextAfter: r.next_after, hasMore: r.has_more });

- dataBuckets.set(r.bucket, r.next_after);
+ // Check if syncing bucket data is supposed to stop before fetching more data
+ // from storage.
+ if (abort_batch.aborted) {
+ return;
+ }
  }

  if (!has_more) {
@@ -351,12 +372,22 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
  // More data should be available immediately for a new checkpoint.
  yield { data: null, done: true };
  } else {
- const line: util.StreamingSyncCheckpointComplete = {
- checkpoint_complete: {
- last_op_id: checkpoint
- }
- };
- yield { data: line, done: true };
+ if (request.forPriority !== undefined) {
+ const line: util.StreamingSyncCheckpointPartiallyComplete = {
+ partial_checkpoint_complete: {
+ last_op_id: checkpoint,
+ priority: request.forPriority
+ }
+ };
+ yield { data: line, done: true };
+ } else {
+ const line: util.StreamingSyncCheckpointComplete = {
+ checkpoint_complete: {
+ last_op_id: checkpoint
+ }
+ };
+ yield { data: line, done: true };
+ }
  }
  }
  } finally {
@@ -383,18 +414,3 @@ function transformLegacyResponse(bucketData: util.SyncBucketData): any {
  })
  };
  }
-
- function limitedBuckets(buckets: string[] | util.BucketChecksum[], limit: number) {
- buckets = buckets.map((b) => {
- if (typeof b != 'string') {
- return b.bucket;
- } else {
- return b;
- }
- });
- if (buckets.length <= limit) {
- return JSON.stringify(buckets);
- }
- const limited = buckets.slice(0, limit);
- return `${JSON.stringify(limited)}...`;
- }
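
The rewritten streamResponseInner above groups a checkpoint's buckets by priority, streams the highest-priority group first, and emits a partial completion line after every group except the last. A minimal standalone sketch of that ordering step follows; the BucketDescription shape and the sample bucket names are illustrative rather than taken from the package, and Map.groupBy requires Node 21+ / ES2024:

interface BucketDescription {
  bucket: string;
  priority: number; // lower number = higher priority
}

function groupBucketsByPriority(bucketsToFetch: BucketDescription[]): [number, BucketDescription[]][] {
  // Same idea as in streamResponseInner: group by priority, then sort ascending so the
  // highest-priority group (lowest number) is streamed first.
  const groups = [...Map.groupBy(bucketsToFetch, (bucket) => bucket.priority).entries()];
  groups.sort((a, b) => a[0] - b[0]);
  return groups;
}

const ordered = groupBucketsByPriority([
  { bucket: 'lists["u1"]', priority: 1 },
  { bucket: 'attachments["u1"]', priority: 3 },
  { bucket: 'profile["u1"]', priority: 1 }
]);
// => [[1, [lists, profile]], [3, [attachments]]]
// Each group except the last is followed by a partial_checkpoint_complete line;
// the final group ends with checkpoint_complete.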
package/src/sync/util.ts CHANGED
@@ -2,6 +2,7 @@ import * as timers from 'timers/promises';

  import * as util from '../util/util-index.js';
  import { RequestTracker } from './RequestTracker.js';
+ import { SemaphoreInterface } from 'async-mutex';

  export type TokenStreamOptions = {
  /**
@@ -99,3 +100,56 @@ export async function* transformToBytesTracked(
  yield encoded;
  }
  }
+
+ export function acquireSemaphoreAbortable(
+ semaphone: SemaphoreInterface,
+ abort: AbortSignal
+ ): Promise<[number, SemaphoreInterface.Releaser] | 'aborted'> {
+ return new Promise((resolve, reject) => {
+ let aborted = false;
+ let hasSemaphore = false;
+
+ const listener = () => {
+ if (!hasSemaphore) {
+ aborted = true;
+ abort.removeEventListener('abort', listener);
+ resolve('aborted');
+ }
+ };
+ abort.addEventListener('abort', listener);
+
+ semaphone.acquire().then((acquired) => {
+ hasSemaphore = true;
+ if (aborted) {
+ // Release semaphore, already aborted
+ acquired[1]();
+ } else {
+ abort.removeEventListener('abort', listener);
+ resolve(acquired);
+ }
+ }, reject);
+ });
+ }
+
+ /**
+ * Wrap a promise in the style of Promise.allSettled.
+ *
+ * This is specifically useful if rejections should not be treated as uncaught rejections
+ * if it is not specifically handled.
+ */
+ export function settledPromise<T>(promise: Promise<T>): Promise<PromiseSettledResult<T>> {
+ return promise.then(
+ (result) => {
+ return {
+ status: 'fulfilled',
+ value: result
+ };
+ },
+ (error) => {
+ return {
+ status: 'rejected',
+ reason: error
+ };
+ }
+ );
+ }
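
Both helpers added above are small wrappers around async-mutex and plain promises. A hedged usage sketch follows; the semaphore size and the doWork function are illustrative, not part of the package:

import { Semaphore } from 'async-mutex';
import { acquireSemaphoreAbortable, settledPromise } from './util.js';

const semaphore = new Semaphore(10);

async function doWork(signal: AbortSignal) {
  // Wait for a slot, but give up as soon as the signal fires.
  const acquired = await acquireSemaphoreAbortable(semaphore, signal);
  if (acquired === 'aborted') {
    return;
  }
  const [, release] = acquired;
  try {
    // ... do the rate-limited work while holding the semaphore ...
  } finally {
    release();
  }
}

// settledPromise never rejects: a pending iterator.next() that fails after the consumer
// has moved on settles as { status: 'rejected' } instead of becoming an unhandled rejection.
const settled = await settledPromise(Promise.reject(new Error('boom')));
if (settled.status === 'rejected') {
  console.log((settled.reason as Error).message); // 'boom'
}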
package/src/util/checkpointing.ts ADDED
@@ -0,0 +1,41 @@
+ import { ErrorCode, ServiceError } from '@powersync/lib-services-framework';
+ import { RouteAPI } from '../api/RouteAPI.js';
+ import { BucketStorageFactory } from '../storage/storage-index.js';
+
+ export interface CreateWriteCheckpointOptions {
+ userId: string | undefined;
+ clientId: string | undefined;
+ api: RouteAPI;
+ storage: BucketStorageFactory;
+ }
+ export async function createWriteCheckpoint(options: CreateWriteCheckpointOptions) {
+ const full_user_id = checkpointUserId(options.userId, options.clientId);
+
+ const syncBucketStorage = await options.storage.getActiveStorage();
+ if (!syncBucketStorage) {
+ throw new ServiceError(ErrorCode.PSYNC_S2302, `Cannot create Write Checkpoint since no sync rules are active.`);
+ }
+
+ const { writeCheckpoint, currentCheckpoint } = await options.api.createReplicationHead(async (currentCheckpoint) => {
+ const writeCheckpoint = await syncBucketStorage.createManagedWriteCheckpoint({
+ user_id: full_user_id,
+ heads: { '1': currentCheckpoint }
+ });
+ return { writeCheckpoint, currentCheckpoint };
+ });
+
+ return {
+ writeCheckpoint: String(writeCheckpoint),
+ replicationHead: currentCheckpoint
+ };
+ }
+
+ export function checkpointUserId(user_id: string | undefined, client_id: string | undefined) {
+ if (user_id == null) {
+ throw new Error('user_id is required');
+ }
+ if (client_id == null) {
+ return user_id;
+ }
+ return `${user_id}/${client_id}`;
+ }
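
checkpointUserId is moved here unchanged from util/utils.ts (see the removal further down) and re-exported through util-index.ts. Its behaviour, shown with made-up values:

import { checkpointUserId } from './checkpointing.js';

checkpointUserId('user-1', undefined); // => 'user-1'
checkpointUserId('user-1', 'tablet');  // => 'user-1/tablet'
checkpointUserId(undefined, 'tablet'); // throws Error('user_id is required')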
package/src/util/protocol-types.ts CHANGED
@@ -1,5 +1,5 @@
  import * as t from 'ts-codec';
- import { SqliteJsonValue } from '@powersync/service-sync-rules';
+ import { BucketDescription, BucketPriority, SqliteJsonValue } from '@powersync/service-sync-rules';

  export const BucketRequest = t.object({
  name: t.string,
@@ -59,7 +59,7 @@ export interface StreamingSyncCheckpointDiff {
  checkpoint_diff: {
  last_op_id: OpId;
  write_checkpoint?: OpId;
- updated_buckets: BucketChecksum[];
+ updated_buckets: BucketChecksumWithDescription[];
  removed_buckets: string[];
  };
  }
@@ -74,13 +74,23 @@ export interface StreamingSyncCheckpointComplete {
  };
  }

- export interface StreamingSyncKeepalive {}
+ export interface StreamingSyncCheckpointPartiallyComplete {
+ partial_checkpoint_complete: {
+ last_op_id: OpId;
+ priority: BucketPriority;
+ };
+ }
+
+ export interface StreamingSyncKeepalive {
+ token_expires_in: number;
+ }

  export type StreamingSyncLine =
  | StreamingSyncData
  | StreamingSyncCheckpoint
  | StreamingSyncCheckpointDiff
  | StreamingSyncCheckpointComplete
+ | StreamingSyncCheckpointPartiallyComplete
  | StreamingSyncKeepalive;

  /**
@@ -91,7 +101,7 @@ export type OpId = string;
  export interface Checkpoint {
  last_op_id: OpId;
  write_checkpoint?: OpId;
- buckets: BucketChecksum[];
+ buckets: BucketChecksumWithDescription[];
  }

  export interface BucketState {
@@ -142,3 +152,5 @@ export interface BucketChecksum {
  */
  count: number;
  }
+
+ export interface BucketChecksumWithDescription extends BucketChecksum, BucketDescription {}
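
For reference, the two completion lines a client can now receive on the sync stream look roughly like this; the op ids and the priority value are made-up example data, not captured service output:

// Sent when all buckets at the given priority (and at higher priorities) are consistent
// up to last_op_id, while lower-priority buckets may still be syncing.
const partialCheckpointComplete = {
  partial_checkpoint_complete: {
    last_op_id: '1000',
    priority: 1
  }
};

// Sent once every bucket in the checkpoint has been streamed.
const checkpointComplete = {
  checkpoint_complete: {
    last_op_id: '1000'
  }
};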
package/src/util/util-index.ts CHANGED
@@ -5,6 +5,7 @@ export * from './Mutex.js';
  export * from './protocol-types.js';
  export * from './secs.js';
  export * from './utils.js';
+ export * from './checkpointing.js';

  export * from './config.js';
  export * from './config/compound-config-collector.js';
package/src/util/utils.ts CHANGED
@@ -145,16 +145,6 @@ export function isCompleteRow(storeData: boolean, row: sync_rules.ToastableSqlit
  return !hasToastedValues(row);
  }

- export function checkpointUserId(user_id: string | undefined, client_id: string | undefined) {
- if (user_id == null) {
- throw new Error('user_id is required');
- }
- if (client_id == null) {
- return user_id;
- }
- return `${user_id}/${client_id}`;
- }
-
  /**
  * Reduce a bucket to the final state as stored on the client.
  *
package/test/src/auth.test.ts CHANGED
@@ -274,7 +274,7 @@ describe('JWT Auth', () => {
  ).rejects.toThrow('Token must expire in a maximum of');
  });

- test('http', async () => {
+ test('http', { timeout: 20_000 }, async () => {
  // Not ideal to rely on an external endpoint for tests, but it is good to test that this
  // one actually works.
  const remote = new RemoteJWKSCollector(
@@ -290,9 +290,9 @@ describe('JWT Auth', () => {
  reject_ip_ranges: ['local']
  }
  });
- expect(invalid.getKeys()).rejects.toThrow('IPs in this range are not supported');
+ await expect(invalid.getKeys()).rejects.toThrow('IPs in this range are not supported');

- // IPS throw an error immediately
+ // IPs throw an error immediately
  expect(
  () =>
  new RemoteJWKSCollector('https://127.0.0.1/.well-known/jwks.json', {
@@ -315,11 +315,11 @@ describe('JWT Auth', () => {

  const invalid = new RemoteJWKSCollector('https://127.0.0.1/.well-known/jwks.json');
  // Should try and fetch
- expect(invalid.getKeys()).rejects.toThrow();
+ await expect(invalid.getKeys()).rejects.toThrow();

  const invalid2 = new RemoteJWKSCollector('https://localhost/.well-known/jwks.json');
  // Should try and fetch
- expect(invalid2.getKeys()).rejects.toThrow();
+ await expect(invalid2.getKeys()).rejects.toThrow();
  });

  test('caching', async () => {
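
The added await keywords matter because a .rejects assertion returns a promise; without awaiting it, a failure can settle after the test has already finished and may only surface as an unhandled rejection. A minimal sketch of the corrected pattern (vitest-style, matching the options-object test signature used in the 'http' test above; the promise and message are illustrative):

import { expect, test } from 'vitest';

test('await rejects assertions', async () => {
  // Floating assertion: the test may finish before this expectation settles,
  // so a failure could be dropped.
  // expect(somePromise).rejects.toThrow();

  // Awaited assertion: a failure is reported inside this test.
  await expect(Promise.reject(new Error('boom'))).rejects.toThrow('boom');
});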