@powersync/service-core 0.18.0 → 1.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +22 -0
- package/dist/api/RouteAPI.d.ts +9 -1
- package/dist/api/diagnostics.js +107 -169
- package/dist/api/diagnostics.js.map +1 -1
- package/dist/entry/commands/compact-action.js +10 -73
- package/dist/entry/commands/compact-action.js.map +1 -1
- package/dist/modules/AbstractModule.d.ts +1 -1
- package/dist/replication/AbstractReplicator.js +2 -65
- package/dist/replication/AbstractReplicator.js.map +1 -1
- package/dist/routes/configure-fastify.d.ts +3 -3
- package/dist/routes/endpoints/admin.d.ts +6 -6
- package/dist/routes/endpoints/checkpointing.js +16 -85
- package/dist/routes/endpoints/checkpointing.js.map +1 -1
- package/dist/routes/endpoints/socket-route.js +5 -5
- package/dist/routes/endpoints/socket-route.js.map +1 -1
- package/dist/routes/endpoints/sync-stream.js +5 -5
- package/dist/routes/endpoints/sync-stream.js.map +1 -1
- package/dist/runner/teardown.js +3 -65
- package/dist/runner/teardown.js.map +1 -1
- package/dist/storage/BucketStorage.d.ts +1 -442
- package/dist/storage/BucketStorage.js +0 -9
- package/dist/storage/BucketStorage.js.map +1 -1
- package/dist/storage/BucketStorageBatch.d.ts +130 -0
- package/dist/storage/BucketStorageBatch.js +10 -0
- package/dist/storage/BucketStorageBatch.js.map +1 -0
- package/dist/storage/BucketStorageFactory.d.ts +136 -0
- package/dist/storage/BucketStorageFactory.js +2 -0
- package/dist/storage/BucketStorageFactory.js.map +1 -0
- package/dist/storage/ChecksumCache.js.map +1 -1
- package/dist/storage/PersistedSyncRulesContent.d.ts +20 -0
- package/dist/storage/PersistedSyncRulesContent.js +2 -0
- package/dist/storage/PersistedSyncRulesContent.js.map +1 -0
- package/dist/storage/ReplicationEventPayload.d.ts +1 -1
- package/dist/storage/ReplicationLock.d.ts +4 -0
- package/dist/storage/ReplicationLock.js +2 -0
- package/dist/storage/ReplicationLock.js.map +1 -0
- package/dist/storage/StorageEngine.d.ts +4 -4
- package/dist/storage/StorageEngine.js +2 -2
- package/dist/storage/StorageEngine.js.map +1 -1
- package/dist/storage/StorageProvider.d.ts +4 -1
- package/dist/storage/SyncRulesBucketStorage.d.ts +201 -0
- package/dist/storage/SyncRulesBucketStorage.js +7 -0
- package/dist/storage/SyncRulesBucketStorage.js.map +1 -0
- package/dist/storage/bson.d.ts +11 -3
- package/dist/storage/bson.js +24 -2
- package/dist/storage/bson.js.map +1 -1
- package/dist/storage/storage-index.d.ts +5 -0
- package/dist/storage/storage-index.js +5 -0
- package/dist/storage/storage-index.js.map +1 -1
- package/dist/sync/BucketChecksumState.d.ts +91 -0
- package/dist/sync/BucketChecksumState.js +313 -0
- package/dist/sync/BucketChecksumState.js.map +1 -0
- package/dist/sync/sync-index.d.ts +1 -0
- package/dist/sync/sync-index.js +1 -0
- package/dist/sync/sync-index.js.map +1 -1
- package/dist/sync/sync.d.ts +7 -3
- package/dist/sync/sync.js +131 -135
- package/dist/sync/sync.js.map +1 -1
- package/dist/sync/util.d.ts +9 -0
- package/dist/sync/util.js +44 -0
- package/dist/sync/util.js.map +1 -1
- package/dist/util/checkpointing.d.ts +13 -0
- package/dist/util/checkpointing.js +29 -0
- package/dist/util/checkpointing.js.map +1 -0
- package/dist/util/protocol-types.d.ts +13 -4
- package/dist/util/util-index.d.ts +1 -0
- package/dist/util/util-index.js +1 -0
- package/dist/util/util-index.js.map +1 -1
- package/dist/util/utils.d.ts +0 -1
- package/dist/util/utils.js +0 -9
- package/dist/util/utils.js.map +1 -1
- package/package.json +4 -4
- package/src/api/RouteAPI.ts +11 -1
- package/src/api/diagnostics.ts +1 -1
- package/src/entry/commands/compact-action.ts +2 -3
- package/src/modules/AbstractModule.ts +1 -1
- package/src/replication/AbstractReplicator.ts +1 -2
- package/src/routes/endpoints/checkpointing.ts +11 -22
- package/src/routes/endpoints/socket-route.ts +7 -5
- package/src/routes/endpoints/sync-stream.ts +8 -5
- package/src/runner/teardown.ts +1 -1
- package/src/storage/BucketStorage.ts +1 -552
- package/src/storage/BucketStorageBatch.ts +158 -0
- package/src/storage/BucketStorageFactory.ts +156 -0
- package/src/storage/ChecksumCache.ts +1 -0
- package/src/storage/PersistedSyncRulesContent.ts +26 -0
- package/src/storage/ReplicationEventPayload.ts +1 -1
- package/src/storage/ReplicationLock.ts +5 -0
- package/src/storage/StorageEngine.ts +4 -4
- package/src/storage/StorageProvider.ts +4 -1
- package/src/storage/SyncRulesBucketStorage.ts +256 -0
- package/src/storage/bson.ts +28 -4
- package/src/storage/storage-index.ts +5 -0
- package/src/sync/BucketChecksumState.ts +392 -0
- package/src/sync/sync-index.ts +1 -0
- package/src/sync/sync.ts +173 -157
- package/src/sync/util.ts +54 -0
- package/src/util/checkpointing.ts +41 -0
- package/src/util/protocol-types.ts +16 -4
- package/src/util/util-index.ts +1 -0
- package/src/util/utils.ts +0 -10
- package/test/src/auth.test.ts +5 -5
- package/test/src/sync/BucketChecksumState.test.ts +565 -0
- package/test/src/sync/util.test.ts +34 -0
- package/tsconfig.tsbuildinfo +1 -1
package/src/sync/sync.ts
CHANGED
@@ -1,5 +1,5 @@
 import { JSONBig, JsonContainer } from '@powersync/service-jsonbig';
-import { RequestParameters } from '@powersync/service-sync-rules';
+import { BucketDescription, BucketPriority, RequestParameters, SqlSyncRules } from '@powersync/service-sync-rules';
 import { Semaphore, withTimeout } from 'async-mutex';

 import { AbortError } from 'ix/aborterror.js';
@@ -9,9 +9,10 @@ import * as storage from '../storage/storage-index.js';
 import * as util from '../util/util-index.js';

 import { logger } from '@powersync/lib-services-framework';
+import { BucketChecksumState } from './BucketChecksumState.js';
 import { mergeAsyncIterables } from './merge.js';
 import { RequestTracker } from './RequestTracker.js';
-import {
+import { acquireSemaphoreAbortable, settledPromise, tokenStream, TokenStreamOptions } from './util.js';

 /**
  * Maximum number of connections actively fetching data.
@@ -32,11 +33,11 @@ const syncSemaphore = withTimeout(
 );

 export interface SyncStreamParameters {
-  … (1 deleted line not shown)
+  bucketStorage: storage.SyncRulesBucketStorage;
+  syncRules: SqlSyncRules;
   params: util.StreamingSyncRequest;
   syncParams: RequestParameters;
   token: auth.JwtPayload;
-  parseOptions: storage.ParseSyncRulesOptions;
   /**
    * If this signal is aborted, the stream response ends as soon as possible, without error.
    */
@@ -49,7 +50,7 @@ export interface SyncStreamParameters {
 export async function* streamResponse(
   options: SyncStreamParameters
 ): AsyncIterable<util.StreamingSyncLine | string | null> {
-  const {
+  const { bucketStorage, syncRules, params, syncParams, token, tokenStreamOptions, tracker, signal } = options;
   // We also need to be able to abort, so we create our own controller.
   const controller = new AbortController();
   if (signal) {
@@ -65,7 +66,7 @@ export async function* streamResponse(
     }
   }
   const ki = tokenStream(token, controller.signal, tokenStreamOptions);
-  const stream = streamResponseInner(
+  const stream = streamResponseInner(bucketStorage, syncRules, params, syncParams, tracker, controller.signal);
   // Merge the two streams, and abort as soon as one of the streams end.
   const merged = mergeAsyncIterables([stream, ki], controller.signal);

@@ -84,157 +85,155 @@ export async function* streamResponse(
   }
 }

+export type BucketSyncState = {
+  description?: BucketDescription; // Undefined if the bucket has not yet been resolved by us.
+  start_op_id: string;
+};
+
 async function* streamResponseInner(
-  … (1 deleted line not shown)
+  bucketStorage: storage.SyncRulesBucketStorage,
+  syncRules: SqlSyncRules,
   params: util.StreamingSyncRequest,
   syncParams: RequestParameters,
   tracker: RequestTracker,
-  parseOptions: storage.ParseSyncRulesOptions,
   signal: AbortSignal
 ): AsyncGenerator<util.StreamingSyncLine | string | null> {
-  // Bucket state of bucket id -> op_id.
-  // This starts with the state from the client. May contain buckets that the user do not have access to (anymore).
-  let dataBuckets = new Map<string, string>();
-
-  let lastChecksums: util.ChecksumMap | null = null;
-  let lastWriteCheckpoint: bigint | null = null;
-
   const { raw_data, binary_data } = params;

-  if (params.buckets) {
-    for (let { name, after: start } of params.buckets) {
-      dataBuckets.set(name, start);
-    }
-  }
-
   const checkpointUserId = util.checkpointUserId(syncParams.token_parameters.user_id as string, params.client_id);
-  const stream = storage.watchWriteCheckpoint(checkpointUserId, signal);
-  for await (const next of stream) {
-    const { base, writeCheckpoint } = next;
-    const checkpoint = base.checkpoint;
-
-    const storage = await base.getBucketStorage();
-    if (storage == null) {
-      // Sync rules deleted in the meantime - try again with the next checkpoint.
-      continue;
-    }
-    const syncRules = storage.getParsedSyncRules(parseOptions);

-… (11 deleted lines not shown)
-        buckets: allBuckets.length
-      });
-      // TODO: Limit number of buckets even before we get to this point
-      throw new Error(`Too many buckets: ${allBuckets.length}`);
-    }
+  const checksumState = new BucketChecksumState({
+    bucketStorage,
+    syncRules,
+    syncParams,
+    initialBucketPositions: params.buckets
+  });
+  const stream = bucketStorage.watchWriteCheckpoint({
+    user_id: checkpointUserId,
+    signal
+  });
+  const newCheckpoints = stream[Symbol.asyncIterator]();

-… (20 deleted lines not shown)
+  try {
+    let nextCheckpointPromise:
+      | Promise<PromiseSettledResult<IteratorResult<storage.StorageCheckpointUpdate>>>
+      | undefined;
+
+    do {
+      if (!nextCheckpointPromise) {
+        // Wrap in a settledPromise, so that abort errors after the parent stopped iterating
+        // does not result in uncaught errors.
+        nextCheckpointPromise = settledPromise(newCheckpoints.next());
+      }
+      const next = await nextCheckpointPromise;
+      nextCheckpointPromise = undefined;
+      if (next.status == 'rejected') {
+        throw next.reason;
+      }
+      if (next.value.done) {
+        break;
+      }
+      const line = await checksumState.buildNextCheckpointLine(next.value.value);
+      if (line == null) {
+        // No update to sync
         continue;
       }
-    bucketsToFetch = diff.updatedBuckets.map((c) => c.bucket);
-
-    let message = `Updated checkpoint: ${checkpoint} | `;
-    message += `write: ${writeCheckpoint} | `;
-    message += `buckets: ${allBuckets.length} | `;
-    message += `updated: ${limitedBuckets(diff.updatedBuckets, 20)} | `;
-    message += `removed: ${limitedBuckets(diff.removedBuckets, 20)}`;
-    logger.info(message, {
-      checkpoint,
-      user_id: syncParams.user_id,
-      buckets: allBuckets.length,
-      updated: diff.updatedBuckets.length,
-      removed: diff.removedBuckets.length
-    });

-    const
-… (5 deleted lines not shown)
+      const { checkpointLine, bucketsToFetch } = line;
+
+      yield checkpointLine;
+      // Start syncing data for buckets up to the checkpoint. As soon as we have completed at least one priority and
+      // at least 1000 operations, we also start listening for new checkpoints concurrently. When a new checkpoint comes
+      // in while we're still busy syncing data for lower priorities, interrupt the current operation and start syncing
+      // the new checkpoint.
+      const abortCheckpointController = new AbortController();
+      let syncedOperations = 0;
+
+      const abortCheckpointSignal = AbortSignal.any([abortCheckpointController.signal, signal]);
+
+      const bucketsByPriority = [...Map.groupBy(bucketsToFetch, (bucket) => bucket.priority).entries()];
+      bucketsByPriority.sort((a, b) => a[0] - b[0]); // Sort from high to lower priorities
+      const lowestPriority = bucketsByPriority.at(-1)?.[0];
+
+      function maybeRaceForNewCheckpoint() {
+        if (syncedOperations >= 1000 && nextCheckpointPromise === undefined) {
+          nextCheckpointPromise = (async () => {
+            const next = await settledPromise(newCheckpoints.next());
+            if (next.status == 'rejected') {
+              abortCheckpointController.abort();
+            } else if (!next.value.done) {
+              // Stop the running bucketDataInBatches() iterations, making the main flow reach the new checkpoint.
+              abortCheckpointController.abort();
+            }
+
+            return next;
+          })();
         }
-  }
+      }

-… (11 deleted lines not shown)
+      function markOperationsSent(operations: number) {
+        syncedOperations += operations;
+        tracker.addOperationsSynced(operations);
+        maybeRaceForNewCheckpoint();
+      }
+
+      // This incrementally updates dataBuckets with each individual bucket position.
+      // At the end of this, we can be sure that all buckets have data up to the checkpoint.
+      for (const [priority, buckets] of bucketsByPriority) {
+        const isLast = priority === lowestPriority;
+        if (abortCheckpointSignal.aborted) {
+          break;
        }
-… (21 deleted lines not shown)
+
+        yield* bucketDataInBatches({
+          bucketStorage: bucketStorage,
+          checkpoint: next.value.value.base.checkpoint,
+          bucketsToFetch: buckets,
+          checksumState,
+          raw_data,
+          binary_data,
+          onRowsSent: markOperationsSent,
+          abort_connection: signal,
+          abort_batch: abortCheckpointSignal,
+          user_id: syncParams.user_id,
+          // Passing undefined will emit a full sync complete message at the end. If we pass a priority, we'll emit a partial
+          // sync complete message.
+          forPriority: !isLast ? priority : undefined
+        });
+      }
+
+      if (!abortCheckpointSignal.aborted) {
+        await new Promise((resolve) => setTimeout(resolve, 10));
+      }
+    } while (!signal.aborted);
+  } finally {
+    await newCheckpoints.return?.();
   }
 }

 interface BucketDataRequest {
-  … (1 deleted line not shown)
+  bucketStorage: storage.SyncRulesBucketStorage;
   checkpoint: string;
-  bucketsToFetch:
-  /**
-  … (1 deleted line not shown)
+  bucketsToFetch: BucketDescription[];
+  /** Contains current bucket state. Modified by the request as data is sent. */
+  checksumState: BucketChecksumState;
   raw_data: boolean | undefined;
   binary_data: boolean | undefined;
-  … (2 deleted lines not shown)
+  /** Signals that the connection was aborted and that streaming should stop ASAP. */
+  abort_connection: AbortSignal;
+  /**
+   * Signals that higher-priority batches are available. The current batch can stop at a sensible point.
+   * This signal also fires when abort_connection fires.
+   */
+  abort_batch: AbortSignal;
   user_id?: string;
+  forPriority?: BucketPriority;
+  onRowsSent: (amount: number) => void;
 }

 async function* bucketDataInBatches(request: BucketDataRequest) {
   let isDone = false;
-  while (!request.
+  while (!request.abort_batch.aborted && !isDone) {
     // The code below is functionally the same as this for-await loop below.
     // However, the for-await loop appears to have a memory leak, so we avoid it.
     // for await (const { done, data } of bucketDataBatch(storage, checkpoint, dataBuckets, raw_data, signal)) {
@@ -273,7 +272,17 @@ interface BucketDataBatchResult {
  * Extracted as a separate internal function just to avoid memory leaks.
  */
 async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<BucketDataBatchResult, void> {
-  const {
+  const {
+    bucketStorage: storage,
+    checkpoint,
+    bucketsToFetch,
+    checksumState,
+    raw_data,
+    binary_data,
+    abort_connection,
+    abort_batch,
+    onRowsSent
+  } = request;

   const checkpointOp = BigInt(checkpoint);
   let checkpointInvalidated = false;
@@ -281,7 +290,12 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
   if (syncSemaphore.isLocked()) {
     logger.info('Sync concurrency limit reached, waiting for lock', { user_id: request.user_id });
   }
-  const
+  const acquired = await acquireSemaphoreAbortable(syncSemaphore, AbortSignal.any([abort_batch]));
+  if (acquired === 'aborted') {
+    return;
+  }
+
+  const [value, release] = acquired;
   try {
     if (value <= 3) {
       // This can be noisy, so we only log when we get close to the
@@ -293,13 +307,14 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
     }
     // Optimization: Only fetch buckets for which the checksums have changed since the last checkpoint
     // For the first batch, this will be all buckets.
-    const filteredBuckets =
-    const
+    const filteredBuckets = checksumState.getFilteredBucketPositions(bucketsToFetch);
+    const dataBatches = storage.getBucketDataBatch(checkpoint, filteredBuckets);

     let has_more = false;

-    for await (let { batch: r, targetOp } of
-      if
+    for await (let { batch: r, targetOp } of dataBatches) {
+      // Abort in current batch if the connection is closed
+      if (abort_connection.aborted) {
        return;
      }
      if (r.has_more) {
@@ -339,9 +354,15 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
        // iterator memory in case if large data sent.
        yield { data: null, done: false };
      }
-      … (1 deleted line not shown)
+      onRowsSent(r.data.length);
+
+      checksumState.updateBucketPosition({ bucket: r.bucket, nextAfter: r.next_after, hasMore: r.has_more });

-      … (1 deleted line not shown)
+      // Check if syncing bucket data is supposed to stop before fetching more data
+      // from storage.
+      if (abort_batch.aborted) {
+        return;
+      }
    }

    if (!has_more) {
@@ -351,12 +372,22 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
      // More data should be available immediately for a new checkpoint.
      yield { data: null, done: true };
    } else {
-      … (6 deleted lines not shown)
+      if (request.forPriority !== undefined) {
+        const line: util.StreamingSyncCheckpointPartiallyComplete = {
+          partial_checkpoint_complete: {
+            last_op_id: checkpoint,
+            priority: request.forPriority
+          }
+        };
+        yield { data: line, done: true };
+      } else {
+        const line: util.StreamingSyncCheckpointComplete = {
+          checkpoint_complete: {
+            last_op_id: checkpoint
+          }
+        };
+        yield { data: line, done: true };
+      }
    }
   }
 } finally {
@@ -383,18 +414,3 @@ function transformLegacyResponse(bucketData: util.SyncBucketData): any {
    })
  };
 }
-
-function limitedBuckets(buckets: string[] | util.BucketChecksum[], limit: number) {
-  buckets = buckets.map((b) => {
-    if (typeof b != 'string') {
-      return b.bucket;
-    } else {
-      return b;
-    }
-  });
-  if (buckets.length <= limit) {
-    return JSON.stringify(buckets);
-  }
-  const limited = buckets.slice(0, limit);
-  return `${JSON.stringify(limited)}...`;
-}
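The rewritten streamResponseInner above is priority-aware: buckets are grouped by priority, lower-numbered (higher-priority) groups are streamed first, and once at least 1000 operations have been sent the loop starts racing the remaining batches against the next checkpoint. A minimal standalone sketch of the grouping step, using a hypothetical Bucket shape rather than the package's own BucketDescription type (Map.groupBy requires Node 21+ / ES2024 lib):

interface Bucket {
  bucket: string;
  priority: number; // Lower number = higher priority, synced first.
}

function groupByPriority(bucketsToFetch: Bucket[]): [number, Bucket[]][] {
  // Same pattern as the diff above: group by priority, then sort ascending.
  const grouped = [...Map.groupBy(bucketsToFetch, (b) => b.priority).entries()];
  grouped.sort((a, b) => a[0] - b[0]);
  return grouped;
}

const groups = groupByPriority([
  { bucket: 'global[]', priority: 3 },
  { bucket: 'by_user["u1"]', priority: 1 }
]);
console.log(groups.map(([priority, buckets]) => [priority, buckets.length]));
// => [ [ 1, 1 ], [ 3, 1 ] ] -- the priority-1 group is streamed before priority 3.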
package/src/sync/util.ts
CHANGED
@@ -2,6 +2,7 @@ import * as timers from 'timers/promises';

 import * as util from '../util/util-index.js';
 import { RequestTracker } from './RequestTracker.js';
+import { SemaphoreInterface } from 'async-mutex';

 export type TokenStreamOptions = {
   /**
@@ -99,3 +100,56 @@ export async function* transformToBytesTracked(
     yield encoded;
   }
 }
+
+export function acquireSemaphoreAbortable(
+  semaphone: SemaphoreInterface,
+  abort: AbortSignal
+): Promise<[number, SemaphoreInterface.Releaser] | 'aborted'> {
+  return new Promise((resolve, reject) => {
+    let aborted = false;
+    let hasSemaphore = false;
+
+    const listener = () => {
+      if (!hasSemaphore) {
+        aborted = true;
+        abort.removeEventListener('abort', listener);
+        resolve('aborted');
+      }
+    };
+    abort.addEventListener('abort', listener);
+
+    semaphone.acquire().then((acquired) => {
+      hasSemaphore = true;
+      if (aborted) {
+        // Release semaphore, already aborted
+        acquired[1]();
+      } else {
+        abort.removeEventListener('abort', listener);
+        resolve(acquired);
+      }
+    }, reject);
+  });
+}
+
+/**
+ * Wrap a promise in the style of Promise.allSettled.
+ *
+ * This is specifically useful if rejections should not be treated as uncaught rejections
+ * if it is not specifically handled.
+ */
+export function settledPromise<T>(promise: Promise<T>): Promise<PromiseSettledResult<T>> {
+  return promise.then(
+    (result) => {
+      return {
+        status: 'fulfilled',
+        value: result
+      };
+    },
+    (error) => {
+      return {
+        status: 'rejected',
+        reason: error
+      };
+    }
+  );
+}
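A possible usage sketch for the two helpers added above; the two-permit semaphore and the demo flow are illustrative assumptions, not code from the package:

import { Semaphore } from 'async-mutex';
import { acquireSemaphoreAbortable, settledPromise } from './util.js';

async function demo(signal: AbortSignal) {
  const semaphore = new Semaphore(2); // Illustrative: two concurrent slots.

  const acquired = await acquireSemaphoreAbortable(semaphore, signal);
  if (acquired === 'aborted') {
    return; // Aborted while waiting; no permit is held, nothing to release.
  }
  const [value, release] = acquired;
  try {
    console.log('acquired; semaphore value:', value);
  } finally {
    release();
  }

  // settledPromise never rejects, so a pending rejection cannot surface as
  // an unhandled rejection before the caller gets around to awaiting it.
  const settled = await settledPromise(Promise.reject(new Error('boom')));
  if (settled.status === 'rejected') {
    console.log('handled:', settled.reason);
  }
}

demo(new AbortController().signal);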
package/src/util/checkpointing.ts
ADDED
@@ -0,0 +1,41 @@
+import { ErrorCode, ServiceError } from '@powersync/lib-services-framework';
+import { RouteAPI } from '../api/RouteAPI.js';
+import { BucketStorageFactory } from '../storage/storage-index.js';
+
+export interface CreateWriteCheckpointOptions {
+  userId: string | undefined;
+  clientId: string | undefined;
+  api: RouteAPI;
+  storage: BucketStorageFactory;
+}
+export async function createWriteCheckpoint(options: CreateWriteCheckpointOptions) {
+  const full_user_id = checkpointUserId(options.userId, options.clientId);
+
+  const syncBucketStorage = await options.storage.getActiveStorage();
+  if (!syncBucketStorage) {
+    throw new ServiceError(ErrorCode.PSYNC_S2302, `Cannot create Write Checkpoint since no sync rules are active.`);
+  }
+
+  const { writeCheckpoint, currentCheckpoint } = await options.api.createReplicationHead(async (currentCheckpoint) => {
+    const writeCheckpoint = await syncBucketStorage.createManagedWriteCheckpoint({
+      user_id: full_user_id,
+      heads: { '1': currentCheckpoint }
+    });
+    return { writeCheckpoint, currentCheckpoint };
+  });
+
+  return {
+    writeCheckpoint: String(writeCheckpoint),
+    replicationHead: currentCheckpoint
+  };
+}
+
+export function checkpointUserId(user_id: string | undefined, client_id: string | undefined) {
+  if (user_id == null) {
+    throw new Error('user_id is required');
+  }
+  if (client_id == null) {
+    return user_id;
+  }
+  return `${user_id}/${client_id}`;
+}
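For reference, the checkpointUserId helper above (relocated here from utils.ts, whose diff appears below) composes the checkpoint key as follows:

import { checkpointUserId } from './checkpointing.js';

checkpointUserId('user-1', undefined);   // => 'user-1'
checkpointUserId('user-1', 'device-a');  // => 'user-1/device-a'
checkpointUserId(undefined, 'device-a'); // throws Error('user_id is required')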
package/src/util/protocol-types.ts
CHANGED
@@ -1,5 +1,5 @@
 import * as t from 'ts-codec';
-import { SqliteJsonValue } from '@powersync/service-sync-rules';
+import { BucketDescription, BucketPriority, SqliteJsonValue } from '@powersync/service-sync-rules';

 export const BucketRequest = t.object({
   name: t.string,
@@ -59,7 +59,7 @@ export interface StreamingSyncCheckpointDiff {
   checkpoint_diff: {
     last_op_id: OpId;
     write_checkpoint?: OpId;
-    updated_buckets:
+    updated_buckets: BucketChecksumWithDescription[];
     removed_buckets: string[];
   };
 }
@@ -74,13 +74,23 @@ export interface StreamingSyncCheckpointComplete {
   };
 }

-export interface
+export interface StreamingSyncCheckpointPartiallyComplete {
+  partial_checkpoint_complete: {
+    last_op_id: OpId;
+    priority: BucketPriority;
+  };
+}
+
+export interface StreamingSyncKeepalive {
+  token_expires_in: number;
+}

 export type StreamingSyncLine =
   | StreamingSyncData
   | StreamingSyncCheckpoint
   | StreamingSyncCheckpointDiff
   | StreamingSyncCheckpointComplete
+  | StreamingSyncCheckpointPartiallyComplete
   | StreamingSyncKeepalive;

 /**
@@ -91,7 +101,7 @@ export type OpId = string;
 export interface Checkpoint {
   last_op_id: OpId;
   write_checkpoint?: OpId;
-  buckets:
+  buckets: BucketChecksumWithDescription[];
 }

 export interface BucketState {
@@ -142,3 +152,5 @@ export interface BucketChecksum {
    */
   count: number;
 }
+
+export interface BucketChecksumWithDescription extends BucketChecksum, BucketDescription {}
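Given the interface added above, a partial sync completion line emitted for a finished priority level would look roughly like this (field values are illustrative):

import { StreamingSyncCheckpointPartiallyComplete } from './protocol-types.js';

const line: StreamingSyncCheckpointPartiallyComplete = {
  partial_checkpoint_complete: {
    last_op_id: '1025', // OpId is a string
    priority: 1 // the priority level that has finished syncing up to last_op_id
  }
};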
package/src/util/util-index.ts
CHANGED
package/src/util/utils.ts
CHANGED
@@ -145,16 +145,6 @@ export function isCompleteRow(storeData: boolean, row: sync_rules.ToastableSqlit
   return !hasToastedValues(row);
 }

-export function checkpointUserId(user_id: string | undefined, client_id: string | undefined) {
-  if (user_id == null) {
-    throw new Error('user_id is required');
-  }
-  if (client_id == null) {
-    return user_id;
-  }
-  return `${user_id}/${client_id}`;
-}
-
 /**
  * Reduce a bucket to the final state as stored on the client.
  *
package/test/src/auth.test.ts
CHANGED
@@ -274,7 +274,7 @@ describe('JWT Auth', () => {
   ).rejects.toThrow('Token must expire in a maximum of');
  });

-  test('http', async () => {
+  test('http', { timeout: 20_000 }, async () => {
    // Not ideal to rely on an external endpoint for tests, but it is good to test that this
    // one actually works.
    const remote = new RemoteJWKSCollector(
@@ -290,9 +290,9 @@
        reject_ip_ranges: ['local']
      }
    });
-    expect(invalid.getKeys()).rejects.toThrow('IPs in this range are not supported');
+    await expect(invalid.getKeys()).rejects.toThrow('IPs in this range are not supported');

-    //
+    // IPs throw an error immediately
    expect(
      () =>
        new RemoteJWKSCollector('https://127.0.0.1/.well-known/jwks.json', {
@@ -315,11 +315,11 @@

    const invalid = new RemoteJWKSCollector('https://127.0.0.1/.well-known/jwks.json');
    // Should try and fetch
-    expect(invalid.getKeys()).rejects.toThrow();
+    await expect(invalid.getKeys()).rejects.toThrow();

    const invalid2 = new RemoteJWKSCollector('https://localhost/.well-known/jwks.json');
    // Should try and fetch
-    expect(invalid2.getKeys()).rejects.toThrow();
+    await expect(invalid2.getKeys()).rejects.toThrow();
  });

  test('caching', async () => {
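The added awaits in this file are meaningful: vitest's expect(...).rejects assertion returns a promise, and if it is not awaited the test can finish before the assertion settles, with failures surfacing only as unhandled rejections. A contrived sketch of the difference (the failing helper is hypothetical):

import { expect, test } from 'vitest';

const failing = () => Promise.reject(new Error('nope')); // hypothetical helper

test('rejection is actually asserted before the test ends', async () => {
  // Without the `await`, this assertion could still be pending when the
  // test completes, so a mismatch would not reliably fail the test.
  await expect(failing()).rejects.toThrow('nope');
});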
|