@powersync/service-core 0.18.1 → 1.7.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +29 -0
- package/dist/api/RouteAPI.d.ts +1 -1
- package/dist/api/diagnostics.js +107 -169
- package/dist/api/diagnostics.js.map +1 -1
- package/dist/entry/commands/compact-action.js +10 -73
- package/dist/entry/commands/compact-action.js.map +1 -1
- package/dist/modules/AbstractModule.d.ts +1 -1
- package/dist/replication/AbstractReplicator.js +8 -76
- package/dist/replication/AbstractReplicator.js.map +1 -1
- package/dist/routes/endpoints/checkpointing.js +3 -2
- package/dist/routes/endpoints/checkpointing.js.map +1 -1
- package/dist/routes/endpoints/socket-route.js +5 -5
- package/dist/routes/endpoints/socket-route.js.map +1 -1
- package/dist/routes/endpoints/sync-stream.js +5 -5
- package/dist/routes/endpoints/sync-stream.js.map +1 -1
- package/dist/runner/teardown.js +3 -65
- package/dist/runner/teardown.js.map +1 -1
- package/dist/storage/BucketStorage.d.ts +8 -441
- package/dist/storage/BucketStorage.js +9 -10
- package/dist/storage/BucketStorage.js.map +1 -1
- package/dist/storage/BucketStorageBatch.d.ts +130 -0
- package/dist/storage/BucketStorageBatch.js +10 -0
- package/dist/storage/BucketStorageBatch.js.map +1 -0
- package/dist/storage/BucketStorageFactory.d.ts +145 -0
- package/dist/storage/BucketStorageFactory.js +2 -0
- package/dist/storage/BucketStorageFactory.js.map +1 -0
- package/dist/storage/ChecksumCache.js.map +1 -1
- package/dist/storage/PersistedSyncRulesContent.d.ts +20 -0
- package/dist/storage/PersistedSyncRulesContent.js +2 -0
- package/dist/storage/PersistedSyncRulesContent.js.map +1 -0
- package/dist/storage/ReplicationEventPayload.d.ts +1 -1
- package/dist/storage/ReplicationLock.d.ts +4 -0
- package/dist/storage/ReplicationLock.js +2 -0
- package/dist/storage/ReplicationLock.js.map +1 -0
- package/dist/storage/SourceEntity.d.ts +6 -2
- package/dist/storage/SourceTable.d.ts +2 -2
- package/dist/storage/SourceTable.js.map +1 -1
- package/dist/storage/StorageEngine.d.ts +4 -4
- package/dist/storage/StorageEngine.js +2 -2
- package/dist/storage/StorageEngine.js.map +1 -1
- package/dist/storage/StorageProvider.d.ts +4 -1
- package/dist/storage/SyncRulesBucketStorage.d.ts +207 -0
- package/dist/storage/SyncRulesBucketStorage.js +7 -0
- package/dist/storage/SyncRulesBucketStorage.js.map +1 -0
- package/dist/storage/bson.d.ts +14 -3
- package/dist/storage/bson.js +18 -2
- package/dist/storage/bson.js.map +1 -1
- package/dist/storage/storage-index.d.ts +5 -0
- package/dist/storage/storage-index.js +5 -0
- package/dist/storage/storage-index.js.map +1 -1
- package/dist/sync/BucketChecksumState.d.ts +91 -0
- package/dist/sync/BucketChecksumState.js +313 -0
- package/dist/sync/BucketChecksumState.js.map +1 -0
- package/dist/sync/sync-index.d.ts +1 -0
- package/dist/sync/sync-index.js +1 -0
- package/dist/sync/sync-index.js.map +1 -1
- package/dist/sync/sync.d.ts +7 -3
- package/dist/sync/sync.js +139 -135
- package/dist/sync/sync.js.map +1 -1
- package/dist/sync/util.d.ts +9 -0
- package/dist/sync/util.js +44 -0
- package/dist/sync/util.js.map +1 -1
- package/dist/util/checkpointing.d.ts +1 -1
- package/dist/util/checkpointing.js +15 -78
- package/dist/util/checkpointing.js.map +1 -1
- package/dist/util/protocol-types.d.ts +13 -4
- package/package.json +5 -5
- package/src/api/RouteAPI.ts +1 -1
- package/src/api/diagnostics.ts +1 -1
- package/src/entry/commands/compact-action.ts +2 -3
- package/src/modules/AbstractModule.ts +1 -1
- package/src/replication/AbstractReplicator.ts +7 -12
- package/src/routes/endpoints/checkpointing.ts +3 -3
- package/src/routes/endpoints/socket-route.ts +7 -5
- package/src/routes/endpoints/sync-stream.ts +8 -5
- package/src/runner/teardown.ts +1 -1
- package/src/storage/BucketStorage.ts +8 -550
- package/src/storage/BucketStorageBatch.ts +158 -0
- package/src/storage/BucketStorageFactory.ts +166 -0
- package/src/storage/ChecksumCache.ts +1 -0
- package/src/storage/PersistedSyncRulesContent.ts +26 -0
- package/src/storage/ReplicationEventPayload.ts +1 -1
- package/src/storage/ReplicationLock.ts +5 -0
- package/src/storage/SourceEntity.ts +6 -2
- package/src/storage/SourceTable.ts +1 -1
- package/src/storage/StorageEngine.ts +4 -4
- package/src/storage/StorageProvider.ts +4 -1
- package/src/storage/SyncRulesBucketStorage.ts +265 -0
- package/src/storage/bson.ts +22 -4
- package/src/storage/storage-index.ts +5 -0
- package/src/sync/BucketChecksumState.ts +392 -0
- package/src/sync/sync-index.ts +1 -0
- package/src/sync/sync.ts +182 -157
- package/src/sync/util.ts +54 -0
- package/src/util/checkpointing.ts +4 -6
- package/src/util/protocol-types.ts +16 -4
- package/test/src/auth.test.ts +5 -5
- package/test/src/sync/BucketChecksumState.test.ts +565 -0
- package/test/src/sync/util.test.ts +34 -0
- package/tsconfig.tsbuildinfo +1 -1
package/src/sync/sync.ts
CHANGED

@@ -1,5 +1,5 @@
 import { JSONBig, JsonContainer } from '@powersync/service-jsonbig';
-import { RequestParameters } from '@powersync/service-sync-rules';
+import { BucketDescription, BucketPriority, RequestParameters, SqlSyncRules } from '@powersync/service-sync-rules';
 import { Semaphore, withTimeout } from 'async-mutex';
 
 import { AbortError } from 'ix/aborterror.js';
@@ -9,9 +9,10 @@ import * as storage from '../storage/storage-index.js';
 import * as util from '../util/util-index.js';
 
 import { logger } from '@powersync/lib-services-framework';
+import { BucketChecksumState } from './BucketChecksumState.js';
 import { mergeAsyncIterables } from './merge.js';
 import { RequestTracker } from './RequestTracker.js';
-import {
+import { acquireSemaphoreAbortable, settledPromise, tokenStream, TokenStreamOptions } from './util.js';
 
 /**
  * Maximum number of connections actively fetching data.
@@ -32,11 +33,11 @@ const syncSemaphore = withTimeout(
 );
 
 export interface SyncStreamParameters {
-
+  bucketStorage: storage.SyncRulesBucketStorage;
+  syncRules: SqlSyncRules;
   params: util.StreamingSyncRequest;
   syncParams: RequestParameters;
   token: auth.JwtPayload;
-  parseOptions: storage.ParseSyncRulesOptions;
   /**
   * If this signal is aborted, the stream response ends as soon as possible, without error.
   */
@@ -49,7 +50,7 @@ export interface SyncStreamParameters {
 export async function* streamResponse(
   options: SyncStreamParameters
 ): AsyncIterable<util.StreamingSyncLine | string | null> {
-  const {
+  const { bucketStorage, syncRules, params, syncParams, token, tokenStreamOptions, tracker, signal } = options;
   // We also need to be able to abort, so we create our own controller.
   const controller = new AbortController();
   if (signal) {
@@ -65,7 +66,7 @@ export async function* streamResponse(
     }
   }
   const ki = tokenStream(token, controller.signal, tokenStreamOptions);
-  const stream = streamResponseInner(
+  const stream = streamResponseInner(bucketStorage, syncRules, params, syncParams, tracker, controller.signal);
   // Merge the two streams, and abort as soon as one of the streams end.
   const merged = mergeAsyncIterables([stream, ki], controller.signal);
 
@@ -84,157 +85,164 @@ export async function* streamResponse(
   }
 }
 
+export type BucketSyncState = {
+  description?: BucketDescription; // Undefined if the bucket has not yet been resolved by us.
+  start_op_id: string;
+};
+
 async function* streamResponseInner(
-
+  bucketStorage: storage.SyncRulesBucketStorage,
+  syncRules: SqlSyncRules,
   params: util.StreamingSyncRequest,
   syncParams: RequestParameters,
   tracker: RequestTracker,
-  parseOptions: storage.ParseSyncRulesOptions,
   signal: AbortSignal
 ): AsyncGenerator<util.StreamingSyncLine | string | null> {
-  // Bucket state of bucket id -> op_id.
-  // This starts with the state from the client. May contain buckets that the user do not have access to (anymore).
-  let dataBuckets = new Map<string, string>();
-
-  let lastChecksums: util.ChecksumMap | null = null;
-  let lastWriteCheckpoint: bigint | null = null;
-
   const { raw_data, binary_data } = params;
 
-  if (params.buckets) {
-    for (let { name, after: start } of params.buckets) {
-      dataBuckets.set(name, start);
-    }
-  }
-
   const checkpointUserId = util.checkpointUserId(syncParams.token_parameters.user_id as string, params.client_id);
-  const stream = storage.watchWriteCheckpoint(checkpointUserId, signal);
-  for await (const next of stream) {
-    const { base, writeCheckpoint } = next;
-    const checkpoint = base.checkpoint;
-
-    const storage = await base.getBucketStorage();
-    if (storage == null) {
-      // Sync rules deleted in the meantime - try again with the next checkpoint.
-      continue;
-    }
-    const syncRules = storage.getParsedSyncRules(parseOptions);
 
-
-
-
-
-
-
-
-
-
-
-
-      buckets: allBuckets.length
-    });
-    // TODO: Limit number of buckets even before we get to this point
-    throw new Error(`Too many buckets: ${allBuckets.length}`);
-  }
+  const checksumState = new BucketChecksumState({
+    bucketStorage,
+    syncRules,
+    syncParams,
+    initialBucketPositions: params.buckets
+  });
+  const stream = bucketStorage.watchWriteCheckpoint({
+    user_id: checkpointUserId,
+    signal
+  });
+  const newCheckpoints = stream[Symbol.asyncIterator]();
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  try {
+    let nextCheckpointPromise:
+      | Promise<PromiseSettledResult<IteratorResult<storage.StorageCheckpointUpdate>>>
+      | undefined;
+
+    do {
+      if (!nextCheckpointPromise) {
+        // Wrap in a settledPromise, so that abort errors after the parent stopped iterating
+        // does not result in uncaught errors.
+        nextCheckpointPromise = settledPromise(newCheckpoints.next());
+      }
+      const next = await nextCheckpointPromise;
+      nextCheckpointPromise = undefined;
+      if (next.status == 'rejected') {
+        throw next.reason;
+      }
+      if (next.value.done) {
+        break;
+      }
+      const line = await checksumState.buildNextCheckpointLine(next.value.value);
+      if (line == null) {
+        // No update to sync
        continue;
      }
-    bucketsToFetch = diff.updatedBuckets.map((c) => c.bucket);
-
-    let message = `Updated checkpoint: ${checkpoint} | `;
-    message += `write: ${writeCheckpoint} | `;
-    message += `buckets: ${allBuckets.length} | `;
-    message += `updated: ${limitedBuckets(diff.updatedBuckets, 20)} | `;
-    message += `removed: ${limitedBuckets(diff.removedBuckets, 20)}`;
-    logger.info(message, {
-      checkpoint,
-      user_id: syncParams.user_id,
-      buckets: allBuckets.length,
-      updated: diff.updatedBuckets.length,
-      removed: diff.removedBuckets.length
-    });
 
-    const
-
-
-
-
-
+      const { checkpointLine, bucketsToFetch } = line;
+
+      yield checkpointLine;
+      // Start syncing data for buckets up to the checkpoint. As soon as we have completed at least one priority and
+      // at least 1000 operations, we also start listening for new checkpoints concurrently. When a new checkpoint comes
+      // in while we're still busy syncing data for lower priorities, interrupt the current operation and start syncing
+      // the new checkpoint.
+      const abortCheckpointController = new AbortController();
+      let syncedOperations = 0;
+
+      const abortCheckpointSignal = AbortSignal.any([abortCheckpointController.signal, signal]);
+
+      const bucketsByPriority = [...Map.groupBy(bucketsToFetch, (bucket) => bucket.priority).entries()];
+      bucketsByPriority.sort((a, b) => a[0] - b[0]); // Sort from high to lower priorities
+      const lowestPriority = bucketsByPriority.at(-1)?.[0];
+
+      // Ensure that we have at least one priority batch: After sending the checkpoint line, clients expect to
+      // receive a sync complete message after the synchronization is done (which happens in the last
+      // bucketDataInBatches iteration). Without any batch, the line is missing and clients might not complete their
+      // sync properly.
+      const priorityBatches: [BucketPriority | null, BucketDescription[]][] = bucketsByPriority;
+      if (priorityBatches.length == 0) {
+        priorityBatches.push([null, []]);
+      }
+
+      function maybeRaceForNewCheckpoint() {
+        if (syncedOperations >= 1000 && nextCheckpointPromise === undefined) {
+          nextCheckpointPromise = (async () => {
+            const next = await settledPromise(newCheckpoints.next());
+            if (next.status == 'rejected') {
+              abortCheckpointController.abort();
+            } else if (!next.value.done) {
+              // Stop the running bucketDataInBatches() iterations, making the main flow reach the new checkpoint.
+              abortCheckpointController.abort();
+            }
+
+            return next;
+          })();
        }
-  }
+      }
 
-
-
-
-
-
-
-
-
-
-
+      function markOperationsSent(operations: number) {
+        syncedOperations += operations;
+        tracker.addOperationsSynced(operations);
+        maybeRaceForNewCheckpoint();
+      }
+
+      // This incrementally updates dataBuckets with each individual bucket position.
+      // At the end of this, we can be sure that all buckets have data up to the checkpoint.
+      for (const [priority, buckets] of priorityBatches) {
+        const isLast = priority === lowestPriority;
+        if (abortCheckpointSignal.aborted) {
+          break;
        }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+        yield* bucketDataInBatches({
+          bucketStorage: bucketStorage,
+          checkpoint: next.value.value.base.checkpoint,
+          bucketsToFetch: buckets,
+          checksumState,
+          raw_data,
+          binary_data,
+          onRowsSent: markOperationsSent,
+          abort_connection: signal,
+          abort_batch: abortCheckpointSignal,
+          user_id: syncParams.user_id,
+          // Passing null here will emit a full sync complete message at the end. If we pass a priority, we'll emit a partial
+          // sync complete message instead.
+          forPriority: !isLast ? priority : null
+        });
+      }
+
+      if (!abortCheckpointSignal.aborted) {
+        await new Promise((resolve) => setTimeout(resolve, 10));
+      }
+    } while (!signal.aborted);
+  } finally {
+    await newCheckpoints.return?.();
  }
 }
 
 interface BucketDataRequest {
-
+  bucketStorage: storage.SyncRulesBucketStorage;
   checkpoint: string;
-  bucketsToFetch:
-  /**
-
+  bucketsToFetch: BucketDescription[];
+  /** Contains current bucket state. Modified by the request as data is sent. */
+  checksumState: BucketChecksumState;
   raw_data: boolean | undefined;
   binary_data: boolean | undefined;
-
-
+  /** Signals that the connection was aborted and that streaming should stop ASAP. */
+  abort_connection: AbortSignal;
+  /**
+   * Signals that higher-priority batches are available. The current batch can stop at a sensible point.
+   * This signal also fires when abort_connection fires.
+   */
+  abort_batch: AbortSignal;
   user_id?: string;
+  forPriority: BucketPriority | null;
+  onRowsSent: (amount: number) => void;
 }
 
 async function* bucketDataInBatches(request: BucketDataRequest) {
   let isDone = false;
-  while (!request.
+  while (!request.abort_batch.aborted && !isDone) {
     // The code below is functionally the same as this for-await loop below.
     // However, the for-await loop appears to have a memory leak, so we avoid it.
     // for await (const { done, data } of bucketDataBatch(storage, checkpoint, dataBuckets, raw_data, signal)) {
@@ -273,7 +281,17 @@ interface BucketDataBatchResult {
  * Extracted as a separate internal function just to avoid memory leaks.
  */
 async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<BucketDataBatchResult, void> {
-  const {
+  const {
+    bucketStorage: storage,
+    checkpoint,
+    bucketsToFetch,
+    checksumState,
+    raw_data,
+    binary_data,
+    abort_connection,
+    abort_batch,
+    onRowsSent
+  } = request;
 
   const checkpointOp = BigInt(checkpoint);
   let checkpointInvalidated = false;
@@ -281,7 +299,12 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
   if (syncSemaphore.isLocked()) {
     logger.info('Sync concurrency limit reached, waiting for lock', { user_id: request.user_id });
   }
-  const
+  const acquired = await acquireSemaphoreAbortable(syncSemaphore, AbortSignal.any([abort_batch]));
+  if (acquired === 'aborted') {
+    return;
+  }
+
+  const [value, release] = acquired;
   try {
     if (value <= 3) {
       // This can be noisy, so we only log when we get close to the
@@ -293,13 +316,14 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
     }
     // Optimization: Only fetch buckets for which the checksums have changed since the last checkpoint
     // For the first batch, this will be all buckets.
-    const filteredBuckets =
-    const
+    const filteredBuckets = checksumState.getFilteredBucketPositions(bucketsToFetch);
+    const dataBatches = storage.getBucketDataBatch(checkpoint, filteredBuckets);
 
     let has_more = false;
 
-    for await (let { batch: r, targetOp } of
-      if
+    for await (let { batch: r, targetOp } of dataBatches) {
+      // Abort in current batch if the connection is closed
+      if (abort_connection.aborted) {
        return;
      }
      if (r.has_more) {
@@ -339,9 +363,15 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
        // iterator memory in case if large data sent.
        yield { data: null, done: false };
      }
-
+      onRowsSent(r.data.length);
+
+      checksumState.updateBucketPosition({ bucket: r.bucket, nextAfter: r.next_after, hasMore: r.has_more });
 
-
+      // Check if syncing bucket data is supposed to stop before fetching more data
+      // from storage.
+      if (abort_batch.aborted) {
+        return;
+      }
    }
 
    if (!has_more) {
@@ -351,12 +381,22 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
        // More data should be available immediately for a new checkpoint.
        yield { data: null, done: true };
      } else {
-
-
-
-
-
-
+        if (request.forPriority != null) {
+          const line: util.StreamingSyncCheckpointPartiallyComplete = {
+            partial_checkpoint_complete: {
+              last_op_id: checkpoint,
+              priority: request.forPriority
+            }
+          };
+          yield { data: line, done: true };
+        } else {
+          const line: util.StreamingSyncCheckpointComplete = {
+            checkpoint_complete: {
+              last_op_id: checkpoint
+            }
+          };
+          yield { data: line, done: true };
+        }
      }
    }
  } finally {
@@ -383,18 +423,3 @@ function transformLegacyResponse(bucketData: util.SyncBucketData): any {
    })
  };
 }
-
-function limitedBuckets(buckets: string[] | util.BucketChecksum[], limit: number) {
-  buckets = buckets.map((b) => {
-    if (typeof b != 'string') {
-      return b.bucket;
-    } else {
-      return b;
-    }
-  });
-  if (buckets.length <= limit) {
-    return JSON.stringify(buckets);
-  }
-  const limited = buckets.slice(0, limit);
-  return `${JSON.stringify(limited)}...`;
-}
package/src/sync/util.ts
CHANGED

@@ -2,6 +2,7 @@ import * as timers from 'timers/promises';
 
 import * as util from '../util/util-index.js';
 import { RequestTracker } from './RequestTracker.js';
+import { SemaphoreInterface } from 'async-mutex';
 
 export type TokenStreamOptions = {
   /**
@@ -99,3 +100,56 @@ export async function* transformToBytesTracked(
    yield encoded;
  }
 }
+
+export function acquireSemaphoreAbortable(
+  semaphone: SemaphoreInterface,
+  abort: AbortSignal
+): Promise<[number, SemaphoreInterface.Releaser] | 'aborted'> {
+  return new Promise((resolve, reject) => {
+    let aborted = false;
+    let hasSemaphore = false;
+
+    const listener = () => {
+      if (!hasSemaphore) {
+        aborted = true;
+        abort.removeEventListener('abort', listener);
+        resolve('aborted');
+      }
+    };
+    abort.addEventListener('abort', listener);
+
+    semaphone.acquire().then((acquired) => {
+      hasSemaphore = true;
+      if (aborted) {
+        // Release semaphore, already aborted
+        acquired[1]();
+      } else {
+        abort.removeEventListener('abort', listener);
+        resolve(acquired);
+      }
+    }, reject);
+  });
+}
+
+/**
+ * Wrap a promise in the style of Promise.allSettled.
+ *
+ * This is specifically useful if rejections should not be treated as uncaught rejections
+ * if it is not specifically handled.
+ */
+export function settledPromise<T>(promise: Promise<T>): Promise<PromiseSettledResult<T>> {
+  return promise.then(
+    (result) => {
+      return {
+        status: 'fulfilled',
+        value: result
+      };
+    },
+    (error) => {
+      return {
+        status: 'rejected',
+        reason: error
+      };
+    }
+  );
+}
package/src/util/checkpointing.ts
CHANGED

@@ -1,6 +1,6 @@
-import { ErrorCode,
+import { ErrorCode, ServiceError } from '@powersync/lib-services-framework';
 import { RouteAPI } from '../api/RouteAPI.js';
-import { BucketStorageFactory } from '../storage/
+import { BucketStorageFactory } from '../storage/storage-index.js';
 
 export interface CreateWriteCheckpointOptions {
   userId: string | undefined;
@@ -11,13 +11,11 @@ export interface CreateWriteCheckpointOptions {
 export async function createWriteCheckpoint(options: CreateWriteCheckpointOptions) {
   const full_user_id = checkpointUserId(options.userId, options.clientId);
 
-  const
-  if (!
+  const syncBucketStorage = await options.storage.getActiveStorage();
+  if (!syncBucketStorage) {
    throw new ServiceError(ErrorCode.PSYNC_S2302, `Cannot create Write Checkpoint since no sync rules are active.`);
  }
 
-  using syncBucketStorage = options.storage.getInstance(activeSyncRules);
-
  const { writeCheckpoint, currentCheckpoint } = await options.api.createReplicationHead(async (currentCheckpoint) => {
    const writeCheckpoint = await syncBucketStorage.createManagedWriteCheckpoint({
      user_id: full_user_id,
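
Note: a minimal sketch of the simplified lookup used above. The factory's getActiveStorage() now returns the active SyncRulesBucketStorage (or a null-ish value) directly, so callers no longer resolve the active sync rules and call getInstance() themselves. requireActiveStorage is a hypothetical helper written for illustration; the imports, error code, and message are the ones shown in the diff.

import { ErrorCode, ServiceError } from '@powersync/lib-services-framework';
import { BucketStorageFactory } from '../storage/storage-index.js';

// Hypothetical helper: resolve the active storage or fail the same way createWriteCheckpoint does.
async function requireActiveStorage(factory: BucketStorageFactory) {
  const syncBucketStorage = await factory.getActiveStorage();
  if (!syncBucketStorage) {
    throw new ServiceError(ErrorCode.PSYNC_S2302, `Cannot create Write Checkpoint since no sync rules are active.`);
  }
  return syncBucketStorage;
}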
package/src/util/protocol-types.ts
CHANGED

@@ -1,5 +1,5 @@
 import * as t from 'ts-codec';
-import { SqliteJsonValue } from '@powersync/service-sync-rules';
+import { BucketDescription, BucketPriority, SqliteJsonValue } from '@powersync/service-sync-rules';
 
 export const BucketRequest = t.object({
   name: t.string,
@@ -59,7 +59,7 @@ export interface StreamingSyncCheckpointDiff {
   checkpoint_diff: {
     last_op_id: OpId;
     write_checkpoint?: OpId;
-    updated_buckets:
+    updated_buckets: BucketChecksumWithDescription[];
     removed_buckets: string[];
   };
 }
@@ -74,13 +74,23 @@ export interface StreamingSyncCheckpointComplete {
   };
 }
 
-export interface
+export interface StreamingSyncCheckpointPartiallyComplete {
+  partial_checkpoint_complete: {
+    last_op_id: OpId;
+    priority: BucketPriority;
+  };
+}
+
+export interface StreamingSyncKeepalive {
+  token_expires_in: number;
+}
 
 export type StreamingSyncLine =
   | StreamingSyncData
   | StreamingSyncCheckpoint
   | StreamingSyncCheckpointDiff
   | StreamingSyncCheckpointComplete
+  | StreamingSyncCheckpointPartiallyComplete
   | StreamingSyncKeepalive;
 
 /**
@@ -91,7 +101,7 @@ export type OpId = string;
 export interface Checkpoint {
   last_op_id: OpId;
   write_checkpoint?: OpId;
-  buckets:
+  buckets: BucketChecksumWithDescription[];
 }
 
 export interface BucketState {
@@ -142,3 +152,5 @@ export interface BucketChecksum {
   */
  count: number;
 }
+
+export interface BucketChecksumWithDescription extends BucketChecksum, BucketDescription {}
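
Note: a short sketch of how a consumer of these protocol types might branch on the new line. handleLine and onProgress are hypothetical, and the relative import path is an assumption; the interfaces themselves are the ones declared above.

import type { StreamingSyncLine } from './protocol-types.js';

function handleLine(line: StreamingSyncLine, onProgress: (message: string) => void) {
  if ('partial_checkpoint_complete' in line) {
    // Buckets for this priority are synced through last_op_id; lower priorities may still follow.
    const { priority, last_op_id } = line.partial_checkpoint_complete;
    onProgress(`priority ${priority} complete at op ${last_op_id}`);
  } else if ('checkpoint_complete' in line) {
    onProgress(`checkpoint complete at op ${line.checkpoint_complete.last_op_id}`);
  }
}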
package/test/src/auth.test.ts
CHANGED

@@ -274,7 +274,7 @@ describe('JWT Auth', () => {
    ).rejects.toThrow('Token must expire in a maximum of');
  });
 
-  test('http', async () => {
+  test('http', { timeout: 20_000 }, async () => {
    // Not ideal to rely on an external endpoint for tests, but it is good to test that this
    // one actually works.
    const remote = new RemoteJWKSCollector(
@@ -290,9 +290,9 @@ describe('JWT Auth', () => {
        reject_ip_ranges: ['local']
      }
    });
-    expect(invalid.getKeys()).rejects.toThrow('IPs in this range are not supported');
+    await expect(invalid.getKeys()).rejects.toThrow('IPs in this range are not supported');
 
-    //
+    // IPs throw an error immediately
    expect(
      () =>
        new RemoteJWKSCollector('https://127.0.0.1/.well-known/jwks.json', {
@@ -315,11 +315,11 @@ describe('JWT Auth', () => {
 
    const invalid = new RemoteJWKSCollector('https://127.0.0.1/.well-known/jwks.json');
    // Should try and fetch
-    expect(invalid.getKeys()).rejects.toThrow();
+    await expect(invalid.getKeys()).rejects.toThrow();
 
    const invalid2 = new RemoteJWKSCollector('https://localhost/.well-known/jwks.json');
    // Should try and fetch
-    expect(invalid2.getKeys()).rejects.toThrow();
+    await expect(invalid2.getKeys()).rejects.toThrow();
  });
 
  test('caching', async () => {