@powersync/service-core 0.0.0-dev-20250507151436 → 0.0.0-dev-20250611110033
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +47 -7
- package/dist/api/RouteAPI.d.ts +1 -5
- package/dist/api/diagnostics.js +1 -1
- package/dist/api/diagnostics.js.map +1 -1
- package/dist/auth/CachedKeyCollector.js +2 -7
- package/dist/auth/CachedKeyCollector.js.map +1 -1
- package/dist/auth/CompoundKeyCollector.js.map +1 -1
- package/dist/auth/KeyCollector.d.ts +2 -2
- package/dist/auth/KeyStore.js +32 -14
- package/dist/auth/KeyStore.js.map +1 -1
- package/dist/auth/RemoteJWKSCollector.d.ts +1 -0
- package/dist/auth/RemoteJWKSCollector.js +39 -16
- package/dist/auth/RemoteJWKSCollector.js.map +1 -1
- package/dist/auth/auth-index.d.ts +1 -0
- package/dist/auth/auth-index.js +1 -0
- package/dist/auth/auth-index.js.map +1 -1
- package/dist/auth/utils.d.ts +6 -0
- package/dist/auth/utils.js +97 -0
- package/dist/auth/utils.js.map +1 -0
- package/dist/index.d.ts +1 -0
- package/dist/index.js +1 -0
- package/dist/index.js.map +1 -1
- package/dist/metrics/open-telemetry/OpenTelemetryMetricsFactory.d.ts +1 -1
- package/dist/metrics/open-telemetry/OpenTelemetryMetricsFactory.js.map +1 -1
- package/dist/replication/AbstractReplicationJob.d.ts +4 -0
- package/dist/replication/AbstractReplicationJob.js.map +1 -1
- package/dist/replication/AbstractReplicator.d.ts +23 -0
- package/dist/replication/AbstractReplicator.js +45 -0
- package/dist/replication/AbstractReplicator.js.map +1 -1
- package/dist/replication/RelationCache.d.ts +9 -0
- package/dist/replication/RelationCache.js +20 -0
- package/dist/replication/RelationCache.js.map +1 -0
- package/dist/replication/replication-index.d.ts +1 -0
- package/dist/replication/replication-index.js +1 -0
- package/dist/replication/replication-index.js.map +1 -1
- package/dist/replication/replication-metrics.js +6 -0
- package/dist/replication/replication-metrics.js.map +1 -1
- package/dist/routes/RouterEngine.js +1 -1
- package/dist/routes/RouterEngine.js.map +1 -1
- package/dist/routes/auth.d.ts +5 -16
- package/dist/routes/auth.js +6 -4
- package/dist/routes/auth.js.map +1 -1
- package/dist/routes/configure-fastify.d.ts +3 -21
- package/dist/routes/configure-fastify.js +3 -2
- package/dist/routes/configure-fastify.js.map +1 -1
- package/dist/routes/configure-rsocket.js +28 -11
- package/dist/routes/configure-rsocket.js.map +1 -1
- package/dist/routes/endpoints/admin.js +2 -0
- package/dist/routes/endpoints/admin.js.map +1 -1
- package/dist/routes/endpoints/checkpointing.d.ts +4 -28
- package/dist/routes/endpoints/socket-route.js +22 -8
- package/dist/routes/endpoints/socket-route.js.map +1 -1
- package/dist/routes/endpoints/sync-rules.js +6 -6
- package/dist/routes/endpoints/sync-rules.js.map +1 -1
- package/dist/routes/endpoints/sync-stream.d.ts +2 -14
- package/dist/routes/endpoints/sync-stream.js +28 -9
- package/dist/routes/endpoints/sync-stream.js.map +1 -1
- package/dist/routes/route-register.js +10 -6
- package/dist/routes/route-register.js.map +1 -1
- package/dist/routes/router.d.ts +7 -3
- package/dist/routes/router.js.map +1 -1
- package/dist/storage/BucketStorageBatch.d.ts +17 -1
- package/dist/storage/BucketStorageBatch.js +2 -1
- package/dist/storage/BucketStorageBatch.js.map +1 -1
- package/dist/storage/PersistedSyncRulesContent.d.ts +5 -0
- package/dist/storage/SourceTable.d.ts +17 -1
- package/dist/storage/SourceTable.js +28 -0
- package/dist/storage/SourceTable.js.map +1 -1
- package/dist/storage/SyncRulesBucketStorage.d.ts +11 -2
- package/dist/storage/SyncRulesBucketStorage.js.map +1 -1
- package/dist/storage/bson.js +4 -1
- package/dist/storage/bson.js.map +1 -1
- package/dist/sync/BucketChecksumState.d.ts +40 -18
- package/dist/sync/BucketChecksumState.js +122 -74
- package/dist/sync/BucketChecksumState.js.map +1 -1
- package/dist/sync/RequestTracker.d.ts +22 -1
- package/dist/sync/RequestTracker.js +51 -2
- package/dist/sync/RequestTracker.js.map +1 -1
- package/dist/sync/sync.d.ts +3 -5
- package/dist/sync/sync.js +49 -34
- package/dist/sync/sync.js.map +1 -1
- package/dist/util/config/collectors/config-collector.js +2 -5
- package/dist/util/config/collectors/config-collector.js.map +1 -1
- package/dist/util/protocol-types.d.ts +9 -9
- package/dist/util/protocol-types.js.map +1 -1
- package/dist/util/utils.d.ts +1 -1
- package/package.json +6 -7
- package/src/api/RouteAPI.ts +1 -6
- package/src/api/diagnostics.ts +1 -1
- package/src/auth/CachedKeyCollector.ts +4 -6
- package/src/auth/CompoundKeyCollector.ts +2 -1
- package/src/auth/KeyCollector.ts +2 -2
- package/src/auth/KeyStore.ts +45 -20
- package/src/auth/RemoteJWKSCollector.ts +39 -16
- package/src/auth/auth-index.ts +1 -0
- package/src/auth/utils.ts +102 -0
- package/src/index.ts +2 -0
- package/src/metrics/open-telemetry/OpenTelemetryMetricsFactory.ts +3 -3
- package/src/replication/AbstractReplicationJob.ts +5 -0
- package/src/replication/AbstractReplicator.ts +47 -0
- package/src/replication/RelationCache.ts +25 -0
- package/src/replication/replication-index.ts +1 -0
- package/src/replication/replication-metrics.ts +7 -0
- package/src/routes/RouterEngine.ts +1 -1
- package/src/routes/auth.ts +7 -6
- package/src/routes/configure-fastify.ts +6 -3
- package/src/routes/configure-rsocket.ts +33 -14
- package/src/routes/endpoints/admin.ts +2 -0
- package/src/routes/endpoints/socket-route.ts +24 -8
- package/src/routes/endpoints/sync-rules.ts +6 -6
- package/src/routes/endpoints/sync-stream.ts +31 -8
- package/src/routes/route-register.ts +10 -7
- package/src/routes/router.ts +9 -3
- package/src/storage/BucketStorageBatch.ts +22 -2
- package/src/storage/PersistedSyncRulesContent.ts +6 -0
- package/src/storage/SourceTable.ts +44 -1
- package/src/storage/SyncRulesBucketStorage.ts +14 -2
- package/src/storage/bson.ts +4 -1
- package/src/sync/BucketChecksumState.ts +162 -77
- package/src/sync/RequestTracker.ts +70 -3
- package/src/sync/sync.ts +72 -49
- package/src/util/config/collectors/config-collector.ts +3 -7
- package/src/util/protocol-types.ts +15 -10
- package/test/src/auth.test.ts +29 -11
- package/test/src/sync/BucketChecksumState.test.ts +32 -18
- package/tsconfig.tsbuildinfo +1 -1
package/src/sync/sync.ts
CHANGED

@@ -7,12 +7,12 @@ import * as auth from '../auth/auth-index.js';
 import * as storage from '../storage/storage-index.js';
 import * as util from '../util/util-index.js';
 
-import { logger } from '@powersync/lib-services-framework';
-import { BucketChecksumState } from './BucketChecksumState.js';
+import { Logger, logger as defaultLogger } from '@powersync/lib-services-framework';
+import { BucketChecksumState, CheckpointLine } from './BucketChecksumState.js';
 import { mergeAsyncIterables } from '../streams/streams-index.js';
 import { acquireSemaphoreAbortable, settledPromise, tokenStream, TokenStreamOptions } from './util.js';
 import { SyncContext } from './SyncContext.js';
-import { RequestTracker } from './RequestTracker.js';
+import { OperationsSentStats, RequestTracker, statsForBatch } from './RequestTracker.js';
 
 export interface SyncStreamParameters {
   syncContext: SyncContext;
@@ -21,6 +21,7 @@ export interface SyncStreamParameters {
   params: util.StreamingSyncRequest;
   syncParams: RequestParameters;
   token: auth.JwtPayload;
+  logger?: Logger;
   /**
    * If this signal is aborted, the stream response ends as soon as possible, without error.
    */
@@ -35,6 +36,8 @@ export async function* streamResponse(
 ): AsyncIterable<util.StreamingSyncLine | string | null> {
   const { syncContext, bucketStorage, syncRules, params, syncParams, token, tokenStreamOptions, tracker, signal } =
     options;
+  const logger = options.logger ?? defaultLogger;
+
   // We also need to be able to abort, so we create our own controller.
   const controller = new AbortController();
   if (signal) {
@@ -57,7 +60,8 @@ export async function* streamResponse(
     params,
     syncParams,
     tracker,
-    controller.signal
+    controller.signal,
+    logger
   );
   // Merge the two streams, and abort as soon as one of the streams end.
   const merged = mergeAsyncIterables([stream, ki], controller.signal);
@@ -77,11 +81,6 @@
   }
 }
 
-export type BucketSyncState = {
-  description?: BucketDescription; // Undefined if the bucket has not yet been resolved by us.
-  start_op_id: util.InternalOpId;
-};
-
 async function* streamResponseInner(
   syncContext: SyncContext,
   bucketStorage: storage.SyncRulesBucketStorage,
@@ -89,11 +88,12 @@ async function* streamResponseInner(
   params: util.StreamingSyncRequest,
   syncParams: RequestParameters,
   tracker: RequestTracker,
-  signal: AbortSignal
+  signal: AbortSignal,
+  logger: Logger
 ): AsyncGenerator<util.StreamingSyncLine | string | null> {
   const { raw_data, binary_data } = params;
 
-  const checkpointUserId = util.checkpointUserId(syncParams.
+  const checkpointUserId = util.checkpointUserId(syncParams.tokenParameters.user_id as string, params.client_id);
 
   const checksumState = new BucketChecksumState({
     syncContext,
@@ -103,7 +103,8 @@
     initialBucketPositions: params.buckets?.map((bucket) => ({
       name: bucket.name,
       after: BigInt(bucket.after)
-    }))
+    })),
+    logger: logger
   });
   const stream = bucketStorage.watchCheckpointChanges({
     user_id: checkpointUserId,
@@ -111,16 +112,29 @@
   });
   const newCheckpoints = stream[Symbol.asyncIterator]();
 
+  type CheckpointAndLine = {
+    checkpoint: bigint;
+    line: CheckpointLine | null;
+  };
+
+  async function waitForNewCheckpointLine(): Promise<IteratorResult<CheckpointAndLine>> {
+    const next = await newCheckpoints.next();
+    if (next.done) {
+      return { done: true, value: undefined };
+    }
+
+    const line = await checksumState.buildNextCheckpointLine(next.value);
+    return { done: false, value: { checkpoint: next.value.base.checkpoint, line } };
+  }
+
   try {
-    let nextCheckpointPromise:
-      | Promise<PromiseSettledResult<IteratorResult<storage.StorageCheckpointUpdate>>>
-      | undefined;
+    let nextCheckpointPromise: Promise<PromiseSettledResult<IteratorResult<CheckpointAndLine>>> | undefined;
 
     do {
       if (!nextCheckpointPromise) {
        // Wrap in a settledPromise, so that abort errors after the parent stopped iterating
        // does not result in uncaught errors.
-        nextCheckpointPromise = settledPromise(
+        nextCheckpointPromise = settledPromise(waitForNewCheckpointLine());
      }
      const next = await nextCheckpointPromise;
      nextCheckpointPromise = undefined;
@@ -130,7 +144,7 @@ async function* streamResponseInner(
      if (next.value.done) {
        break;
      }
-      const line =
+      const line = next.value.value.line;
      if (line == null) {
        // No update to sync
        continue;
@@ -138,7 +152,10 @@
 
      const { checkpointLine, bucketsToFetch } = line;
 
+      // Since yielding can block, we update the state just before yielding the line.
+      line.advance();
      yield checkpointLine;
+
      // Start syncing data for buckets up to the checkpoint. As soon as we have completed at least one priority and
      // at least 1000 operations, we also start listening for new checkpoints concurrently. When a new checkpoint comes
      // in while we're still busy syncing data for lower priorities, interrupt the current operation and start syncing
@@ -164,30 +181,32 @@
      function maybeRaceForNewCheckpoint() {
        if (syncedOperations >= 1000 && nextCheckpointPromise === undefined) {
          nextCheckpointPromise = (async () => {
-
-
-
-
-
-
+            while (true) {
+              const next = await settledPromise(waitForNewCheckpointLine());
+              if (next.status == 'rejected') {
+                abortCheckpointController.abort();
+              } else if (!next.value.done) {
+                if (next.value.value.line == null) {
+                  // There's a new checkpoint that doesn't affect this sync stream. Keep listening, but don't
+                  // interrupt this batch.
+                  continue;
+                }
+
+                // A new sync line can be emitted. Stop running the bucketDataInBatches() iterations, making the
+                // main flow reach the new checkpoint.
+                abortCheckpointController.abort();
+              }
+
+              return next;
            }
-
-            return next;
          })();
        }
      }
 
-      function markOperationsSent(
-      syncedOperations +=
-      tracker.addOperationsSynced(
-
-      // There is a bug with interrupting checkpoints where:
-      // 1. User is in the middle of syncing a large batch of data (for example initial sync).
-      // 2. A new checkpoint comes in, which interrupts the batch.
-      // 3. However, the new checkpoint does not contain any new data for this connection, so nothing further is synced.
-      // This then causes the client to wait indefinitely for the remaining data or checkpoint_complete message. That is
-      // only resolved when a new checkpoint comes in that does have data for this connection, or the connection is restarted.
-      // maybeRaceForNewCheckpoint();
+      function markOperationsSent(stats: OperationsSentStats) {
+        syncedOperations += stats.total;
+        tracker.addOperationsSynced(stats);
+        maybeRaceForNewCheckpoint();
      }
 
      // This incrementally updates dataBuckets with each individual bucket position.
@@ -201,18 +220,19 @@
        yield* bucketDataInBatches({
          syncContext: syncContext,
          bucketStorage: bucketStorage,
-          checkpoint: next.value.value.
+          checkpoint: next.value.value.checkpoint,
          bucketsToFetch: buckets,
-
+          checkpointLine: line,
          raw_data,
          binary_data,
          onRowsSent: markOperationsSent,
          abort_connection: signal,
          abort_batch: abortCheckpointSignal,
-          user_id: syncParams.
+          user_id: syncParams.userId,
          // Passing null here will emit a full sync complete message at the end. If we pass a priority, we'll emit a partial
          // sync complete message instead.
-          forPriority: !isLast ? priority : null
+          forPriority: !isLast ? priority : null,
+          logger
        });
      }
 
@@ -229,9 +249,10 @@ interface BucketDataRequest {
   syncContext: SyncContext;
   bucketStorage: storage.SyncRulesBucketStorage;
   checkpoint: util.InternalOpId;
+  /** Contains current bucket state. Modified by the request as data is sent. */
+  checkpointLine: CheckpointLine;
+  /** Subset of checkpointLine.bucketsToFetch, filtered by priority. */
   bucketsToFetch: BucketDescription[];
-  /** Contains current bucket state. Modified by the request as data is sent. */
-  checksumState: BucketChecksumState;
   raw_data: boolean | undefined;
   binary_data: boolean | undefined;
   /** Signals that the connection was aborted and that streaming should stop ASAP. */
@@ -243,7 +264,8 @@ interface BucketDataRequest {
   abort_batch: AbortSignal;
   user_id?: string;
   forPriority: BucketPriority | null;
-  onRowsSent: (
+  onRowsSent: (stats: OperationsSentStats) => void;
+  logger: Logger;
 }
 
 async function* bucketDataInBatches(request: BucketDataRequest) {
@@ -292,12 +314,13 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
     bucketStorage: storage,
     checkpoint,
     bucketsToFetch,
-
+    checkpointLine,
     raw_data,
     binary_data,
     abort_connection,
     abort_batch,
-    onRowsSent
+    onRowsSent,
+    logger
   } = request;
 
   let checkpointInvalidated = false;
@@ -322,7 +345,7 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
   }
   // Optimization: Only fetch buckets for which the checksums have changed since the last checkpoint
   // For the first batch, this will be all buckets.
-  const filteredBuckets =
+  const filteredBuckets = checkpointLine.getFilteredBucketPositions(bucketsToFetch);
   const dataBatches = storage.getBucketDataBatch(checkpoint, filteredBuckets);
 
   let has_more = false;
@@ -369,9 +392,9 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
       // iterator memory in case if large data sent.
       yield { data: null, done: false };
     }
-    onRowsSent(r
+    onRowsSent(statsForBatch(r));
 
-
+    checkpointLine.updateBucketPosition({ bucket: r.bucket, nextAfter: BigInt(r.next_after), hasMore: r.has_more });
 
     // Check if syncing bucket data is supposed to stop before fetching more data
     // from storage.
@@ -417,7 +440,7 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
   }
 }
 
-function transformLegacyResponse(bucketData: util.SyncBucketData):
+function transformLegacyResponse(bucketData: util.SyncBucketData): util.SyncBucketData<util.ProtocolOplogData> {
   return {
     ...bucketData,
     data: bucketData.data.map((entry) => {
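
The new optional `logger` option threads a per-request logger through `streamResponse`, `BucketChecksumState`, and `bucketDataInBatches`. A minimal caller sketch, assuming it sits next to `sync.ts`; `makeRequestLogger` is a hypothetical helper and not part of this package:

```ts
import { Logger } from '@powersync/lib-services-framework';
import { streamResponse, SyncStreamParameters } from './sync.js';

// Hypothetical helper that returns a Logger tagged with connection metadata.
declare function makeRequestLogger(clientId: string | undefined): Logger;

async function* handleSyncStream(options: SyncStreamParameters) {
  // New in this version: SyncStreamParameters accepts an optional `logger`.
  // When omitted, streamResponse falls back to the shared default logger.
  const logger = options.logger ?? makeRequestLogger(options.params.client_id);
  yield* streamResponse({ ...options, logger });
}
```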
package/src/util/config/collectors/config-collector.ts
CHANGED

@@ -1,8 +1,8 @@
-import { configFile } from '@powersync/service-types';
-import * as t from 'ts-codec';
 import * as yaml from 'yaml';
 
 import { schema } from '@powersync/lib-services-framework';
+import { configFile } from '@powersync/service-types';
+
 import { RunnerConfig } from '../types.js';
 import { YamlEnvTag } from './impl/yaml-env.js';
 
@@ -12,11 +12,7 @@ export enum ConfigFileFormat {
 }
 
 // ts-codec itself doesn't give great validation errors, so we use json schema for that
-const configSchemaValidator = schema
-  .parseJSONSchema(
-    t.generateJSONSchema(configFile.powerSyncConfig, { allowAdditional: true, parsers: [configFile.portParser] })
-  )
-  .validator();
+const configSchemaValidator = schema.parseJSONSchema(configFile.PowerSyncConfigJSONSchema).validator();
 
 export abstract class ConfigCollector {
   abstract get name(): string;
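
Config validation now reuses the pre-generated JSON schema exported by `@powersync/service-types` instead of generating one from the ts-codec definition at load time. A rough sketch of the resulting validation path; the `validate` call and the sample config object are assumptions for illustration, not the collector's actual code:

```ts
import { schema } from '@powersync/lib-services-framework';
import { configFile } from '@powersync/service-types';

// Built once, from the pre-generated JSON schema (as in the diff above).
const configSchemaValidator = schema.parseJSONSchema(configFile.PowerSyncConfigJSONSchema).validator();

// Assumed usage: validate a parsed YAML/JSON config object before decoding it.
const candidateConfig = { telemetry: { disable_telemetry_sharing: true } };
const result = configSchemaValidator.validate(candidateConfig);
```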
package/src/util/protocol-types.ts
CHANGED

@@ -1,5 +1,6 @@
 import * as t from 'ts-codec';
-import { BucketDescription, BucketPriority,
+import { BucketDescription, BucketPriority, SqliteJsonRow } from '@powersync/service-sync-rules';
+import { JsonContainer } from '@powersync/service-jsonbig';
 
 export const BucketRequest = t.object({
   name: t.string,
@@ -65,7 +66,7 @@ export interface StreamingSyncCheckpointDiff {
 }
 
 export interface StreamingSyncData {
-  data: SyncBucketData
+  data: SyncBucketData<ProtocolOplogData>;
 }
 
 export interface StreamingSyncCheckpointComplete {
@@ -109,13 +110,9 @@ export interface BucketState {
   op_id: string;
 }
 
-export interface
-  buckets: SyncBucketData[];
-}
-
-export interface SyncBucketData {
+export interface SyncBucketData<Data extends ProtocolOplogData = StoredOplogData> {
   bucket: string;
-  data: OplogEntry[];
+  data: OplogEntry<Data>[];
   /**
    * True if there _could_ be more data for this bucket, and another request must be made.
    */
@@ -130,12 +127,20 @@ export interface SyncBucketData {
   next_after: ProtocolOpId;
 }
 
-export
+export type StoredOplogData = string | null;
+
+// Note: When clients have both raw_data and binary_data disabled (this only affects legacy
+// clients), data is actually a `Record<string, SqliteJsonValue>`. Oplog entries are always
+// stored as a serialized (JSON) string so that they don't have to be parsed in the sync service,
+// this representation only exists on the way out for legacy clients.
+export type ProtocolOplogData = SqliteJsonRow | JsonContainer | StoredOplogData;
+
+export interface OplogEntry<Data extends ProtocolOplogData = StoredOplogData> {
   op_id: ProtocolOpId;
   op: 'PUT' | 'REMOVE' | 'MOVE' | 'CLEAR';
   object_type?: string;
   object_id?: string;
-  data?:
+  data?: Data;
   checksum: number | bigint;
   subkey?: string;
 }
package/test/src/auth.test.ts
CHANGED

@@ -75,21 +75,21 @@ describe('JWT Auth', () => {
        defaultAudiences: ['other'],
        maxAge: '6m'
      })
-    ).rejects.toThrow('
+    ).rejects.toThrow('[PSYNC_S2105] Unexpected "aud" claim value: "tests"');
 
    await expect(
      store.verifyJwt(signedJwt, {
        defaultAudiences: [],
        maxAge: '6m'
      })
-    ).rejects.toThrow('
+    ).rejects.toThrow('[PSYNC_S2105] Unexpected "aud" claim value: "tests"');
 
    await expect(
      store.verifyJwt(signedJwt, {
        defaultAudiences: ['tests'],
        maxAge: '1m'
      })
-    ).rejects.toThrow('Token must expire in a maximum of');
+    ).rejects.toThrow('[PSYNC_S2104] Token must expire in a maximum of 60 seconds, got 300s');
 
    const signedJwt2 = await new jose.SignJWT({})
      .setProtectedHeader({ alg: 'HS256', kid: 'k1' })
@@ -104,7 +104,25 @@ describe('JWT Auth', () => {
        defaultAudiences: ['tests'],
        maxAge: '5m'
      })
-    ).rejects.toThrow('missing required "sub"
+    ).rejects.toThrow('[PSYNC_S2101] JWT payload is missing a required claim "sub"');
+
+    // expired token
+    const d = Math.round(Date.now() / 1000);
+    const signedJwt3 = await new jose.SignJWT({})
+      .setProtectedHeader({ alg: 'HS256', kid: 'k1' })
+      .setSubject('f1')
+      .setIssuedAt(d - 500)
+      .setIssuer('tester')
+      .setAudience('tests')
+      .setExpirationTime(d - 400)
+      .sign(signKey);
+
+    await expect(
+      store.verifyJwt(signedJwt3, {
+        defaultAudiences: ['tests'],
+        maxAge: '5m'
+      })
+    ).rejects.toThrow('[PSYNC_S2103] JWT has expired');
  });
 
  test('Algorithm validation', async () => {
@@ -159,7 +177,7 @@ describe('JWT Auth', () => {
        maxAge: '6m'
      })
    ).rejects.toThrow(
-      'Could not find an appropriate key in the keystore. The key is missing or no key matched the token KID'
+      '[PSYNC_S2101] Could not find an appropriate key in the keystore. The key is missing or no key matched the token KID'
    );
 
    // Wrong kid
@@ -178,7 +196,7 @@ describe('JWT Auth', () => {
        maxAge: '6m'
      })
    ).rejects.toThrow(
-      'Could not find an appropriate key in the keystore. The key is missing or no key matched the token KID'
+      '[PSYNC_S2101] Could not find an appropriate key in the keystore. The key is missing or no key matched the token KID'
    );
 
    // No kid, matches sharedKey2
@@ -255,7 +273,7 @@ describe('JWT Auth', () => {
        defaultAudiences: ['tests'],
        maxAge: '6m'
      })
-    ).rejects.toThrow('
+    ).rejects.toThrow('[PSYNC_S2105] Unexpected "aud" claim value: "tests"');
 
    const signedJwt3 = await new jose.SignJWT({})
      .setProtectedHeader({ alg: 'HS256', kid: 'k1' })
@@ -290,7 +308,7 @@ describe('JWT Auth', () => {
        reject_ip_ranges: ['local']
      }
    });
-    await expect(invalid.getKeys()).rejects.toThrow('
+    await expect(invalid.getKeys()).rejects.toThrow('[PSYNC_S2204] JWKS request failed');
 
    // IPs throw an error immediately
    expect(
@@ -345,7 +363,7 @@ describe('JWT Auth', () => {
    expect(key.kid).toEqual(publicKeyRSA.kid!);
 
    cached.addTimeForTests(301_000);
-    currentResponse = Promise.reject('refresh failed');
+    currentResponse = Promise.reject(new Error('refresh failed'));
 
    // Uses the promise, refreshes in the background
    let response = await cached.getKeys();
@@ -357,14 +375,14 @@ describe('JWT Auth', () => {
    response = await cached.getKeys();
    // Still have the cached key, but also have the error
    expect(response.keys[0].kid).toEqual(publicKeyRSA.kid!);
-    expect(response.errors[0].message).toMatch('
+    expect(response.errors[0].message).toMatch('[PSYNC_S2201] refresh failed');
 
    await cached.addTimeForTests(3601_000);
    response = await cached.getKeys();
 
    // Now the keys have expired, and the request still fails
    expect(response.keys).toEqual([]);
-    expect(response.errors[0].message).toMatch('
+    expect(response.errors[0].message).toMatch('[PSYNC_S2201] refresh failed');
 
    currentResponse = Promise.resolve({
      errors: [],
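
The assertions above reflect that auth failures are now raised with messages carrying a `[PSYNC_Sxxxx]` service error code prefix. A hedged helper sketch for matching on those codes, assuming the suite runs on vitest; the structural `KeyStoreLike` type and the fixture values are assumptions mirroring the suite, not the real classes:

```ts
import { expect } from 'vitest';

type KeyStoreLike = {
  verifyJwt(token: string, options: { defaultAudiences: string[]; maxAge: string }): Promise<unknown>;
};

// Asserts that verification fails with a message that includes the expected PSYNC error code,
// e.g. '[PSYNC_S2103] JWT has expired'.
export async function expectAuthError(store: KeyStoreLike, token: string, messageWithCode: string) {
  await expect(store.verifyJwt(token, { defaultAudiences: ['tests'], maxAge: '6m' })).rejects.toThrow(messageWithCode);
}
```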
package/test/src/sync/BucketChecksumState.test.ts
CHANGED

@@ -71,6 +71,7 @@ bucket_definitions:
      writeCheckpoint: null,
      update: CHECKPOINT_INVALIDATE_ALL
    }))!;
+    line.advance();
    expect(line.checkpointLine).toEqual({
      checkpoint: {
        buckets: [{ bucket: 'global[]', checksum: 1, count: 1, priority: 3 }],
@@ -85,10 +86,11 @@ bucket_definitions:
      }
    ]);
    // This is the bucket data to be fetched
-    expect(
+    expect(line.getFilteredBucketPositions()).toEqual(new Map([['global[]', 0n]]));
 
    // This similuates the bucket data being sent
-
+    line.advance();
+    line.updateBucketPosition({ bucket: 'global[]', nextAfter: 1n, hasMore: false });
 
    // Update bucket storage state
    storage.updateTestChecksum({ bucket: 'global[]', checksum: 2, count: 2 });
@@ -104,6 +106,7 @@ bucket_definitions:
        invalidateParameterBuckets: false
      }
    }))!;
+    line2.advance();
    expect(line2.checkpointLine).toEqual({
      checkpoint_diff: {
        removed_buckets: [],
@@ -112,7 +115,7 @@ bucket_definitions:
        write_checkpoint: undefined
      }
    });
-    expect(
+    expect(line2.getFilteredBucketPositions()).toEqual(new Map([['global[]', 1n]]));
  });
 
  test('global bucket with initial state', async () => {
@@ -137,6 +140,7 @@ bucket_definitions:
      writeCheckpoint: null,
      update: CHECKPOINT_INVALIDATE_ALL
    }))!;
+    line.advance();
    expect(line.checkpointLine).toEqual({
      checkpoint: {
        buckets: [{ bucket: 'global[]', checksum: 1, count: 1, priority: 3 }],
@@ -151,7 +155,7 @@ bucket_definitions:
      }
    ]);
    // This is the main difference between this and the previous test
-    expect(
+    expect(line.getFilteredBucketPositions()).toEqual(new Map([['global[]', 1n]]));
  });
 
  test('multiple static buckets', async () => {
@@ -192,6 +196,7 @@ bucket_definitions:
        priority: 3
      }
    ]);
+    line.advance();
 
    storage.updateTestChecksum({ bucket: 'global[1]', checksum: 2, count: 2 });
    storage.updateTestChecksum({ bucket: 'global[2]', checksum: 2, count: 2 });
@@ -240,6 +245,7 @@ bucket_definitions:
      writeCheckpoint: null,
      update: CHECKPOINT_INVALIDATE_ALL
    }))!;
+    line.advance();
    expect(line.checkpointLine).toEqual({
      checkpoint: {
        buckets: [{ bucket: 'global[]', checksum: 1, count: 1, priority: 3 }],
@@ -253,7 +259,7 @@ bucket_definitions:
        priority: 3
      }
    ]);
-    expect(
+    expect(line.getFilteredBucketPositions()).toEqual(new Map([['global[]', 0n]]));
  });
 
  test('invalidating individual bucket', async () => {
@@ -274,14 +280,14 @@ bucket_definitions:
    // We specifically do not set this here, so that we have manual control over the events.
    // storage.filter = state.checkpointFilter;
 
-    await state.buildNextCheckpointLine({
+    const line = await state.buildNextCheckpointLine({
      base: { checkpoint: 1n, lsn: '1' },
      writeCheckpoint: null,
      update: CHECKPOINT_INVALIDATE_ALL
    });
-
-
-
+    line!.advance();
+    line!.updateBucketPosition({ bucket: 'global[1]', nextAfter: 1n, hasMore: false });
+    line!.updateBucketPosition({ bucket: 'global[2]', nextAfter: 1n, hasMore: false });
 
    storage.updateTestChecksum({ bucket: 'global[1]', checksum: 2, count: 2 });
    storage.updateTestChecksum({ bucket: 'global[2]', checksum: 2, count: 2 });
@@ -330,12 +336,14 @@ bucket_definitions:
    storage.updateTestChecksum({ bucket: 'global[1]', checksum: 1, count: 1 });
    storage.updateTestChecksum({ bucket: 'global[2]', checksum: 1, count: 1 });
 
-    await state.buildNextCheckpointLine({
+    const line = await state.buildNextCheckpointLine({
      base: { checkpoint: 1n, lsn: '1' },
      writeCheckpoint: null,
      update: CHECKPOINT_INVALIDATE_ALL
    });
 
+    line!.advance();
+
    storage.updateTestChecksum({ bucket: 'global[1]', checksum: 2, count: 2 });
    storage.updateTestChecksum({ bucket: 'global[2]', checksum: 2, count: 2 });
 
@@ -380,6 +388,7 @@ bucket_definitions:
      writeCheckpoint: null,
      update: CHECKPOINT_INVALIDATE_ALL
    }))!;
+    line.advance();
    expect(line.checkpointLine).toEqual({
      checkpoint: {
        buckets: [
@@ -402,7 +411,7 @@ bucket_definitions:
    ]);
 
    // This is the bucket data to be fetched
-    expect(
+    expect(line.getFilteredBucketPositions()).toEqual(
      new Map([
        ['global[1]', 0n],
        ['global[2]', 0n]
@@ -411,8 +420,9 @@ bucket_definitions:
 
    // No data changes here.
    // We simulate partial data sent, before a checkpoint is interrupted.
-
-
+    line.advance();
+    line.updateBucketPosition({ bucket: 'global[1]', nextAfter: 3n, hasMore: false });
+    line.updateBucketPosition({ bucket: 'global[2]', nextAfter: 1n, hasMore: true });
    storage.updateTestChecksum({ bucket: 'global[1]', checksum: 4, count: 4 });
 
    const line2 = (await state.buildNextCheckpointLine({
@@ -424,6 +434,7 @@ bucket_definitions:
        updatedDataBuckets: new Set(['global[1]'])
      }
    }))!;
+    line2.advance();
    expect(line2.checkpointLine).toEqual({
      checkpoint_diff: {
        removed_buckets: [],
@@ -451,7 +462,7 @@ bucket_definitions:
      }
    ]);
 
-    expect(
+    expect(line2.getFilteredBucketPositions()).toEqual(
      new Map([
        ['global[1]', 3n],
        ['global[2]', 1n]
@@ -507,16 +518,18 @@ bucket_definitions:
        priority: 3
      }
    ]);
+    line.advance();
    // This is the bucket data to be fetched
-    expect(
+    expect(line.getFilteredBucketPositions()).toEqual(
      new Map([
        ['by_project[1]', 0n],
        ['by_project[2]', 0n]
      ])
    );
 
-
-
+    line.advance();
+    line.updateBucketPosition({ bucket: 'by_project[1]', nextAfter: 1n, hasMore: false });
+    line.updateBucketPosition({ bucket: 'by_project[2]', nextAfter: 1n, hasMore: false });
 
    storage.getParameterSets = async (
      checkpoint: InternalOpId,
@@ -538,6 +551,7 @@ bucket_definitions:
        invalidateParameterBuckets: false
      }
    }))!;
+    line2.advance();
    expect(line2.checkpointLine).toEqual({
      checkpoint_diff: {
        removed_buckets: [],
@@ -546,7 +560,7 @@ bucket_definitions:
        write_checkpoint: undefined
      }
    });
-    expect(
+    expect(line2.getFilteredBucketPositions()).toEqual(new Map([['by_project[3]', 0n]]));
  });
 });
 
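
These test changes track the new explicit `CheckpointLine` lifecycle: `buildNextCheckpointLine` returns a line, `advance()` commits the pending bucket positions just before the line is sent, `getFilteredBucketPositions()` limits data fetches to changed buckets, and `updateBucketPosition()` records streaming progress. A minimal sketch of that flow, assuming it sits next to `BucketChecksumState.ts`; `sendToClient` and the concrete position values are placeholders:

```ts
import { BucketChecksumState, CheckpointLine } from './BucketChecksumState.js';

declare function sendToClient(line: unknown): void;

export async function emitNextCheckpoint(
  state: BucketChecksumState,
  update: Parameters<BucketChecksumState['buildNextCheckpointLine']>[0]
): Promise<CheckpointLine | null> {
  const line = await state.buildNextCheckpointLine(update);
  if (line == null) {
    // A checkpoint arrived, but nothing changed for this connection.
    return null;
  }

  // Commit pending bucket positions right before yielding the checkpoint line.
  line.advance();
  sendToClient(line.checkpointLine);

  // Only buckets whose checksums changed since the last checkpoint need to be fetched again.
  const positions = line.getFilteredBucketPositions(line.bucketsToFetch);

  for (const [bucket] of positions) {
    // As each bucket's data is streamed, record progress so an interrupted checkpoint
    // can resume from the correct position. The values here are illustrative only.
    line.updateBucketPosition({ bucket, nextAfter: 1n, hasMore: false });
  }
  return line;
}
```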