@powersync/service-core 1.14.0 → 1.15.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +39 -0
- package/LICENSE +3 -3
- package/dist/api/api-metrics.js +5 -0
- package/dist/api/api-metrics.js.map +1 -1
- package/dist/api/diagnostics.js +1 -1
- package/dist/api/diagnostics.js.map +1 -1
- package/dist/metrics/open-telemetry/util.d.ts +0 -3
- package/dist/metrics/open-telemetry/util.js +18 -13
- package/dist/metrics/open-telemetry/util.js.map +1 -1
- package/dist/routes/compression.d.ts +19 -0
- package/dist/routes/compression.js +70 -0
- package/dist/routes/compression.js.map +1 -0
- package/dist/routes/configure-fastify.d.ts +40 -5
- package/dist/routes/endpoints/socket-route.js +24 -9
- package/dist/routes/endpoints/socket-route.js.map +1 -1
- package/dist/routes/endpoints/sync-rules.js +1 -27
- package/dist/routes/endpoints/sync-rules.js.map +1 -1
- package/dist/routes/endpoints/sync-stream.d.ts +80 -10
- package/dist/routes/endpoints/sync-stream.js +17 -12
- package/dist/routes/endpoints/sync-stream.js.map +1 -1
- package/dist/storage/BucketStorage.d.ts +1 -1
- package/dist/storage/BucketStorage.js.map +1 -1
- package/dist/storage/BucketStorageBatch.d.ts +4 -4
- package/dist/storage/BucketStorageBatch.js.map +1 -1
- package/dist/storage/ChecksumCache.d.ts +4 -19
- package/dist/storage/ChecksumCache.js +4 -0
- package/dist/storage/ChecksumCache.js.map +1 -1
- package/dist/storage/ReplicationEventPayload.d.ts +2 -2
- package/dist/storage/SyncRulesBucketStorage.d.ts +9 -0
- package/dist/storage/SyncRulesBucketStorage.js.map +1 -1
- package/dist/sync/BucketChecksumState.d.ts +40 -10
- package/dist/sync/BucketChecksumState.js +154 -18
- package/dist/sync/BucketChecksumState.js.map +1 -1
- package/dist/sync/RequestTracker.d.ts +7 -1
- package/dist/sync/RequestTracker.js +22 -2
- package/dist/sync/RequestTracker.js.map +1 -1
- package/dist/sync/sync.d.ts +3 -3
- package/dist/sync/sync.js +23 -42
- package/dist/sync/sync.js.map +1 -1
- package/dist/sync/util.js +1 -1
- package/dist/sync/util.js.map +1 -1
- package/dist/util/protocol-types.d.ts +153 -9
- package/dist/util/protocol-types.js +41 -6
- package/dist/util/protocol-types.js.map +1 -1
- package/dist/util/utils.d.ts +18 -3
- package/dist/util/utils.js +39 -10
- package/dist/util/utils.js.map +1 -1
- package/package.json +14 -14
- package/src/api/api-metrics.ts +6 -0
- package/src/api/diagnostics.ts +1 -1
- package/src/metrics/open-telemetry/util.ts +22 -21
- package/src/routes/compression.ts +75 -0
- package/src/routes/endpoints/socket-route.ts +24 -9
- package/src/routes/endpoints/sync-rules.ts +1 -28
- package/src/routes/endpoints/sync-stream.ts +18 -15
- package/src/storage/BucketStorage.ts +2 -2
- package/src/storage/BucketStorageBatch.ts +10 -4
- package/src/storage/ChecksumCache.ts +8 -22
- package/src/storage/ReplicationEventPayload.ts +2 -2
- package/src/storage/SyncRulesBucketStorage.ts +12 -0
- package/src/sync/BucketChecksumState.ts +192 -29
- package/src/sync/RequestTracker.ts +27 -2
- package/src/sync/sync.ts +53 -51
- package/src/sync/util.ts +1 -1
- package/src/util/protocol-types.ts +138 -10
- package/src/util/utils.ts +64 -13
- package/test/src/checksum_cache.test.ts +6 -8
- package/test/src/routes/mocks.ts +59 -0
- package/test/src/routes/stream.test.ts +84 -0
- package/test/src/sync/BucketChecksumState.test.ts +340 -42
- package/tsconfig.tsbuildinfo +1 -1
package/src/util/protocol-types.ts
CHANGED
@@ -13,9 +13,51 @@ export const BucketRequest = t.object({
 
 export type BucketRequest = t.Decoded<typeof BucketRequest>;
 
+/**
+ * A sync stream that a client has expressed interest in by explicitly opening it on the client side.
+ */
+export const RequestedStreamSubscription = t.object({
+  /**
+   * The defined name of the stream as it appears in sync stream definitions.
+   */
+  stream: t.string,
+  /**
+   * An optional dictionary of parameters to pass to this specific stream.
+   */
+  parameters: t.record(t.any).optional(),
+  /**
+   * Set when the client wishes to re-assign a different priority to this stream.
+   *
+   * Streams and sync rules can also assign a default priority, but clients are allowed to override those. This can be
+   * useful when the priority for partial syncs depends on e.g. the current page opened in a client.
+   */
+  override_priority: t.union(t.number, t.Null)
+});
+
+export type RequestedStreamSubscription = t.Decoded<typeof RequestedStreamSubscription>;
+
+/**
+ * An overview of all subscribed streams as part of a streaming sync request.
+ */
+export const StreamSubscriptionRequest = t.object({
+  /**
+   * Whether to sync default streams.
+   *
+   * When disabled, only explicitly-opened subscriptions are included.
+   */
+  include_defaults: t.boolean.optional(),
+
+  /**
+   * An array of sync streams the client has opened explicitly.
+   */
+  subscriptions: t.array(RequestedStreamSubscription)
+});
+
+export type StreamSubscriptionRequest = t.Decoded<typeof StreamSubscriptionRequest>;
+
 export const StreamingSyncRequest = t.object({
   /**
-   * Existing bucket states.
+   * Existing client-side bucket states.
    */
   buckets: t.array(BucketRequest).optional(),
 
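Decoded, a `RequestedStreamSubscription` is a plain JSON object. A minimal sketch of one (the stream name and parameter are hypothetical, not part of this package):

// A hypothetical explicit subscription to an 'issues' stream, scoped to one
// project and bumped to the highest priority (0). `override_priority` is not
// optional in the schema, so clients pass null to keep the stream's default.
const subscription: RequestedStreamSubscription = {
  stream: 'issues',
  parameters: { project_id: 'project-1' },
  override_priority: 0
};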
@@ -34,11 +76,6 @@ export const StreamingSyncRequest = t.object({
    */
   raw_data: t.boolean.optional(),
 
-  /**
-   * Data is received in a serialized BSON Buffer
-   */
-  binary_data: t.boolean.optional(),
-
   /**
    * Client parameters to be passed to the sync rules.
    */
@@ -47,7 +84,12 @@ export const StreamingSyncRequest = t.object({
   /**
    * Unique client id.
    */
-  client_id: t.string.optional()
+  client_id: t.string.optional(),
+
+  /**
+   * If the client is aware of streams, an array of streams the client has opened.
+   */
+  streams: StreamSubscriptionRequest.optional()
 });
 
 export type StreamingSyncRequest = t.Decoded<typeof StreamingSyncRequest>;
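With the `streams` field added above, a stream-aware client can combine bucket state and stream subscriptions in one request. A minimal sketch of a body that validates against `StreamingSyncRequest` (all names and parameters are hypothetical):

// Hypothetical streaming sync request body. `buckets`, `raw_data`,
// `client_id` and `streams` are all optional fields in the schema.
const body: StreamingSyncRequest = {
  buckets: [{ name: 'by_user["user-1"]', after: '0' }],
  raw_data: true,
  client_id: 'client-123',
  streams: {
    include_defaults: true,
    subscriptions: [{ stream: 'issues', parameters: { project_id: 'p1' }, override_priority: null }]
  }
};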
@@ -60,7 +102,7 @@ export interface StreamingSyncCheckpointDiff {
   checkpoint_diff: {
     last_op_id: ProtocolOpId;
     write_checkpoint?: ProtocolOpId;
-    updated_buckets:
+    updated_buckets: CheckpointBucket[];
     removed_buckets: string[];
   };
 }
@@ -99,10 +141,54 @@ export type StreamingSyncLine =
  */
 export type ProtocolOpId = string;
 
+export interface StreamDescription {
+  /**
+   * The name of the stream as it appears in the sync configuration.
+   */
+  name: string;
+
+  /**
+   * Whether this stream is subscribed to by default.
+   *
+   * For default streams, this field is still `true` if clients have an explicit subscription to the stream.
+   */
+  is_default: boolean;
+
+  /**
+   * If some subscriptions on this stream could not be resolved, e.g. due to an error, this array contains the faulty
+   * subscriptions along with an error message.
+   */
+  errors: StreamSubscriptionError[];
+}
+
+export interface StreamSubscriptionError {
+  /**
+   * The subscription that errored - either the default subscription or one of the explicit subscriptions.
+   */
+  subscription: 'default' | number;
+  /**
+   * A message describing the error on the subscription.
+   */
+  message: string;
+}
+
 export interface Checkpoint {
   last_op_id: ProtocolOpId;
   write_checkpoint?: ProtocolOpId;
-  buckets:
+  buckets: CheckpointBucket[];
+
+  /**
+   * All streams that the client is subscribed to.
+   *
+   * This field has two purposes:
+   *
+   * 1. It allows clients to determine which of their subscriptions actually work. E.g. if a user does
+   *    `db.syncStream('non_existent_stream').subscribe()`, clients don't immediately know that the stream doesn't
+   *    exist. Only after the next `checkpoint` line can they query this field and mark unresolved subscriptions.
+   * 2. It allows clients to learn which default streams they have been subscribed to. This is relevant for APIs
+   *    listing all streams on the client side.
+   */
+  streams: StreamDescription[];
 }
 
 export interface BucketState {
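The doc comment on `Checkpoint.streams` describes how clients can detect unresolved subscriptions after the fact. A minimal client-side sketch of that check (logging to the console stands in for whatever reporting a real client would do):

// Surface subscription errors reported in a checkpoint line. `subscription`
// is either 'default' or an index into the subscriptions array that the
// client sent in its StreamingSyncRequest.
function reportUnresolvedSubscriptions(checkpoint: Checkpoint) {
  for (const stream of checkpoint.streams) {
    for (const error of stream.errors) {
      console.warn(`stream ${stream.name} (${String(error.subscription)}): ${error.message}`);
    }
  }
}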
@@ -158,4 +244,46 @@ export interface BucketChecksum {
   count: number;
 }
 
-
+/**
+ * The reason a particular bucket is included in a checkpoint.
+ *
+ * This information allows clients to associate individual buckets with sync streams they're subscribed to. Having that
+ * association is useful because it enables clients to track progress for individual sync streams.
+ */
+export type BucketSubscriptionReason = BucketDerivedFromDefaultStream | BucketDerivedFromExplicitSubscription;
+
+/**
+ * A bucket has been included in a checkpoint because it's part of a default stream.
+ */
+export type BucketDerivedFromDefaultStream = {
+  /**
+   * The index (into {@link Checkpoint.streams}) of the stream defining the bucket.
+   */
+  default: number;
+};
+
+/**
+ * The bucket has been included in a checkpoint because it's part of a stream that a client has explicitly subscribed
+ * to.
+ */
+export type BucketDerivedFromExplicitSubscription = {
+  /**
+   * The index (into {@link StreamSubscriptionRequest.subscriptions}) of the subscription yielding this bucket.
+   */
+  sub: number;
+};
+
+export interface ClientBucketDescription {
+  /**
+   * An opaque id of the bucket.
+   */
+  bucket: string;
+  /**
+   * The priority used to synchronize this bucket, derived from its definition and an optional priority override from
+   * the stream subscription.
+   */
+  priority: BucketPriority;
+  subscriptions: BucketSubscriptionReason[];
+}
+
+export interface CheckpointBucket extends BucketChecksum, ClientBucketDescription {}
package/src/util/utils.ts
CHANGED
@@ -6,11 +6,26 @@ import { BucketChecksum, ProtocolOpId, OplogEntry } from './protocol-types.js';
 
 import * as storage from '../storage/storage-index.js';
 
-import { PartialChecksum } from '../storage/ChecksumCache.js';
 import { ServiceAssertionError } from '@powersync/lib-services-framework';
 
 export type ChecksumMap = Map<string, BucketChecksum>;
 
+/**
+ * A partial checksum can never be used on its own - it must always be combined with a full BucketChecksum.
+ */
+export interface PartialChecksum {
+  bucket: string;
+  /**
+   * 32-bit unsigned hash.
+   */
+  partialChecksum: number;
+
+  /**
+   * Count of operations - informational only.
+   */
+  partialCount: number;
+}
+
 /**
  * op_id as used internally, for individual operations and checkpoints.
  *
@@ -83,20 +98,53 @@ export function addChecksums(a: number, b: number) {
   return (a + b) & 0xffffffff;
 }
 
-export function
-
+export function isPartialChecksum(c: PartialChecksum | BucketChecksum): c is PartialChecksum {
+  return 'partialChecksum' in c;
+}
+
+export function addBucketChecksums(a: BucketChecksum, b: PartialChecksum | BucketChecksum | null): BucketChecksum {
+  const checksum = addPartialChecksums(a.bucket, a, b);
+  if (isPartialChecksum(checksum)) {
+    // Should not happen since a != null
+    throw new ServiceAssertionError('Expected full checksum');
+  }
+  return checksum;
+}
+
+export function addPartialChecksums(
+  bucket: string,
+  a: PartialChecksum | BucketChecksum | null,
+  b: PartialChecksum | BucketChecksum | null
+): PartialChecksum | BucketChecksum {
+  if (a != null && b != null) {
+    if (!isPartialChecksum(b)) {
+      // Replaces a
+      return b;
+    }
+    // merge
+    if (!isPartialChecksum(a)) {
+      return {
+        bucket,
+        checksum: addChecksums(a.checksum, b.partialChecksum),
+        count: a.count + b.partialCount
+      };
+    } else {
+      return {
+        bucket,
+        partialChecksum: addChecksums(a.partialChecksum, b.partialChecksum),
+        partialCount: a.partialCount + b.partialCount
+      };
+    }
+  } else if (a != null) {
     return a;
-  } else if (b
-    return
-      bucket: b.bucket,
-      count: b.partialCount,
-      checksum: b.partialChecksum
-    };
+  } else if (b != null) {
+    return b;
   } else {
+    // No data found (may still have a previously-cached checksum).
     return {
-      bucket
-
-
+      bucket,
+      partialChecksum: 0,
+      partialCount: 0
    };
   }
 }
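The merge rules in `addPartialChecksums` are easiest to see with concrete values; a small worked example (bucket name and numbers are arbitrary):

// A full checksum covering ops 1..100, plus a partial checksum for ops 101..110.
const full: BucketChecksum = { bucket: 'b1', checksum: 0x1111, count: 100 };
const partial: PartialChecksum = { bucket: 'b1', partialChecksum: 0x0f0f, partialCount: 10 };

// Full + partial merges into a new full checksum:
// checksum = (0x1111 + 0x0f0f) & 0xffffffff = 0x2020, count = 100 + 10 = 110.
const merged = addBucketChecksums(full, partial);
// => { bucket: 'b1', checksum: 0x2020, count: 110 }
// Had `partial` been a full BucketChecksum instead, it would simply replace `full`.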
@@ -148,7 +196,10 @@ export function hasToastedValues(row: sync_rules.ToastableSqliteRow) {
  *
  * If we don't store data, we assume we always have a complete row.
  */
-export function isCompleteRow(
+export function isCompleteRow(
+  storeData: boolean,
+  row: sync_rules.ToastableSqliteRow
+): row is sync_rules.SqliteInputRow {
   if (!storeData) {
     // Assume the row is complete - no need to check
     return true;
package/test/src/checksum_cache.test.ts
CHANGED
@@ -1,5 +1,5 @@
-import { ChecksumCache, FetchChecksums, FetchPartialBucketChecksum
-import { addChecksums, InternalOpId } from '@/util/util-index.js';
+import { ChecksumCache, FetchChecksums, FetchPartialBucketChecksum } from '@/storage/ChecksumCache.js';
+import { addChecksums, BucketChecksum, InternalOpId, PartialChecksum } from '@/util/util-index.js';
 import * as crypto from 'node:crypto';
 import { describe, expect, it } from 'vitest';
 
@@ -12,22 +12,20 @@ function testHash(bucket: string, checkpoint: InternalOpId) {
   return hash;
 }
 
-function testPartialHash(request: FetchPartialBucketChecksum): PartialChecksum {
+function testPartialHash(request: FetchPartialBucketChecksum): PartialChecksum | BucketChecksum {
   if (request.start) {
     const a = testHash(request.bucket, request.start);
     const b = testHash(request.bucket, request.end);
     return {
       bucket: request.bucket,
       partialCount: Number(request.end) - Number(request.start),
-      partialChecksum: addChecksums(b, -a),
-      isFullChecksum: false
+      partialChecksum: addChecksums(b, -a)
     };
   } else {
     return {
       bucket: request.bucket,
-
-
-      isFullChecksum: true
+      checksum: testHash(request.bucket, request.end),
+      count: Number(request.end)
     };
   }
 }
package/test/src/routes/mocks.ts
ADDED
@@ -0,0 +1,59 @@
+import {
+  BucketStorageFactory,
+  createCoreAPIMetrics,
+  MetricsEngine,
+  OpenTelemetryMetricsFactory,
+  RouteAPI,
+  RouterEngine,
+  ServiceContext,
+  StorageEngine,
+  SyncContext,
+  SyncRulesBucketStorage
+} from '@/index.js';
+import { MeterProvider } from '@opentelemetry/sdk-metrics';
+
+export function mockServiceContext(storage: Partial<SyncRulesBucketStorage> | null) {
+  // This is very incomplete - just enough to get the current tests passing.
+
+  const storageEngine: StorageEngine = {
+    activeBucketStorage: {
+      async getActiveStorage() {
+        return storage;
+      }
+    } as Partial<BucketStorageFactory>
+  } as any;
+
+  const meterProvider = new MeterProvider({
+    readers: []
+  });
+  const meter = meterProvider.getMeter('powersync-tests');
+  const metricsEngine = new MetricsEngine({
+    disable_telemetry_sharing: true,
+    factory: new OpenTelemetryMetricsFactory(meter)
+  });
+  createCoreAPIMetrics(metricsEngine);
+  const service_context: Partial<ServiceContext> = {
+    syncContext: new SyncContext({ maxBuckets: 1, maxDataFetchConcurrency: 1, maxParameterQueryResults: 1 }),
+    routerEngine: {
+      getAPI() {
+        return {
+          getParseSyncRulesOptions() {
+            return { defaultSchema: 'public' };
+          }
+        } as Partial<RouteAPI>;
+      },
+      addStopHandler() {
+        return () => {};
+      }
+    } as Partial<RouterEngine> as any,
+    storageEngine,
+    metricsEngine: metricsEngine,
+    // Not used
+    configuration: null as any,
+    lifeCycleEngine: null as any,
+    migrations: null as any,
+    replicationEngine: null as any,
+    serviceMode: null as any
+  };
+  return service_context as ServiceContext;
+}
package/test/src/routes/stream.test.ts
ADDED
@@ -0,0 +1,84 @@
+import { BasicRouterRequest, Context, SyncRulesBucketStorage } from '@/index.js';
+import { logger, RouterResponse, ServiceError } from '@powersync/lib-services-framework';
+import { SqlSyncRules } from '@powersync/service-sync-rules';
+import { Readable, Writable } from 'stream';
+import { pipeline } from 'stream/promises';
+import { beforeEach, describe, expect, it, vi } from 'vitest';
+import { syncStreamed } from '../../../src/routes/endpoints/sync-stream.js';
+import { mockServiceContext } from './mocks.js';
+
+describe('Stream Route', () => {
+  describe('compressed stream', () => {
+    it('handles missing sync rules', async () => {
+      const context: Context = {
+        logger: logger,
+        service_context: mockServiceContext(null)
+      };
+
+      const request: BasicRouterRequest = {
+        headers: {},
+        hostname: '',
+        protocol: 'http'
+      };
+
+      const error = (await (syncStreamed.handler({ context, params: {}, request }) as Promise<RouterResponse>).catch(
+        (e) => e
+      )) as ServiceError;
+
+      expect(error.errorData.status).toEqual(500);
+      expect(error.errorData.code).toEqual('PSYNC_S2302');
+    });
+
+    it('handles a stream error with compression', async () => {
+      // This primarily tests that an underlying storage error doesn't result in an uncaught error
+      // when compressing the stream.
+
+      const storage = {
+        getParsedSyncRules() {
+          return new SqlSyncRules('bucket_definitions: {}');
+        },
+        watchCheckpointChanges: async function* (options) {
+          throw new Error('Simulated storage error');
+        }
+      } as Partial<SyncRulesBucketStorage>;
+      const serviceContext = mockServiceContext(storage);
+
+      const context: Context = {
+        logger: logger,
+        service_context: serviceContext,
+        token_payload: {
+          exp: new Date().getTime() / 1000 + 10000,
+          iat: new Date().getTime() / 1000 - 10000,
+          sub: 'test-user'
+        }
+      };
+
+      // It may be worth eventually doing this via Fastify to test the full stack
+
+      const request: BasicRouterRequest = {
+        headers: {
+          'accept-encoding': 'gzip'
+        },
+        hostname: '',
+        protocol: 'http'
+      };
+
+      const response = await (syncStreamed.handler({ context, params: {}, request }) as Promise<RouterResponse>);
+      expect(response.status).toEqual(200);
+      const stream = response.data as Readable;
+      const r = await drainWithTimeout(stream).catch((error) => error);
+      expect(r.message).toContain('Simulated storage error');
+    });
+  });
+});
+
+export async function drainWithTimeout(readable: Readable, ms = 2_000) {
+  const devNull = new Writable({
+    write(_chunk, _enc, cb) {
+      cb();
+    } // discard everything
+  });
+
+  // Throws AbortError if it takes longer than ms, and destroys the stream
+  await pipeline(readable, devNull, { signal: AbortSignal.timeout(ms) });
+}