@powersync/service-core 0.0.0-dev-20250819134004 → 0.0.0-dev-20250825132649
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +8 -6
- package/dist/events/EventsEngine.d.ts +4 -1
- package/dist/events/EventsEngine.js +4 -3
- package/dist/events/EventsEngine.js.map +1 -1
- package/dist/routes/configure-fastify.d.ts +0 -5
- package/dist/routes/configure-fastify.js.map +1 -1
- package/dist/routes/endpoints/socket-route.js +6 -5
- package/dist/routes/endpoints/socket-route.js.map +1 -1
- package/dist/routes/endpoints/sync-stream.d.ts +0 -10
- package/dist/routes/endpoints/sync-stream.js +8 -5
- package/dist/routes/endpoints/sync-stream.js.map +1 -1
- package/dist/routes/router.d.ts +3 -3
- package/dist/storage/ReportStorage.d.ts +30 -2
- package/dist/sync/sync.d.ts +1 -0
- package/dist/sync/sync.js +18 -35
- package/dist/sync/sync.js.map +1 -1
- package/dist/util/protocol-types.d.ts +0 -4
- package/dist/util/protocol-types.js +0 -4
- package/dist/util/protocol-types.js.map +1 -1
- package/package.json +6 -6
- package/src/events/EventsEngine.ts +4 -4
- package/src/routes/configure-fastify.ts +0 -1
- package/src/routes/endpoints/socket-route.ts +6 -5
- package/src/routes/endpoints/sync-stream.ts +8 -6
- package/src/routes/router.ts +3 -3
- package/src/storage/ReportStorage.ts +30 -2
- package/src/sync/sync.ts +36 -36
- package/src/util/protocol-types.ts +0 -5
- package/test/src/sync/BucketChecksumState.test.ts +33 -92
- package/tsconfig.tsbuildinfo +1 -1

package/src/routes/endpoints/socket-route.ts
CHANGED

@@ -23,10 +23,11 @@ export const syncStreamReactive: SocketRouteGenerator = (router) =>
   };

   const sdkData: event_types.ConnectedUserData & event_types.ClientConnectionEventData = {
-    client_id: params.client_id,
+    client_id: params.client_id ?? '',
     user_id: context.user_id!,
     user_agent: context.user_agent,
-
+    // At this point the token_payload is guaranteed to be present
+    jwt_exp: new Date(context.token_payload!.exp * 1000),
     connected_at: new Date(streamStart)
   };

@@ -98,8 +99,7 @@ export const syncStreamReactive: SocketRouteGenerator = (router) =>
     bucketStorage: bucketStorage,
     syncRules: syncRules,
     params: {
-      ...params
-      binary_data: true // always true for web sockets
+      ...params
     },
     token: context!.token_payload!,
     tokenStreamOptions: {
@@ -108,7 +108,8 @@ export const syncStreamReactive: SocketRouteGenerator = (router) =>
     },
     tracker,
     signal,
-    logger
+    logger,
+    isEncodingAsBson: true
   })) {
     if (signal.aborted) {
       break;

package/src/routes/endpoints/sync-stream.ts
CHANGED

@@ -30,9 +30,9 @@ export const syncStreamed = routeDefinition({
   const clientId = payload.params.client_id;
   const streamStart = Date.now();
   // This falls back to JSON unless there's preference for the bson-stream in the Accept header.
-  const useBson =
-    payload.request.
-
+  const useBson = payload.request.headers.accept
+    ? new Negotiator(payload.request).mediaType(supportedContentTypes) == concatenatedBsonContentType
+    : false;

   logger.defaultMeta = {
     ...logger.defaultMeta,
@@ -42,10 +42,11 @@ export const syncStreamed = routeDefinition({
     bson: useBson
   };
   const sdkData: event_types.ConnectedUserData & event_types.ClientConnectionEventData = {
-    client_id: clientId,
+    client_id: clientId ?? '',
     user_id: payload.context.user_id!,
     user_agent: userAgent as string,
-
+    // At this point the token_payload is guaranteed to be present
+    jwt_exp: new Date(token_payload!.exp * 1000),
     connected_at: new Date(streamStart)
   };

@@ -82,7 +83,8 @@ export const syncStreamed = routeDefinition({
   token: payload.context.token_payload!,
   tracker,
   signal: controller.signal,
-  logger
+  logger,
+  isEncodingAsBson: useBson
   });

   const byteContents = useBson ? sync.bsonLines(syncLines) : sync.ndjson(syncLines);
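
Note on the Accept-header negotiation introduced above: the HTTP sync route now only switches to the concatenated-BSON stream when the client's Accept header prefers it, and falls back to JSON otherwise. Below is a minimal sketch of that pattern using the `negotiator` package; the media-type values are placeholders, while the real `supportedContentTypes` and `concatenatedBsonContentType` are defined elsewhere in the package.

```ts
import Negotiator from 'negotiator';

// Placeholder content types for illustration only; the package defines its own
// supportedContentTypes and concatenatedBsonContentType elsewhere.
const ndjsonContentType = 'application/x-ndjson';
const bsonStreamContentType = 'application/vnd.powersync.bson-stream';
const supportedContentTypes = [ndjsonContentType, bsonStreamContentType];

// Returns true only when the request carries an Accept header that prefers the BSON stream.
function prefersBsonStream(request: { headers: Record<string, string | string[] | undefined> }): boolean {
  if (!request.headers.accept) {
    return false;
  }
  return new Negotiator(request).mediaType(supportedContentTypes) == bsonStreamContentType;
}
```

The WebSocket route above, by contrast, always passes `isEncodingAsBson: true`, matching the removed `binary_data: true // always true for web sockets` parameter.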

package/src/routes/router.ts
CHANGED

@@ -1,4 +1,4 @@
-import {
+import { Logger, router } from '@powersync/lib-services-framework';
 import type { JwtPayload } from '../auth/auth-index.js';
 import { ServiceContext } from '../system/ServiceContext.js';
 import { RouterEngine } from './RouterEngine.js';
@@ -31,11 +31,11 @@ export type BasicRouterRequest = {
   hostname: string;
 };

-export type
+export type ContextProviderOptions = {
   logger: Logger;
 };

-export type ContextProvider = (request: BasicRouterRequest, options:
+export type ContextProvider = (request: BasicRouterRequest, options: ContextProviderOptions) => Promise<Context>;

 export type RequestEndpoint<
   I,

package/src/storage/ReportStorage.ts
CHANGED

@@ -1,11 +1,39 @@
 import { event_types } from '@powersync/service-types';

+/**
+ * Represents a configured report storage.
+ *
+ * Report storage is used for storing localized data for the instance.
+ * Data can then be used for reporting purposes.
+ *
+ */
 export interface ReportStorage extends AsyncDisposable {
+  /**
+   * Report a client connection.
+   */
   reportClientConnection(data: event_types.ClientConnectionBucketData): Promise<void>;
+  /**
+   * Report a client disconnection.
+   */
   reportClientDisconnection(data: event_types.ClientDisconnectionEventData): Promise<void>;
-
+  /**
+   * Get currently connected clients.
+   * This will return any short or long term connected clients.
+   * Clients that have no disconnected_at timestamp and that have a valid jwt_exp timestamp are considered connected.
+   */
+  getConnectedClients(): Promise<event_types.ClientConnectionReportResponse>;
+  /**
+   * Get a report of client connections over a day, week or month.
+   * This is internally used to generate reports over it always returns the previous day, week or month.
+   * Usually this is call on the start of the new day, week or month. It will return all unique completed connections
+   * as well as uniques currently connected clients.
+   */
   getClientConnectionReports(
     data: event_types.ClientConnectionReportRequest
-  ): Promise<event_types.
+  ): Promise<event_types.ClientConnectionReportResponse>;
+  /**
+   * Delete old connection data based on a specific date.
+   * This is used to clean up old connection data that is no longer needed.
+   */
   deleteOldConnectionData(data: event_types.DeleteOldConnectionData): Promise<void>;
 }
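
The `getConnectedClients` contract above defines connectedness in terms of the `disconnected_at` and `jwt_exp` timestamps. A minimal sketch of that rule follows, assuming a simplified record shape; the field names come from the doc comments, not from the actual `event_types` definitions.

```ts
// Simplified stand-in for stored connection data; the real event_types shapes carry more fields.
interface StoredConnection {
  client_id: string;
  user_id: string;
  jwt_exp: Date;
  disconnected_at?: Date;
}

// A client counts as connected when it has no disconnected_at timestamp and its JWT has not expired.
function isConnected(record: StoredConnection, now: Date = new Date()): boolean {
  return record.disconnected_at == null && record.jwt_exp.getTime() > now.getTime();
}

// Example: filter stored records down to the currently connected clients.
function connectedClients(records: StoredConnection[], now: Date = new Date()): StoredConnection[] {
  return records.filter((record) => isConnected(record, now));
}
```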

package/src/sync/sync.ts
CHANGED

@@ -27,6 +27,7 @@ export interface SyncStreamParameters {
   params: util.StreamingSyncRequest;
   token: auth.JwtPayload;
   logger?: Logger;
+  isEncodingAsBson: boolean;
   /**
    * If this signal is aborted, the stream response ends as soon as possible, without error.
    */
@@ -39,7 +40,17 @@ export interface SyncStreamParameters {
 export async function* streamResponse(
   options: SyncStreamParameters
 ): AsyncIterable<util.StreamingSyncLine | string | null> {
-  const {
+  const {
+    syncContext,
+    bucketStorage,
+    syncRules,
+    params,
+    token,
+    tokenStreamOptions,
+    tracker,
+    signal,
+    isEncodingAsBson
+  } = options;
   const logger = options.logger ?? defaultLogger;

   // We also need to be able to abort, so we create our own controller.
@@ -65,7 +76,8 @@ export async function* streamResponse(
     token,
     tracker,
     controller.signal,
-    logger
+    logger,
+    isEncodingAsBson
   );
   // Merge the two streams, and abort as soon as one of the streams end.
   const merged = mergeAsyncIterables([stream, ki], controller.signal);
@@ -93,9 +105,10 @@ async function* streamResponseInner(
   tokenPayload: RequestJwtPayload,
   tracker: RequestTracker,
   signal: AbortSignal,
-  logger: Logger
+  logger: Logger,
+  isEncodingAsBson: boolean
 ): AsyncGenerator<util.StreamingSyncLine | string | null> {
-  const { raw_data
+  const { raw_data } = params;

   const userId = tokenPayload.sub;
   const checkpointUserId = util.checkpointUserId(userId as string, params.client_id);
@@ -225,8 +238,7 @@ async function* streamResponseInner(
     checkpoint: next.value.value.checkpoint,
     bucketsToFetch: buckets,
     checkpointLine: line,
-    raw_data,
-    binary_data,
+    legacyDataLines: !isEncodingAsBson && params.raw_data != true,
     onRowsSent: markOperationsSent,
     abort_connection: signal,
     abort_batch: abortCheckpointSignal,
@@ -255,8 +267,8 @@ interface BucketDataRequest {
   checkpointLine: CheckpointLine;
   /** Subset of checkpointLine.bucketsToFetch, filtered by priority. */
   bucketsToFetch: BucketDescription[];
-
-
+  /** Whether data lines should be encoded in a legacy format where {@link util.OplogEntry.data} is a nested object. */
+  legacyDataLines: boolean;
   /** Signals that the connection was aborted and that streaming should stop ASAP. */
   abort_connection: AbortSignal;
   /**
@@ -317,8 +329,7 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
   checkpoint,
   bucketsToFetch,
   checkpointLine,
-
-  binary_data,
+  legacyDataLines,
   abort_connection,
   abort_batch,
   onRowsSent,
@@ -368,32 +379,21 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
   }
   logger.debug(`Sending data for ${r.bucket}`);

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    const response: util.StreamingSyncData = {
-      data: transformLegacyResponse(r)
-    };
-    send_data = JSONBig.stringify(response);
-  }
-  yield { data: send_data, done: false };
-  if (send_data.length > 50_000) {
-    // IMPORTANT: This does not affect the output stream, but is used to flush
-    // iterator memory in case if large data sent.
-    yield { data: null, done: false };
-  }
+  const line = legacyDataLines
+    ? // We need to preserve the embedded data exactly, so this uses a JsonContainer
+      // and JSONBig to stringify.
+      JSONBig.stringify({
+        data: transformLegacyResponse(r)
+      } satisfies util.StreamingSyncData)
+    : // We can send the object as-is, which will be converted to JSON or BSON by a downstream transformer.
+      ({ data: r } satisfies util.StreamingSyncData);
+
+  yield { data: line, done: false };
+
+  // IMPORTANT: This does not affect the output stream, but is used to flush
+  // iterator memory in case if large data sent.
+  yield { data: null, done: false };
+
   onRowsSent(statsForBatch(r));

   checkpointLine.updateBucketPosition({ bucket: r.bucket, nextAfter: BigInt(r.next_after), hasMore: r.has_more });
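
With `legacyDataLines`, a data line is either pre-serialized with JSONBig (legacy clients) or passed through as a plain object for a downstream transformer to encode; the HTTP route earlier in this diff then picks `sync.ndjson(...)` or `sync.bsonLines(...)` based on `useBson`. A rough sketch of what such transformers could look like, assuming the `bson` package; this is not the package's actual implementation, and it uses standard JSON rather than JSONBig for brevity.

```ts
import { serialize } from 'bson';

// Encodes a stream of sync lines as newline-delimited JSON. Null lines are only used upstream
// to flush iterator memory, so they are skipped here.
export async function* ndjsonSketch(
  lines: AsyncIterable<Record<string, any> | string | null>
): AsyncIterable<string> {
  for await (const line of lines) {
    if (line == null) {
      continue;
    }
    // Legacy lines arrive pre-serialized as strings; everything else is stringified here.
    yield (typeof line == 'string' ? line : JSON.stringify(line)) + '\n';
  }
}

// Encodes the same stream as a concatenated sequence of BSON documents. Legacy string lines only
// occur on the JSON path, so only object lines are expected here.
export async function* bsonLinesSketch(
  lines: AsyncIterable<Record<string, any> | string | null>
): AsyncIterable<Uint8Array> {
  for await (const line of lines) {
    if (line == null || typeof line == 'string') {
      continue;
    }
    yield serialize(line);
  }
}
```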

package/src/util/protocol-types.ts
CHANGED

@@ -76,11 +76,6 @@ export const StreamingSyncRequest = t.object({
    */
   raw_data: t.boolean.optional(),

-  /**
-   * Data is received in a serialized BSON Buffer
-   */
-  binary_data: t.boolean.optional(),
-
   /**
    * Client parameters to be passed to the sync rules.
    */

package/test/src/sync/BucketChecksumState.test.ts
CHANGED

@@ -607,11 +607,20 @@ bucket_definitions:
   describe('streams', () => {
     let source: { -readonly [P in keyof BucketSource]: BucketSource[P] };
     let storage: MockBucketChecksumStateStorage;
-    let staticBucketIds = ['stream|0[]'];

-    function checksumState(options?: Partial<BucketChecksumStateOptions>) {
-
-
+    function checksumState(source: string | boolean, options?: Partial<BucketChecksumStateOptions>) {
+      if (typeof source == 'boolean') {
+        source = `
+streams:
+  stream:
+    auto_subscribe: ${source}
+    query: SELECT * FROM assets WHERE id IN ifnull(subscription.parameter('ids'), '["default"]');
+`;
+      }
+
+      const rules = SqlSyncRules.fromYaml(source, {
+        defaultSchema: 'public'
+      });

       return new BucketChecksumState({
         syncContext,
@@ -623,77 +632,15 @@ bucket_definitions:
       });
     }

-    function createQuerier(ids: string[], subscription: number | null): BucketParameterQuerier {
-      return {
-        staticBuckets: ids.map((bucket) => ({
-          definition: 'stream',
-          inclusion_reasons: subscription == null ? ['default'] : [{ subscription }],
-          bucket,
-          priority: 3
-        })),
-        hasDynamicBuckets: false,
-        parameterQueryLookups: [],
-        queryDynamicBucketDescriptions: function (): never {
-          throw new Error('no dynamic buckets.');
-        }
-      };
-    }
-
     beforeEach(() => {
-      // Currently using mocked streams before streams are actually implemented as parsable rules.
-      source = {
-        name: 'stream',
-        type: BucketSourceType.SYNC_STREAM,
-        subscribedToByDefault: false,
-        pushBucketParameterQueriers(result, options) {
-          // Create a fake querier that resolves the global stream["default"] bucket by default and allows extracting
-          // additional buckets from parameters.
-          const subscriptions = options.streams['stream'] ?? [];
-          if (!this.subscribedToByDefault && !subscriptions.length) {
-            return;
-          }
-
-          let hasExplicitDefaultSubscription = false;
-          for (const subscription of subscriptions) {
-            try {
-              let subscriptionParameters = [];
-
-              if (subscription.parameters != null) {
-                subscriptionParameters = JSON.parse(subscription.parameters['ids'] as string).map(
-                  (e: string) => `stream["${e}"]`
-                );
-              } else {
-                hasExplicitDefaultSubscription = true;
-              }
-
-              result.queriers.push(createQuerier([...subscriptionParameters], subscription.opaque_id));
-            } catch (e) {
-              result.errors.push({
-                descriptor: 'stream',
-                subscription,
-                message: `Error evaluating bucket ids: ${e.message}`
-              });
-            }
-          }
-
-          // If the stream is subscribed to by default and there is no explicit subscription that would match the default
-          // subscription, also include the default querier.
-          if (this.subscribedToByDefault && !hasExplicitDefaultSubscription) {
-            result.queriers.push(createQuerier(['stream["default"]'], null));
-          }
-        }
-      } satisfies Partial<BucketSource> as any;
-
       storage = new MockBucketChecksumStateStorage();
-      storage.updateTestChecksum({ bucket: 'stream["default"]', checksum: 1, count: 1 });
-      storage.updateTestChecksum({ bucket: 'stream["a"]', checksum: 1, count: 1 });
-      storage.updateTestChecksum({ bucket: 'stream["b"]', checksum: 1, count: 1 });
+      storage.updateTestChecksum({ bucket: 'stream|0["default"]', checksum: 1, count: 1 });
+      storage.updateTestChecksum({ bucket: 'stream|0["a"]', checksum: 1, count: 1 });
+      storage.updateTestChecksum({ bucket: 'stream|0["b"]', checksum: 1, count: 1 });
     });

     test('includes defaults', async () => {
-
-      const state = checksumState();
-
+      const state = checksumState(true);
       const line = await state.buildNextCheckpointLine({
         base: storage.makeCheckpoint(1n),
         writeCheckpoint: null,
@@ -703,7 +650,7 @@ bucket_definitions:
       expect(line?.checkpointLine).toEqual({
         checkpoint: {
           buckets: [
-            { bucket: 'stream["default"]', checksum: 1, count: 1, priority: 3, subscriptions: [{ default: 0 }] }
+            { bucket: 'stream|0["default"]', checksum: 1, count: 1, priority: 3, subscriptions: [{ default: 0 }] }
           ],
           last_op_id: '1',
           write_checkpoint: undefined,
@@ -713,8 +660,7 @@ bucket_definitions:
     });

     test('can exclude defaults', async () => {
-
-      const state = checksumState({ syncRequest: { streams: { include_defaults: false, subscriptions: [] } } });
+      const state = checksumState(true, { syncRequest: { streams: { include_defaults: false, subscriptions: [] } } });

       const line = await state.buildNextCheckpointLine({
         base: storage.makeCheckpoint(1n),
@@ -733,9 +679,7 @@ bucket_definitions:
     });

     test('custom subscriptions', async () => {
-
-
-      const state = checksumState({
+      const state = checksumState(true, {
        syncRequest: {
          streams: {
            subscriptions: [
@@ -755,9 +699,9 @@ bucket_definitions:
       expect(line?.checkpointLine).toEqual({
         checkpoint: {
           buckets: [
-            { bucket: 'stream["a"]', checksum: 1, count: 1, priority: 3, subscriptions: [{ sub: 0 }] },
-            { bucket: 'stream["b"]', checksum: 1, count: 1, priority: 1, subscriptions: [{ sub: 1 }] },
-            { bucket: 'stream["default"]', checksum: 1, count: 1, priority: 3, subscriptions: [{ default: 0 }] }
+            { bucket: 'stream|0["a"]', checksum: 1, count: 1, priority: 3, subscriptions: [{ sub: 0 }] },
+            { bucket: 'stream|0["b"]', checksum: 1, count: 1, priority: 1, subscriptions: [{ sub: 1 }] },
+            { bucket: 'stream|0["default"]', checksum: 1, count: 1, priority: 3, subscriptions: [{ default: 0 }] }
           ],
           last_op_id: '1',
           write_checkpoint: undefined,
@@ -767,7 +711,7 @@ bucket_definitions:
     });

     test('overlap between custom subscriptions', async () => {
-      const state = checksumState({
+      const state = checksumState(false, {
        syncRequest: {
          streams: {
            subscriptions: [
@@ -787,8 +731,8 @@ bucket_definitions:
       expect(line?.checkpointLine).toEqual({
         checkpoint: {
           buckets: [
-            { bucket: 'stream["a"]', checksum: 1, count: 1, priority: 3, subscriptions: [{ sub: 0 }] },
-            { bucket: 'stream["b"]', checksum: 1, count: 1, priority: 1, subscriptions: [{ sub: 0 }, { sub: 1 }] }
+            { bucket: 'stream|0["a"]', checksum: 1, count: 1, priority: 3, subscriptions: [{ sub: 0 }] },
+            { bucket: 'stream|0["b"]', checksum: 1, count: 1, priority: 1, subscriptions: [{ sub: 0 }, { sub: 1 }] }
           ],
           last_op_id: '1',
           write_checkpoint: undefined,
@@ -798,8 +742,7 @@ bucket_definitions:
     });

     test('overlap between default and custom subscription', async () => {
-
-      const state = checksumState({
+      const state = checksumState(true, {
        syncRequest: {
          streams: {
            subscriptions: [{ stream: 'stream', parameters: { ids: '["a", "default"]' }, override_priority: 1 }]
@@ -816,9 +759,9 @@ bucket_definitions:
       expect(line?.checkpointLine).toEqual({
         checkpoint: {
           buckets: [
-            { bucket: 'stream["a"]', checksum: 1, count: 1, priority: 1, subscriptions: [{ sub: 0 }] },
+            { bucket: 'stream|0["a"]', checksum: 1, count: 1, priority: 1, subscriptions: [{ sub: 0 }] },
             {
-              bucket: 'stream["default"]',
+              bucket: 'stream|0["default"]',
               checksum: 1,
               count: 1,
               priority: 1,
@@ -833,9 +776,7 @@ bucket_definitions:
     });

     test('reports errors', async () => {
-
-
-      const state = checksumState({
+      const state = checksumState(true, {
        syncRequest: {
          streams: {
            subscriptions: [
@@ -855,10 +796,10 @@ bucket_definitions:
       expect(line?.checkpointLine).toEqual({
         checkpoint: {
           buckets: [
-            { bucket: 'stream["a"]', checksum: 1, count: 1, priority: 1, subscriptions: [{ sub: 0 }] },
-            { bucket: 'stream["b"]', checksum: 1, count: 1, priority: 1, subscriptions: [{ sub: 0 }] },
+            { bucket: 'stream|0["a"]', checksum: 1, count: 1, priority: 1, subscriptions: [{ sub: 0 }] },
+            { bucket: 'stream|0["b"]', checksum: 1, count: 1, priority: 1, subscriptions: [{ sub: 0 }] },
             {
-              bucket: 'stream["default"]',
+              bucket: 'stream|0["default"]',
               checksum: 1,
               count: 1,
               priority: 3,