@powersync/service-core 0.0.0-dev-20250813080357 → 0.0.0-dev-20250819134004

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. package/CHANGELOG.md +10 -6
  2. package/dist/api/diagnostics.js +1 -1
  3. package/dist/api/diagnostics.js.map +1 -1
  4. package/dist/events/EventsEngine.d.ts +11 -0
  5. package/dist/{emitters/EmitterEngine.js → events/EventsEngine.js} +2 -2
  6. package/dist/events/EventsEngine.js.map +1 -0
  7. package/dist/routes/configure-fastify.d.ts +40 -0
  8. package/dist/routes/endpoints/socket-route.js +4 -7
  9. package/dist/routes/endpoints/socket-route.js.map +1 -1
  10. package/dist/routes/endpoints/sync-rules.js +1 -27
  11. package/dist/routes/endpoints/sync-rules.js.map +1 -1
  12. package/dist/routes/endpoints/sync-stream.d.ts +80 -0
  13. package/dist/routes/endpoints/sync-stream.js +7 -11
  14. package/dist/routes/endpoints/sync-stream.js.map +1 -1
  15. package/dist/storage/ReportStorage.d.ts +8 -0
  16. package/dist/storage/ReportStorage.js +2 -0
  17. package/dist/storage/ReportStorage.js.map +1 -0
  18. package/dist/storage/StorageEngine.d.ts +2 -2
  19. package/dist/storage/StorageEngine.js.map +1 -1
  20. package/dist/storage/StorageProvider.d.ts +3 -3
  21. package/dist/storage/storage-index.d.ts +1 -1
  22. package/dist/storage/storage-index.js +1 -1
  23. package/dist/storage/storage-index.js.map +1 -1
  24. package/dist/sync/BucketChecksumState.d.ts +34 -8
  25. package/dist/sync/BucketChecksumState.js +153 -18
  26. package/dist/sync/BucketChecksumState.js.map +1 -1
  27. package/dist/sync/sync.d.ts +1 -2
  28. package/dist/sync/sync.js +8 -10
  29. package/dist/sync/sync.js.map +1 -1
  30. package/dist/system/ServiceContext.d.ts +3 -3
  31. package/dist/system/ServiceContext.js +7 -4
  32. package/dist/system/ServiceContext.js.map +1 -1
  33. package/dist/util/protocol-types.d.ts +153 -5
  34. package/dist/util/protocol-types.js +41 -2
  35. package/dist/util/protocol-types.js.map +1 -1
  36. package/package.json +6 -6
  37. package/src/api/diagnostics.ts +1 -1
  38. package/src/{emitters/EmitterEngine.ts → events/EventsEngine.ts} +5 -6
  39. package/src/routes/endpoints/socket-route.ts +5 -9
  40. package/src/routes/endpoints/sync-rules.ts +1 -28
  41. package/src/routes/endpoints/sync-stream.ts +8 -13
  42. package/src/storage/ReportStorage.ts +11 -0
  43. package/src/storage/StorageEngine.ts +3 -3
  44. package/src/storage/StorageProvider.ts +3 -4
  45. package/src/storage/storage-index.ts +1 -1
  46. package/src/sync/BucketChecksumState.ts +183 -26
  47. package/src/sync/sync.ts +15 -13
  48. package/src/system/ServiceContext.ts +8 -5
  49. package/src/util/protocol-types.ts +138 -5
  50. package/test/src/sync/BucketChecksumState.test.ts +366 -34
  51. package/tsconfig.tsbuildinfo +1 -1
  52. package/dist/emitters/EmitterEngine.d.ts +0 -12
  53. package/dist/emitters/EmitterEngine.js.map +0 -1
  54. package/dist/emitters/emitter-interfaces.d.ts +0 -8
  55. package/dist/emitters/emitter-interfaces.js +0 -2
  56. package/dist/emitters/emitter-interfaces.js.map +0 -1
  57. package/dist/storage/ReportStorageFactory.d.ts +0 -8
  58. package/dist/storage/ReportStorageFactory.js +0 -2
  59. package/dist/storage/ReportStorageFactory.js.map +0 -1
  60. package/src/emitters/emitter-interfaces.ts +0 -12
  61. package/src/storage/ReportStorageFactory.ts +0 -9
package/src/sync/BucketChecksumState.ts CHANGED
@@ -1,4 +1,13 @@
-import { BucketDescription, RequestParameters, SqlSyncRules } from '@powersync/service-sync-rules';
+import {
+  BucketDescription,
+  BucketPriority,
+  BucketSource,
+  RequestedStream,
+  RequestJwtPayload,
+  RequestParameters,
+  ResolvedBucket,
+  SqlSyncRules
+} from '@powersync/service-sync-rules';
 
 import * as storage from '../storage/storage-index.js';
 import * as util from '../util/util-index.js';
@@ -11,7 +20,7 @@ import {
   logger as defaultLogger
 } from '@powersync/lib-services-framework';
 import { JSONBig } from '@powersync/service-jsonbig';
-import { BucketParameterQuerier } from '@powersync/service-sync-rules/src/BucketParameterQuerier.js';
+import { BucketParameterQuerier, QuerierError } from '@powersync/service-sync-rules/src/BucketParameterQuerier.js';
 import { SyncContext } from './SyncContext.js';
 import { getIntersection, hasIntersection } from './util.js';
 
@@ -19,9 +28,9 @@ export interface BucketChecksumStateOptions {
   syncContext: SyncContext;
   bucketStorage: BucketChecksumStateStorage;
   syncRules: SqlSyncRules;
-  syncParams: RequestParameters;
+  tokenPayload: RequestJwtPayload;
+  syncRequest: util.StreamingSyncRequest;
   logger?: Logger;
-  initialBucketPositions?: { name: string; after: util.InternalOpId }[];
 }
 
 type BucketSyncState = {
@@ -50,6 +59,17 @@ export class BucketChecksumState {
    */
   private lastChecksums: util.ChecksumMap | null = null;
   private lastWriteCheckpoint: bigint | null = null;
+  /**
+   * Once we've sent the first full checkpoint line including all {@link util.Checkpoint.streams} that the user is
+   * subscribed to, we keep an index of the stream names to their index in that array.
+   *
+   * This is used to compress the representation of buckets in `checkpoint` and `checkpoint_diff` lines: For buckets
+   * that are part of sync rules or default streams, we need to include the name of the defining sync rule or definition
+   * yielding that bucket (so that clients can track progress for default streams).
+   * But instead of sending the name for each bucket, we use the fact that it's part of the streams array and only send
+   * their index, reducing the size of those messages.
+   */
+  private streamNameToIndex: Map<string, number> | null = null;
 
   private readonly parameterState: BucketParameterState;
 
@@ -69,13 +89,14 @@
       options.syncContext,
       options.bucketStorage,
       options.syncRules,
-      options.syncParams,
+      options.tokenPayload,
+      options.syncRequest,
       this.logger
     );
     this.bucketDataPositions = new Map();
 
-    for (let { name, after: start } of options.initialBucketPositions ?? []) {
-      this.bucketDataPositions.set(name, { start_op_id: start });
+    for (let { name, after: start } of options.syncRequest.buckets ?? []) {
+      this.bucketDataPositions.set(name, { start_op_id: BigInt(start) });
     }
   }
 
@@ -158,6 +179,7 @@ export class BucketChecksumState {
     // TODO: If updatedBuckets is present, we can use that to more efficiently calculate a diff,
     // and avoid any unnecessary loops through the entire list of buckets.
     const diff = util.checksumsDiff(this.lastChecksums, checksumMap);
+    const streamNameToIndex = this.streamNameToIndex!;
 
     if (
       this.lastWriteCheckpoint == writeCheckpoint &&
@@ -182,12 +204,12 @@
 
       const updatedBucketDescriptions = diff.updatedBuckets.map((e) => ({
         ...e,
-        priority: bucketDescriptionMap.get(e.bucket)!.priority
+        ...this.parameterState.translateResolvedBucket(bucketDescriptionMap.get(e.bucket)!, streamNameToIndex)
      }));
      bucketsToFetch = [...generateBucketsToFetch].map((b) => {
        return {
-          bucket: b,
-          priority: bucketDescriptionMap.get(b)!.priority
+          priority: bucketDescriptionMap.get(b)!.priority,
+          bucket: b
        };
      });
 
@@ -220,15 +242,37 @@
        message += `buckets: ${allBuckets.length} ${limitedBuckets(allBuckets, 20)}`;
        this.logger.info(message, { checkpoint: base.checkpoint, user_id: user_id, buckets: allBuckets.length });
      };
-      bucketsToFetch = allBuckets;
+      bucketsToFetch = allBuckets.map((b) => ({ bucket: b.bucket, priority: b.priority }));
+
+      const subscriptions: util.StreamDescription[] = [];
+      const streamNameToIndex = new Map<string, number>();
+      this.streamNameToIndex = streamNameToIndex;
+
+      for (const source of this.parameterState.syncRules.bucketSources) {
+        if (this.parameterState.isSubscribedToStream(source)) {
+          streamNameToIndex.set(source.name, subscriptions.length);
+
+          subscriptions.push({
+            name: source.name,
+            is_default: source.subscribedToByDefault,
+            errors:
+              this.parameterState.streamErrors[source.name]?.map((e) => ({
+                subscription: e.subscription?.opaque_id ?? 'default',
+                message: e.message
+              })) ?? []
+          });
+        }
+      }
+
      checkpointLine = {
        checkpoint: {
          last_op_id: util.internalToExternalOpId(base.checkpoint),
          write_checkpoint: writeCheckpoint ? String(writeCheckpoint) : undefined,
          buckets: [...checksumMap.values()].map((e) => ({
            ...e,
-            priority: bucketDescriptionMap.get(e.bucket)!.priority
-          }))
+            ...this.parameterState.translateResolvedBucket(bucketDescriptionMap.get(e.bucket)!, streamNameToIndex)
+          })),
+          streams: subscriptions
        }
      } satisfies util.StreamingSyncCheckpoint;
    }
@@ -319,7 +363,7 @@ export interface CheckpointUpdate {
   /**
    * All buckets forming part of the checkpoint.
    */
-  buckets: BucketDescription[];
+  buckets: ResolvedBucket[];
 
   /**
    * If present, a set of buckets that have been updated since the last checkpoint.
@@ -335,9 +379,19 @@ export class BucketParameterState {
   public readonly syncRules: SqlSyncRules;
   public readonly syncParams: RequestParameters;
   private readonly querier: BucketParameterQuerier;
-  private readonly staticBuckets: Map<string, BucketDescription>;
+  /**
+   * Static buckets. This map is guaranteed not to change during a request, since resolving static buckets can only
+   * take request parameters into account,
+   */
+  private readonly staticBuckets: Map<string, ResolvedBucket>;
+  private readonly includeDefaultStreams: boolean;
+  // Indexed by the client-side id
+  private readonly explicitStreamSubscriptions: util.RequestedStreamSubscription[];
+  // Indexed by descriptor name.
+  readonly streamErrors: Record<string, QuerierError[]>;
+  private readonly subscribedStreamNames: Set<string>;
   private readonly logger: Logger;
-  private cachedDynamicBuckets: BucketDescription[] | null = null;
+  private cachedDynamicBuckets: ResolvedBucket[] | null = null;
   private cachedDynamicBucketSet: Set<string> | null = null;
 
   private readonly lookups: Set<string>;
@@ -346,18 +400,92 @@ export class BucketParameterState {
     context: SyncContext,
     bucketStorage: BucketChecksumStateStorage,
     syncRules: SqlSyncRules,
-    syncParams: RequestParameters,
+    tokenPayload: RequestJwtPayload,
+    request: util.StreamingSyncRequest,
     logger: Logger
   ) {
     this.context = context;
     this.bucketStorage = bucketStorage;
     this.syncRules = syncRules;
-    this.syncParams = syncParams;
+    this.syncParams = new RequestParameters(tokenPayload, request.parameters ?? {});
     this.logger = logger;
 
-    this.querier = syncRules.getBucketParameterQuerier(this.syncParams);
-    this.staticBuckets = new Map<string, BucketDescription>(this.querier.staticBuckets.map((b) => [b.bucket, b]));
+    const streamsByName: Record<string, RequestedStream[]> = {};
+    const subscriptions = request.streams;
+    const explicitStreamSubscriptions: util.RequestedStreamSubscription[] = subscriptions?.subscriptions ?? [];
+    if (subscriptions) {
+      for (let i = 0; i < explicitStreamSubscriptions.length; i++) {
+        const subscription = explicitStreamSubscriptions[i];
+
+        const syncRuleStream: RequestedStream = {
+          parameters: subscription.parameters ?? {},
+          opaque_id: i
+        };
+        if (Object.hasOwn(streamsByName, subscription.stream)) {
+          streamsByName[subscription.stream].push(syncRuleStream);
+        } else {
+          streamsByName[subscription.stream] = [syncRuleStream];
+        }
+      }
+    }
+    this.includeDefaultStreams = subscriptions?.include_defaults ?? true;
+    this.explicitStreamSubscriptions = explicitStreamSubscriptions;
+
+    const { querier, errors } = syncRules.getBucketParameterQuerier({
+      globalParameters: this.syncParams,
+      hasDefaultStreams: this.includeDefaultStreams,
+      streams: streamsByName
+    });
+    this.querier = querier;
+    this.streamErrors = Object.groupBy(errors, (e) => e.descriptor) as Record<string, QuerierError[]>;
+
+    this.staticBuckets = new Map<string, ResolvedBucket>(
+      mergeBuckets(this.querier.staticBuckets).map((b) => [b.bucket, b])
+    );
     this.lookups = new Set<string>(this.querier.parameterQueryLookups.map((l) => JSONBig.stringify(l.values)));
+    this.subscribedStreamNames = new Set(Object.keys(streamsByName));
+  }
+
+  /**
+   * Translates an internal sync-rules {@link ResolvedBucket} instance to the public
+   * {@link util.ClientBucketDescription}.
+   *
+   * @param lookupIndex A map from stream names to their index in {@link util.Checkpoint.streams}. These are used to
+   * reference default buckets by their stream index instead of duplicating the name on wire.
+   */
+  translateResolvedBucket(description: ResolvedBucket, lookupIndex: Map<string, number>): util.ClientBucketDescription {
+    // If the client is overriding the priority of any stream that yields this bucket, sync the bucket with that
+    // priority.
+    let priorityOverride: BucketPriority | null = null;
+    for (const reason of description.inclusion_reasons) {
+      if (reason != 'default') {
+        const requestedPriority = this.explicitStreamSubscriptions[reason.subscription]?.override_priority;
+        if (requestedPriority != null) {
+          if (priorityOverride == null) {
+            priorityOverride = requestedPriority as BucketPriority;
+          } else {
+            priorityOverride = Math.min(requestedPriority, priorityOverride) as BucketPriority;
+          }
+        }
+      }
+    }
+
+    return {
+      bucket: description.bucket,
+      priority: priorityOverride ?? description.priority,
+      subscriptions: description.inclusion_reasons.map((reason) => {
+        if (reason == 'default') {
+          const stream = description.definition;
+          return { default: lookupIndex.get(stream)! };
+        } else {
+          return { sub: reason.subscription };
+        }
+      })
+    };
+  }
+
+  isSubscribedToStream(desc: BucketSource): boolean {
+    return (desc.subscribedToByDefault && this.includeDefaultStreams) || this.subscribedStreamNames.has(desc.name);
   }
 
   async getCheckpointUpdate(checkpoint: storage.StorageCheckpointUpdate): Promise<CheckpointUpdate> {
@@ -391,19 +519,19 @@
    * For static buckets, we can keep track of which buckets have been updated.
    */
   private async getCheckpointUpdateStatic(checkpoint: storage.StorageCheckpointUpdate): Promise<CheckpointUpdate> {
-    const querier = this.querier;
+    const staticBuckets = [...this.staticBuckets.values()];
     const update = checkpoint.update;
 
     if (update.invalidateDataBuckets) {
       return {
-        buckets: querier.staticBuckets,
+        buckets: staticBuckets,
         updatedBuckets: INVALIDATE_ALL_BUCKETS
       };
     }
 
     const updatedBuckets = new Set<string>(getIntersection(this.staticBuckets, update.updatedDataBuckets));
     return {
-      buckets: querier.staticBuckets,
+      buckets: staticBuckets,
      updatedBuckets
    };
  }
@@ -414,7 +542,7 @@
   private async getCheckpointUpdateDynamic(checkpoint: storage.StorageCheckpointUpdate): Promise<CheckpointUpdate> {
     const querier = this.querier;
     const storage = this.bucketStorage;
-    const staticBuckets = querier.staticBuckets;
+    const staticBuckets = this.staticBuckets.values();
     const update = checkpoint.update;
 
     let hasParameterChange = false;
@@ -436,7 +564,7 @@
       }
     }
 
-    let dynamicBuckets: BucketDescription[];
+    let dynamicBuckets: ResolvedBucket[];
     if (hasParameterChange || this.cachedDynamicBuckets == null || this.cachedDynamicBucketSet == null) {
       dynamicBuckets = await querier.queryDynamicBucketDescriptions({
         getParameterSets(lookups) {
@@ -458,7 +586,7 @@
        }
      }
    }
-    const allBuckets = [...staticBuckets, ...dynamicBuckets];
+    const allBuckets = [...staticBuckets, ...mergeBuckets(dynamicBuckets)];
 
    if (invalidateDataBuckets) {
      return {
@@ -517,3 +645,32 @@ function limitedBuckets(buckets: string[] | { bucket: string }[], limit: number)
   const limited = buckets.slice(0, limit);
   return `${JSON.stringify(limited)}...`;
 }
+
+/**
+ * Resolves duplicate buckets in the given array, merging the inclusion reasons for duplicate.
+ *
+ * It's possible for duplicates to occur when a stream has multiple subscriptions, consider e.g.
+ *
+ * ```
+ * sync_streams:
+ *   assets_by_category:
+ *     query: select * from assets where category in (request.parameters() -> 'categories')
+ * ```
+ *
+ * Here, a client might subscribe once with `{"categories": [1]}` and once with `{"categories": [1, 2]}`. Since each
+ * subscription is evaluated independently, this would lead to three buckets, with a duplicate `assets_by_category[1]`
+ * bucket.
+ */
+function mergeBuckets(buckets: ResolvedBucket[]): ResolvedBucket[] {
+  const byBucketId: Record<string, ResolvedBucket> = {};
+
+  for (const bucket of buckets) {
+    if (Object.hasOwn(byBucketId, bucket.bucket)) {
+      byBucketId[bucket.bucket].inclusion_reasons.push(...bucket.inclusion_reasons);
+    } else {
+      byBucketId[bucket.bucket] = structuredClone(bucket);
+    }
+  }
+
+  return Object.values(byBucketId);
+}
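
Note: the `{ default: n }` and `{ sub: n }` references emitted by `translateResolvedBucket` above are indices rather than names. As a minimal client-side sketch (not part of this package; the helper name and parameter shapes are illustrative), such a reference can be resolved back to a stream name like this:

// Hypothetical helper: resolves a compressed subscription reference from a checkpoint bucket.
// `checkpointStreams` is the checkpoint's `streams` array; `requestedSubscriptions` is the
// client's own `streams.subscriptions` array from the original sync request.
function resolveSubscriptionStreamName(
  reason: { default: number } | { sub: number },
  checkpointStreams: { name: string }[],
  requestedSubscriptions: { stream: string }[]
): string {
  return 'default' in reason
    ? checkpointStreams[reason.default].name // index into Checkpoint.streams
    : requestedSubscriptions[reason.sub].stream; // index into the request's subscriptions
}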
package/src/sync/sync.ts CHANGED
@@ -1,5 +1,11 @@
 import { JSONBig, JsonContainer } from '@powersync/service-jsonbig';
-import { BucketDescription, BucketPriority, RequestParameters, SqlSyncRules } from '@powersync/service-sync-rules';
+import {
+  BucketDescription,
+  BucketPriority,
+  RequestJwtPayload,
+  RequestParameters,
+  SqlSyncRules
+} from '@powersync/service-sync-rules';
 
 import { AbortError } from 'ix/aborterror.js';
 
@@ -19,7 +25,6 @@ export interface SyncStreamParameters {
   bucketStorage: storage.SyncRulesBucketStorage;
   syncRules: SqlSyncRules;
   params: util.StreamingSyncRequest;
-  syncParams: RequestParameters;
   token: auth.JwtPayload;
   logger?: Logger;
   /**
@@ -34,8 +39,7 @@
 export async function* streamResponse(
   options: SyncStreamParameters
 ): AsyncIterable<util.StreamingSyncLine | string | null> {
-  const { syncContext, bucketStorage, syncRules, params, syncParams, token, tokenStreamOptions, tracker, signal } =
-    options;
+  const { syncContext, bucketStorage, syncRules, params, token, tokenStreamOptions, tracker, signal } = options;
   const logger = options.logger ?? defaultLogger;
 
   // We also need to be able to abort, so we create our own controller.
@@ -58,7 +62,7 @@
      bucketStorage,
      syncRules,
      params,
-      syncParams,
+      token,
      tracker,
      controller.signal,
      logger
@@ -86,24 +90,22 @@ async function* streamResponseInner(
   bucketStorage: storage.SyncRulesBucketStorage,
   syncRules: SqlSyncRules,
   params: util.StreamingSyncRequest,
-  syncParams: RequestParameters,
+  tokenPayload: RequestJwtPayload,
   tracker: RequestTracker,
   signal: AbortSignal,
   logger: Logger
 ): AsyncGenerator<util.StreamingSyncLine | string | null> {
   const { raw_data, binary_data } = params;
 
-  const checkpointUserId = util.checkpointUserId(syncParams.tokenParameters.user_id as string, params.client_id);
+  const userId = tokenPayload.sub;
+  const checkpointUserId = util.checkpointUserId(userId as string, params.client_id);
 
   const checksumState = new BucketChecksumState({
     syncContext,
     bucketStorage,
     syncRules,
-    syncParams,
-    initialBucketPositions: params.buckets?.map((bucket) => ({
-      name: bucket.name,
-      after: BigInt(bucket.after)
-    })),
+    tokenPayload,
+    syncRequest: params,
     logger: logger
   });
   const stream = bucketStorage.watchCheckpointChanges({
@@ -228,7 +230,7 @@
      onRowsSent: markOperationsSent,
      abort_connection: signal,
      abort_batch: abortCheckpointSignal,
-      user_id: syncParams.userId,
+      user_id: userId,
      // Passing null here will emit a full sync complete message at the end. If we pass a priority, we'll emit a partial
      // sync complete message instead.
      forPriority: !isLast ? priority : null,
package/src/system/ServiceContext.ts CHANGED
@@ -8,7 +8,7 @@ import * as routes from '../routes/routes-index.js';
 import * as storage from '../storage/storage-index.js';
 import { SyncContext } from '../sync/SyncContext.js';
 import * as utils from '../util/util-index.js';
-import { EmitterEngine } from '../emitters/EmitterEngine.js';
+import { EventsEngine } from '../events/EventsEngine.js';
 
 export interface ServiceContext {
   configuration: utils.ResolvedPowerSyncConfig;
@@ -20,7 +20,7 @@ export interface ServiceContext {
   migrations: PowerSyncMigrationManager;
   syncContext: SyncContext;
   serviceMode: ServiceContextMode;
-  emitterEngine: EmitterEngine;
+  eventsEngine: EventsEngine;
 }
 
 export enum ServiceContextMode {
@@ -47,7 +47,7 @@ export class ServiceContextContainer implements ServiceContext {
   configuration: utils.ResolvedPowerSyncConfig;
   lifeCycleEngine: LifeCycledSystem;
   storageEngine: storage.StorageEngine;
-  emitterEngine: EmitterEngine;
+  eventsEngine: EventsEngine;
   syncContext: SyncContext;
   routerEngine: routes.RouterEngine;
   serviceMode: ServiceContextMode;
@@ -69,7 +69,10 @@ export class ServiceContextContainer implements ServiceContext {
      }
    });
 
-    this.emitterEngine = new EmitterEngine();
+    this.eventsEngine = new EventsEngine();
+    this.lifeCycleEngine.withLifecycle(this.eventsEngine, {
+      stop: (emitterEngine) => emitterEngine.shutDown()
+    });
 
    this.lifeCycleEngine.withLifecycle(this.storageEngine, {
      start: (storageEngine) => storageEngine.start(),
@@ -95,7 +98,7 @@ export class ServiceContextContainer implements ServiceContext {
      start: () => migrationManager[Symbol.asyncDispose]()
    });
 
-    this.lifeCycleEngine.withLifecycle(this.emitterEngine, {
+    this.lifeCycleEngine.withLifecycle(this.eventsEngine, {
      stop: (emitterEngine) => emitterEngine.shutDown()
    });
  }
package/src/util/protocol-types.ts CHANGED
@@ -13,9 +13,51 @@ export const BucketRequest = t.object({
 
 export type BucketRequest = t.Decoded<typeof BucketRequest>;
 
+/**
+ * A sync steam that a client has expressed interest in by explicitly opening it on the client side.
+ */
+export const RequestedStreamSubscription = t.object({
+  /**
+   * The defined name of the stream as it appears in sync stream definitions.
+   */
+  stream: t.string,
+  /**
+   * An optional dictionary of parameters to pass to this specific stream.
+   */
+  parameters: t.record(t.any).optional(),
+  /**
+   * Set when the client wishes to re-assign a different priority to this stream.
+   *
+   * Streams and sync rules can also assign a default priority, but clients are allowed to override those. This can be
+   * useful when the priority for partial syncs depends on e.g. the current page opened in a client.
+   */
+  override_priority: t.union(t.number, t.Null)
+});
+
+export type RequestedStreamSubscription = t.Decoded<typeof RequestedStreamSubscription>;
+
+/**
+ * An overview of all subscribed streams as part of a streaming sync request.
+ */
+export const StreamSubscriptionRequest = t.object({
+  /**
+   * Whether to sync default streams.
+   *
+   * When disabled, only explicitly-opened subscriptions are included.
+   */
+  include_defaults: t.boolean.optional(),
+
+  /**
+   * An array of sync streams the client has opened explicitly.
+   */
+  subscriptions: t.array(RequestedStreamSubscription)
+});
+
+export type StreamSubscriptionRequest = t.Decoded<typeof StreamSubscriptionRequest>;
+
 export const StreamingSyncRequest = t.object({
   /**
-   * Existing bucket states.
+   * Existing client-side bucket states.
    */
   buckets: t.array(BucketRequest).optional(),
 
@@ -47,7 +89,12 @@
   /**
    * Unique client id.
    */
-  client_id: t.string.optional()
+  client_id: t.string.optional(),
+
+  /**
+   * If the client is aware of streams, an array of streams the client has opened.
+   */
+  streams: StreamSubscriptionRequest.optional()
 });
 
 export type StreamingSyncRequest = t.Decoded<typeof StreamingSyncRequest>;
@@ -60,7 +107,7 @@ export interface StreamingSyncCheckpointDiff {
   checkpoint_diff: {
     last_op_id: ProtocolOpId;
     write_checkpoint?: ProtocolOpId;
-    updated_buckets: BucketChecksumWithDescription[];
+    updated_buckets: CheckpointBucket[];
     removed_buckets: string[];
   };
 }
@@ -99,10 +146,54 @@ export type StreamingSyncLine =
  */
 export type ProtocolOpId = string;
 
+export interface StreamDescription {
+  /**
+   * The name of the stream as it appears in the sync configuration.
+   */
+  name: string;
+
+  /**
+   * Whether this stream is subscribed to by default.
+   *
+   * For default streams, this field is still `true` if clients have an explicit subscription to the stream.
+   */
+  is_default: boolean;
+
+  /**
+   * If some subscriptions on this stream could not be resolved, e.g. due to an error, this array contains the faulty
+   * subscriptions along with an error message.
+   */
+  errors: StreamSubscriptionError[];
+}
+
+export interface StreamSubscriptionError {
+  /**
+   * The subscription that errored - either the default subscription or some of the explicit subscriptions.
+   */
+  subscription: 'default' | number;
+  /**
+   * A message describing the error on the subscription.
+   */
+  message: string;
+}
+
 export interface Checkpoint {
   last_op_id: ProtocolOpId;
   write_checkpoint?: ProtocolOpId;
-  buckets: BucketChecksumWithDescription[];
+  buckets: CheckpointBucket[];
+
+  /**
+   * All streams that the client is subscribed to.
+   *
+   * This field has two purposes:
+   *
+   * 1. It allows clients to determine which of their subscriptions actually works. E.g. if a user does
+   *    `db.syncStream('non_existent_stream').subscribe()`, clients don't immediately know that the stream doesn't
+   *    exist. Only after the next `checkpoint` line can they query this field and mark unresolved subscriptions.
+   * 2. It allows clients to learn which default streams they have been subscribed to. This is relevant for APIs
+   *    listing all streams on the client-side.
+   */
+  streams: StreamDescription[];
 }
 
 export interface BucketState {
@@ -158,4 +249,46 @@ export interface BucketChecksum {
   count: number;
 }
 
-export interface BucketChecksumWithDescription extends BucketChecksum, BucketDescription {}
+/**
+ * The reason a particular bucket is included in a checkpoint.
+ *
+ * This information allows clients to associate individual buckets with sync streams they're subscribed to. Having that
+ * association is useful because it enables clients to track progress for individual sync streams.
+ */
+export type BucketSubscriptionReason = BucketDerivedFromDefaultStream | BucketDerivedFromExplicitSubscription;
+
+/**
+ * A bucket has been included in a checkpoint because it's part of a default stream.
+ */
+export type BucketDerivedFromDefaultStream = {
+  /**
+   * The index (into {@link Checkpoint.streams}) of the stream defining the bucket.
+   */
+  default: number;
+};
+
+/**
+ * The bucket has been included in a checkpoint because it's part of a stream that a client has explicitly subscribed
+ * to.
+ */
+export type BucketDerivedFromExplicitSubscription = {
+  /**
+   * The index (into {@link StreamSubscriptionRequest.subscriptions}) of the subscription yielding this bucket.
+   */
+  sub: number;
+};
+
+export interface ClientBucketDescription {
+  /**
+   * An opaque id of the bucket.
+   */
+  bucket: string;
+  /**
+   * The priority used to synchronize this bucket, derived from its definition and an optional priority override from
+   * the stream subscription.
+   */
+  priority: BucketPriority;
+  subscriptions: BucketSubscriptionReason[];
+}
+
+export interface CheckpointBucket extends BucketChecksum, ClientBucketDescription {}
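
For reference, a streaming sync request using the new `streams` field could look like the following sketch (the stream name, parameters, and priority value are made up for illustration, and only fields relevant to this change are shown):

// Illustrative request body targeting the StreamingSyncRequest codec defined above.
const exampleRequest = {
  client_id: 'example-client',
  streams: {
    // Keep syncing default streams in addition to the explicit subscriptions below.
    include_defaults: true,
    subscriptions: [
      {
        stream: 'assets_by_category', // hypothetical stream definition name
        parameters: { categories: [1, 2] }, // per-subscription parameters
        override_priority: 1 // client-side priority override; null keeps the defined priority
      }
    ]
  }
};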