@powersync/service-core 1.14.0 → 1.15.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71)
  1. package/CHANGELOG.md +32 -0
  2. package/LICENSE +3 -3
  3. package/dist/api/api-metrics.js +5 -0
  4. package/dist/api/api-metrics.js.map +1 -1
  5. package/dist/api/diagnostics.js +1 -1
  6. package/dist/api/diagnostics.js.map +1 -1
  7. package/dist/metrics/open-telemetry/util.d.ts +0 -3
  8. package/dist/metrics/open-telemetry/util.js +18 -13
  9. package/dist/metrics/open-telemetry/util.js.map +1 -1
  10. package/dist/routes/compression.d.ts +19 -0
  11. package/dist/routes/compression.js +70 -0
  12. package/dist/routes/compression.js.map +1 -0
  13. package/dist/routes/configure-fastify.d.ts +40 -5
  14. package/dist/routes/endpoints/socket-route.js +24 -9
  15. package/dist/routes/endpoints/socket-route.js.map +1 -1
  16. package/dist/routes/endpoints/sync-rules.js +1 -27
  17. package/dist/routes/endpoints/sync-rules.js.map +1 -1
  18. package/dist/routes/endpoints/sync-stream.d.ts +80 -10
  19. package/dist/routes/endpoints/sync-stream.js +17 -12
  20. package/dist/routes/endpoints/sync-stream.js.map +1 -1
  21. package/dist/storage/BucketStorage.d.ts +1 -1
  22. package/dist/storage/BucketStorage.js.map +1 -1
  23. package/dist/storage/BucketStorageBatch.d.ts +4 -4
  24. package/dist/storage/BucketStorageBatch.js.map +1 -1
  25. package/dist/storage/ChecksumCache.d.ts +4 -19
  26. package/dist/storage/ChecksumCache.js +4 -0
  27. package/dist/storage/ChecksumCache.js.map +1 -1
  28. package/dist/storage/ReplicationEventPayload.d.ts +2 -2
  29. package/dist/storage/SyncRulesBucketStorage.d.ts +9 -0
  30. package/dist/storage/SyncRulesBucketStorage.js.map +1 -1
  31. package/dist/sync/BucketChecksumState.d.ts +40 -10
  32. package/dist/sync/BucketChecksumState.js +154 -18
  33. package/dist/sync/BucketChecksumState.js.map +1 -1
  34. package/dist/sync/RequestTracker.d.ts +7 -1
  35. package/dist/sync/RequestTracker.js +22 -2
  36. package/dist/sync/RequestTracker.js.map +1 -1
  37. package/dist/sync/sync.d.ts +3 -3
  38. package/dist/sync/sync.js +23 -42
  39. package/dist/sync/sync.js.map +1 -1
  40. package/dist/sync/util.js +1 -1
  41. package/dist/sync/util.js.map +1 -1
  42. package/dist/util/protocol-types.d.ts +153 -9
  43. package/dist/util/protocol-types.js +41 -6
  44. package/dist/util/protocol-types.js.map +1 -1
  45. package/dist/util/utils.d.ts +18 -3
  46. package/dist/util/utils.js +33 -9
  47. package/dist/util/utils.js.map +1 -1
  48. package/package.json +14 -14
  49. package/src/api/api-metrics.ts +6 -0
  50. package/src/api/diagnostics.ts +1 -1
  51. package/src/metrics/open-telemetry/util.ts +22 -21
  52. package/src/routes/compression.ts +75 -0
  53. package/src/routes/endpoints/socket-route.ts +24 -9
  54. package/src/routes/endpoints/sync-rules.ts +1 -28
  55. package/src/routes/endpoints/sync-stream.ts +18 -15
  56. package/src/storage/BucketStorage.ts +2 -2
  57. package/src/storage/BucketStorageBatch.ts +10 -4
  58. package/src/storage/ChecksumCache.ts +8 -22
  59. package/src/storage/ReplicationEventPayload.ts +2 -2
  60. package/src/storage/SyncRulesBucketStorage.ts +12 -0
  61. package/src/sync/BucketChecksumState.ts +192 -29
  62. package/src/sync/RequestTracker.ts +27 -2
  63. package/src/sync/sync.ts +53 -51
  64. package/src/sync/util.ts +1 -1
  65. package/src/util/protocol-types.ts +138 -10
  66. package/src/util/utils.ts +59 -12
  67. package/test/src/checksum_cache.test.ts +6 -8
  68. package/test/src/routes/mocks.ts +59 -0
  69. package/test/src/routes/stream.test.ts +84 -0
  70. package/test/src/sync/BucketChecksumState.test.ts +340 -42
  71. package/tsconfig.tsbuildinfo +1 -1
package/src/sync/BucketChecksumState.ts CHANGED
@@ -1,4 +1,13 @@
- import { BucketDescription, RequestParameters, SqlSyncRules } from '@powersync/service-sync-rules';
+ import {
+   BucketDescription,
+   BucketPriority,
+   BucketSource,
+   RequestedStream,
+   RequestJwtPayload,
+   RequestParameters,
+   ResolvedBucket,
+   SqlSyncRules
+ } from '@powersync/service-sync-rules';

  import * as storage from '../storage/storage-index.js';
  import * as util from '../util/util-index.js';
@@ -11,17 +20,22 @@ import {
    logger as defaultLogger
  } from '@powersync/lib-services-framework';
  import { JSONBig } from '@powersync/service-jsonbig';
- import { BucketParameterQuerier } from '@powersync/service-sync-rules/src/BucketParameterQuerier.js';
+ import { BucketParameterQuerier, QuerierError } from '@powersync/service-sync-rules/src/BucketParameterQuerier.js';
  import { SyncContext } from './SyncContext.js';
  import { getIntersection, hasIntersection } from './util.js';

+ export interface VersionedSyncRules {
+   syncRules: SqlSyncRules;
+   version: number;
+ }
+
  export interface BucketChecksumStateOptions {
    syncContext: SyncContext;
    bucketStorage: BucketChecksumStateStorage;
-   syncRules: SqlSyncRules;
-   syncParams: RequestParameters;
+   syncRules: VersionedSyncRules;
+   tokenPayload: RequestJwtPayload;
+   syncRequest: util.StreamingSyncRequest;
    logger?: Logger;
-   initialBucketPositions?: { name: string; after: util.InternalOpId }[];
  }

  type BucketSyncState = {
@@ -50,6 +64,17 @@ export class BucketChecksumState {
   */
  private lastChecksums: util.ChecksumMap | null = null;
  private lastWriteCheckpoint: bigint | null = null;
+ /**
+  * Once we've sent the first full checkpoint line including all {@link util.Checkpoint.streams} that the user is
+  * subscribed to, we keep an index of the stream names to their index in that array.
+  *
+  * This is used to compress the representation of buckets in `checkpoint` and `checkpoint_diff` lines: for buckets
+  * that are part of sync rules or default streams, we need to include the name of the sync rule or stream definition
+  * yielding that bucket (so that clients can track progress for default streams).
+  * But instead of sending the name for each bucket, we use the fact that it's part of the streams array and only send
+  * its index, reducing the size of those messages.
+  */
+ private streamNameToIndex: Map<string, number> | null = null;

  private readonly parameterState: BucketParameterState;
 
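To make the effect concrete, here is a sketch of a `checkpoint` line under this scheme. The `subscriptions` entry shapes (`{ default: index }` and `{ sub: id }`) follow `translateResolvedBucket` later in this diff, but the concrete buckets, checksums and stream names are invented for illustration:

```ts
// Hypothetical checkpoint line: each bucket references its defining stream by
// index into `streams`, instead of repeating the stream name per bucket.
const checkpointLine = {
  checkpoint: {
    last_op_id: '1100',
    buckets: [
      // { default: 0 } points at streams[0] ('todos') without resending the name.
      { bucket: 'todos["user1"]', checksum: 1234, priority: 3, subscriptions: [{ default: 0 }] },
      // { sub: 1 } references the client's second explicit stream subscription.
      { bucket: 'assets_by_category[1]', checksum: 5678, priority: 1, subscriptions: [{ sub: 1 }] }
    ],
    streams: [
      { name: 'todos', is_default: true, errors: [] },
      { name: 'assets_by_category', is_default: false, errors: [] }
    ]
  }
};

// Each stream name is sent once in `streams`, not once per bucket it yields.
console.log(JSON.stringify(checkpointLine).length);
```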
@@ -69,13 +94,14 @@
      options.syncContext,
      options.bucketStorage,
      options.syncRules,
-     options.syncParams,
+     options.tokenPayload,
+     options.syncRequest,
      this.logger
    );
    this.bucketDataPositions = new Map();

-   for (let { name, after: start } of options.initialBucketPositions ?? []) {
-     this.bucketDataPositions.set(name, { start_op_id: start });
+   for (let { name, after: start } of options.syncRequest.buckets ?? []) {
+     this.bucketDataPositions.set(name, { start_op_id: BigInt(start) });
    }
  }
 
@@ -158,6 +184,7 @@ export class BucketChecksumState {
    // TODO: If updatedBuckets is present, we can use that to more efficiently calculate a diff,
    // and avoid any unnecessary loops through the entire list of buckets.
    const diff = util.checksumsDiff(this.lastChecksums, checksumMap);
+   const streamNameToIndex = this.streamNameToIndex!;

    if (
      this.lastWriteCheckpoint == writeCheckpoint &&
@@ -182,12 +209,12 @@

    const updatedBucketDescriptions = diff.updatedBuckets.map((e) => ({
      ...e,
-     priority: bucketDescriptionMap.get(e.bucket)!.priority
+     ...this.parameterState.translateResolvedBucket(bucketDescriptionMap.get(e.bucket)!, streamNameToIndex)
    }));
    bucketsToFetch = [...generateBucketsToFetch].map((b) => {
      return {
-       bucket: b,
-       priority: bucketDescriptionMap.get(b)!.priority
+       priority: bucketDescriptionMap.get(b)!.priority,
+       bucket: b
      };
    });

@@ -220,15 +247,37 @@
      message += `buckets: ${allBuckets.length} ${limitedBuckets(allBuckets, 20)}`;
      this.logger.info(message, { checkpoint: base.checkpoint, user_id: user_id, buckets: allBuckets.length });
    };
-   bucketsToFetch = allBuckets;
+   bucketsToFetch = allBuckets.map((b) => ({ bucket: b.bucket, priority: b.priority }));
+
+   const subscriptions: util.StreamDescription[] = [];
+   const streamNameToIndex = new Map<string, number>();
+   this.streamNameToIndex = streamNameToIndex;
+
+   for (const source of this.parameterState.syncRules.syncRules.bucketSources) {
+     if (this.parameterState.isSubscribedToStream(source)) {
+       streamNameToIndex.set(source.name, subscriptions.length);
+
+       subscriptions.push({
+         name: source.name,
+         is_default: source.subscribedToByDefault,
+         errors:
+           this.parameterState.streamErrors[source.name]?.map((e) => ({
+             subscription: e.subscription?.opaque_id ?? 'default',
+             message: e.message
+           })) ?? []
+       });
+     }
+   }
+
    checkpointLine = {
      checkpoint: {
        last_op_id: util.internalToExternalOpId(base.checkpoint),
        write_checkpoint: writeCheckpoint ? String(writeCheckpoint) : undefined,
        buckets: [...checksumMap.values()].map((e) => ({
          ...e,
-         priority: bucketDescriptionMap.get(e.bucket)!.priority
-       }))
+         ...this.parameterState.translateResolvedBucket(bucketDescriptionMap.get(e.bucket)!, streamNameToIndex)
+       })),
+       streams: subscriptions
      }
    } satisfies util.StreamingSyncCheckpoint;
  }
@@ -319,7 +368,7 @@ export interface CheckpointUpdate {
  /**
   * All buckets forming part of the checkpoint.
   */
- buckets: BucketDescription[];
+ buckets: ResolvedBucket[];

  /**
   * If present, a set of buckets that have been updated since the last checkpoint.
@@ -332,12 +381,22 @@
  export class BucketParameterState {
    private readonly context: SyncContext;
    public readonly bucketStorage: BucketChecksumStateStorage;
-   public readonly syncRules: SqlSyncRules;
+   public readonly syncRules: VersionedSyncRules;
    public readonly syncParams: RequestParameters;
    private readonly querier: BucketParameterQuerier;
-   private readonly staticBuckets: Map<string, BucketDescription>;
+   /**
+    * Static buckets. This map is guaranteed not to change during a request, since resolving static buckets can only
+    * take request parameters into account.
+    */
+   private readonly staticBuckets: Map<string, ResolvedBucket>;
+   private readonly includeDefaultStreams: boolean;
+   // Indexed by the client-side id.
+   private readonly explicitStreamSubscriptions: util.RequestedStreamSubscription[];
+   // Indexed by descriptor name.
+   readonly streamErrors: Record<string, QuerierError[]>;
+   private readonly subscribedStreamNames: Set<string>;
    private readonly logger: Logger;
-   private cachedDynamicBuckets: BucketDescription[] | null = null;
+   private cachedDynamicBuckets: ResolvedBucket[] | null = null;
    private cachedDynamicBucketSet: Set<string> | null = null;

    private readonly lookups: Set<string>;
@@ -345,19 +404,94 @@
    constructor(
      context: SyncContext,
      bucketStorage: BucketChecksumStateStorage,
-     syncRules: SqlSyncRules,
-     syncParams: RequestParameters,
+     syncRules: VersionedSyncRules,
+     tokenPayload: RequestJwtPayload,
+     request: util.StreamingSyncRequest,
      logger: Logger
    ) {
      this.context = context;
      this.bucketStorage = bucketStorage;
      this.syncRules = syncRules;
-     this.syncParams = syncParams;
+     this.syncParams = new RequestParameters(tokenPayload, request.parameters ?? {});
      this.logger = logger;

-     this.querier = syncRules.getBucketParameterQuerier(this.syncParams);
-     this.staticBuckets = new Map<string, BucketDescription>(this.querier.staticBuckets.map((b) => [b.bucket, b]));
+     const streamsByName: Record<string, RequestedStream[]> = {};
+     const subscriptions = request.streams;
+     const explicitStreamSubscriptions: util.RequestedStreamSubscription[] = subscriptions?.subscriptions ?? [];
+     if (subscriptions) {
+       for (let i = 0; i < explicitStreamSubscriptions.length; i++) {
+         const subscription = explicitStreamSubscriptions[i];
+
+         const syncRuleStream: RequestedStream = {
+           parameters: subscription.parameters ?? {},
+           opaque_id: i
+         };
+         if (Object.hasOwn(streamsByName, subscription.stream)) {
+           streamsByName[subscription.stream].push(syncRuleStream);
+         } else {
+           streamsByName[subscription.stream] = [syncRuleStream];
+         }
+       }
+     }
+     this.includeDefaultStreams = subscriptions?.include_defaults ?? true;
+     this.explicitStreamSubscriptions = explicitStreamSubscriptions;
+
+     const { querier, errors } = syncRules.syncRules.getBucketParameterQuerier({
+       globalParameters: this.syncParams,
+       hasDefaultStreams: this.includeDefaultStreams,
+       streams: streamsByName,
+       bucketIdTransformer: SqlSyncRules.versionedBucketIdTransformer(`${syncRules.version}`)
+     });
+     this.querier = querier;
+     this.streamErrors = Object.groupBy(errors, (e) => e.descriptor) as Record<string, QuerierError[]>;
+
+     this.staticBuckets = new Map<string, ResolvedBucket>(
+       mergeBuckets(this.querier.staticBuckets).map((b) => [b.bucket, b])
+     );
      this.lookups = new Set<string>(this.querier.parameterQueryLookups.map((l) => JSONBig.stringify(l.values)));
+     this.subscribedStreamNames = new Set(Object.keys(streamsByName));
+   }
+
+   /**
+    * Translates an internal sync-rules {@link ResolvedBucket} instance to the public
+    * {@link util.ClientBucketDescription}.
+    *
+    * @param lookupIndex A map from stream names to their index in {@link util.Checkpoint.streams}. These are used to
+    * reference default buckets by their stream index instead of duplicating the name on the wire.
+    */
+   translateResolvedBucket(description: ResolvedBucket, lookupIndex: Map<string, number>): util.ClientBucketDescription {
+     // If the client is overriding the priority of any stream that yields this bucket, sync the bucket with that
+     // priority.
+     let priorityOverride: BucketPriority | null = null;
+     for (const reason of description.inclusion_reasons) {
+       if (reason != 'default') {
+         const requestedPriority = this.explicitStreamSubscriptions[reason.subscription]?.override_priority;
+         if (requestedPriority != null) {
+           if (priorityOverride == null) {
+             priorityOverride = requestedPriority as BucketPriority;
+           } else {
+             priorityOverride = Math.min(requestedPriority, priorityOverride) as BucketPriority;
+           }
+         }
+       }
+     }
+
+     return {
+       bucket: description.bucket,
+       priority: priorityOverride ?? description.priority,
+       subscriptions: description.inclusion_reasons.map((reason) => {
+         if (reason == 'default') {
+           const stream = description.definition;
+           return { default: lookupIndex.get(stream)! };
+         } else {
+           return { sub: reason.subscription };
+         }
+       })
+     };
+   }
+
+   isSubscribedToStream(desc: BucketSource): boolean {
+     return (desc.subscribedToByDefault && this.includeDefaultStreams) || this.subscribedStreamNames.has(desc.name);
    }

    async getCheckpointUpdate(checkpoint: storage.StorageCheckpointUpdate): Promise<CheckpointUpdate> {
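The override rule in `translateResolvedBucket` above boils down to: the most urgent (numerically smallest) `override_priority` across all subscriptions yielding a bucket wins, falling back to the bucket's own priority. A self-contained sketch with stand-in types (the real `ResolvedBucket` and `BucketPriority` live in `@powersync/service-sync-rules`):

```ts
type InclusionReason = 'default' | { subscription: number };

// `overrides` is indexed by the client-side subscription id, mirroring
// `explicitStreamSubscriptions[reason.subscription]?.override_priority` above.
function effectivePriority(
  basePriority: number,
  reasons: InclusionReason[],
  overrides: (number | undefined)[]
): number {
  let override: number | null = null;
  for (const reason of reasons) {
    if (reason !== 'default') {
      const requested = overrides[reason.subscription];
      if (requested != null) {
        override = override == null ? requested : Math.min(requested, override);
      }
    }
  }
  return override ?? basePriority;
}

// Included by default (priority 3) and by two subscriptions overriding to 2 and 1:
console.log(effectivePriority(3, ['default', { subscription: 0 }, { subscription: 1 }], [2, 1])); // 1
```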
@@ -391,19 +525,19 @@
   * For static buckets, we can keep track of which buckets have been updated.
   */
  private async getCheckpointUpdateStatic(checkpoint: storage.StorageCheckpointUpdate): Promise<CheckpointUpdate> {
-   const querier = this.querier;
+   const staticBuckets = [...this.staticBuckets.values()];
    const update = checkpoint.update;

    if (update.invalidateDataBuckets) {
      return {
-       buckets: querier.staticBuckets,
+       buckets: staticBuckets,
        updatedBuckets: INVALIDATE_ALL_BUCKETS
      };
    }

    const updatedBuckets = new Set<string>(getIntersection(this.staticBuckets, update.updatedDataBuckets));
    return {
-     buckets: querier.staticBuckets,
+     buckets: staticBuckets,
      updatedBuckets
    };
  }
@@ -414,7 +548,7 @@
  private async getCheckpointUpdateDynamic(checkpoint: storage.StorageCheckpointUpdate): Promise<CheckpointUpdate> {
    const querier = this.querier;
    const storage = this.bucketStorage;
-   const staticBuckets = querier.staticBuckets;
+   const staticBuckets = this.staticBuckets.values();
    const update = checkpoint.update;

    let hasParameterChange = false;
@@ -436,7 +570,7 @@
      }
    }

-   let dynamicBuckets: BucketDescription[];
+   let dynamicBuckets: ResolvedBucket[];
    if (hasParameterChange || this.cachedDynamicBuckets == null || this.cachedDynamicBucketSet == null) {
      dynamicBuckets = await querier.queryDynamicBucketDescriptions({
        getParameterSets(lookups) {
@@ -458,7 +592,7 @@
      }
    }
  }
- const allBuckets = [...staticBuckets, ...dynamicBuckets];
+ const allBuckets = [...staticBuckets, ...mergeBuckets(dynamicBuckets)];

  if (invalidateDataBuckets) {
    return {
@@ -517,3 +651,32 @@ function limitedBuckets(buckets: string[] | { bucket: string }[], limit: number)
    const limited = buckets.slice(0, limit);
    return `${JSON.stringify(limited)}...`;
  }
+
+ /**
+  * Resolves duplicate buckets in the given array, merging the inclusion reasons for duplicates.
+  *
+  * It's possible for duplicates to occur when a stream has multiple subscriptions. Consider e.g.
+  *
+  * ```
+  * sync_streams:
+  *   assets_by_category:
+  *     query: select * from assets where category in (request.parameters() -> 'categories')
+  * ```
+  *
+  * Here, a client might subscribe once with `{"categories": [1]}` and once with `{"categories": [1, 2]}`. Since each
+  * subscription is evaluated independently, this would lead to three buckets, with a duplicate `assets_by_category[1]`
+  * bucket.
+  */
+ function mergeBuckets(buckets: ResolvedBucket[]): ResolvedBucket[] {
+   const byBucketId: Record<string, ResolvedBucket> = {};
+
+   for (const bucket of buckets) {
+     if (Object.hasOwn(byBucketId, bucket.bucket)) {
+       byBucketId[bucket.bucket].inclusion_reasons.push(...bucket.inclusion_reasons);
+     } else {
+       byBucketId[bucket.bucket] = structuredClone(bucket);
+     }
+   }
+
+   return Object.values(byBucketId);
+ }
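Following the doc comment's `assets_by_category` scenario, a quick sketch of the merge using a stand-in bucket shape (the real `ResolvedBucket` carries more fields):

```ts
interface ResolvedBucketLike {
  bucket: string;
  inclusion_reasons: ('default' | { subscription: number })[];
}

// Two subscriptions independently resolve to three buckets, one of them duplicated.
const resolved: ResolvedBucketLike[] = [
  { bucket: 'assets_by_category[1]', inclusion_reasons: [{ subscription: 0 }] },
  { bucket: 'assets_by_category[1]', inclusion_reasons: [{ subscription: 1 }] },
  { bucket: 'assets_by_category[2]', inclusion_reasons: [{ subscription: 1 }] }
];

const byId = new Map<string, ResolvedBucketLike>();
for (const bucket of resolved) {
  const existing = byId.get(bucket.bucket);
  if (existing) {
    existing.inclusion_reasons.push(...bucket.inclusion_reasons);
  } else {
    byId.set(bucket.bucket, structuredClone(bucket));
  }
}

// assets_by_category[1] now appears once, listing both subscriptions as reasons.
console.log([...byId.values()]);
```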
package/src/sync/RequestTracker.ts CHANGED
@@ -2,6 +2,7 @@ import { MetricsEngine } from '../metrics/MetricsEngine.js';

  import { APIMetric } from '@powersync/service-types';
  import { SyncBucketData } from '../util/protocol-types.js';
+ import { ServiceAssertionError } from '@powersync/lib-services-framework';

  /**
   * Record sync stats per request stream.
@@ -9,9 +10,12 @@ import { SyncBucketData } from '../util/protocol-types.js';
  export class RequestTracker {
    operationsSynced = 0;
    dataSyncedBytes = 0;
+   dataSentBytes = 0;
    operationCounts: OperationCounts = { put: 0, remove: 0, move: 0, clear: 0 };
    largeBuckets: Record<string, number> = {};

+   private encoding: string | undefined = undefined;
+
    constructor(private metrics: MetricsEngine) {
      this.metrics = metrics;
    }
@@ -29,18 +33,39 @@ export class RequestTracker {
    this.metrics.getCounter(APIMetric.OPERATIONS_SYNCED).add(operations.total);
  }

- addDataSynced(bytes: number) {
+ setCompressed(encoding: string) {
+   this.encoding = encoding;
+ }
+
+ addPlaintextDataSynced(bytes: number) {
    this.dataSyncedBytes += bytes;

    this.metrics.getCounter(APIMetric.DATA_SYNCED_BYTES).add(bytes);
+
+   if (this.encoding == null) {
+     // This avoids having to create a separate stream just to track this.
+     this.dataSentBytes += bytes;
+
+     this.metrics.getCounter(APIMetric.DATA_SENT_BYTES).add(bytes);
+   }
+ }
+
+ addCompressedDataSent(bytes: number) {
+   if (this.encoding == null) {
+     throw new ServiceAssertionError('No compression encoding set');
+   }
+   this.dataSentBytes += bytes;
+   this.metrics.getCounter(APIMetric.DATA_SENT_BYTES).add(bytes);
  }

  getLogMeta() {
    return {
      operations_synced: this.operationsSynced,
      data_synced_bytes: this.dataSyncedBytes,
+     data_sent_bytes: this.dataSentBytes,
      operation_counts: this.operationCounts,
-     large_buckets: this.largeBuckets
+     large_buckets: this.largeBuckets,
+     encoding: this.encoding
    };
  }
}
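The intent of the new counters: `dataSyncedBytes` counts plaintext produced by the sync stream, while `dataSentBytes` counts what actually goes over the wire after the response compression added in this release. A minimal sketch of how the two diverge, using a stand-in tracker and Node's zlib rather than the real `RequestTracker` and its Fastify wiring:

```ts
import * as zlib from 'node:zlib';

// Stand-in for the tracker API added in this release (not the real implementation).
class ByteTracker {
  dataSyncedBytes = 0; // plaintext bytes produced by the sync stream
  dataSentBytes = 0; // bytes written to the socket
  private encoding: string | undefined;

  setCompressed(encoding: string) {
    this.encoding = encoding;
  }

  addPlaintextDataSynced(bytes: number) {
    this.dataSyncedBytes += bytes;
    // Without a compression encoding, sent bytes equal plaintext bytes,
    // so no separate tracking stream is needed.
    if (this.encoding == null) this.dataSentBytes += bytes;
  }

  addCompressedDataSent(bytes: number) {
    if (this.encoding == null) throw new Error('No compression encoding set');
    this.dataSentBytes += bytes;
  }
}

const tracker = new ByteTracker();
tracker.setCompressed('gzip');

const line = Buffer.from(JSON.stringify({ data: 'x'.repeat(10_000) }));
tracker.addPlaintextDataSynced(line.length);
tracker.addCompressedDataSent(zlib.gzipSync(line).length);

console.log(tracker.dataSyncedBytes, tracker.dataSentBytes); // e.g. 10013 vs ~60
```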
package/src/sync/sync.ts CHANGED
@@ -1,5 +1,11 @@
  import { JSONBig, JsonContainer } from '@powersync/service-jsonbig';
- import { BucketDescription, BucketPriority, RequestParameters, SqlSyncRules } from '@powersync/service-sync-rules';
+ import {
+   BucketDescription,
+   BucketPriority,
+   RequestJwtPayload,
+   RequestParameters,
+   SqlSyncRules
+ } from '@powersync/service-sync-rules';

  import { AbortError } from 'ix/aborterror.js';

@@ -8,7 +14,7 @@ import * as storage from '../storage/storage-index.js';
  import * as util from '../util/util-index.js';

  import { Logger, logger as defaultLogger } from '@powersync/lib-services-framework';
- import { BucketChecksumState, CheckpointLine } from './BucketChecksumState.js';
+ import { BucketChecksumState, CheckpointLine, VersionedSyncRules } from './BucketChecksumState.js';
  import { mergeAsyncIterables } from '../streams/streams-index.js';
  import { acquireSemaphoreAbortable, settledPromise, tokenStream, TokenStreamOptions } from './util.js';
  import { SyncContext } from './SyncContext.js';
@@ -17,11 +23,11 @@ import { OperationsSentStats, RequestTracker, statsForBatch } from './RequestTra
  export interface SyncStreamParameters {
    syncContext: SyncContext;
    bucketStorage: storage.SyncRulesBucketStorage;
-   syncRules: SqlSyncRules;
+   syncRules: VersionedSyncRules;
    params: util.StreamingSyncRequest;
-   syncParams: RequestParameters;
    token: auth.JwtPayload;
    logger?: Logger;
+   isEncodingAsBson: boolean;
    /**
     * If this signal is aborted, the stream response ends as soon as possible, without error.
     */
@@ -34,8 +40,17 @@
  export async function* streamResponse(
    options: SyncStreamParameters
  ): AsyncIterable<util.StreamingSyncLine | string | null> {
-   const { syncContext, bucketStorage, syncRules, params, syncParams, token, tokenStreamOptions, tracker, signal } =
-     options;
+   const {
+     syncContext,
+     bucketStorage,
+     syncRules,
+     params,
+     token,
+     tokenStreamOptions,
+     tracker,
+     signal,
+     isEncodingAsBson
+   } = options;
    const logger = options.logger ?? defaultLogger;

    // We also need to be able to abort, so we create our own controller.
@@ -58,10 +73,11 @@ export async function* streamResponse(
      bucketStorage,
      syncRules,
      params,
-     syncParams,
+     token,
      tracker,
      controller.signal,
-     logger
+     logger,
+     isEncodingAsBson
    );
    // Merge the two streams, and abort as soon as one of the streams ends.
    const merged = mergeAsyncIterables([stream, ki], controller.signal);
@@ -84,26 +100,25 @@
  async function* streamResponseInner(
    syncContext: SyncContext,
    bucketStorage: storage.SyncRulesBucketStorage,
-   syncRules: SqlSyncRules,
+   syncRules: VersionedSyncRules,
    params: util.StreamingSyncRequest,
-   syncParams: RequestParameters,
+   tokenPayload: RequestJwtPayload,
    tracker: RequestTracker,
    signal: AbortSignal,
-   logger: Logger
+   logger: Logger,
+   isEncodingAsBson: boolean
  ): AsyncGenerator<util.StreamingSyncLine | string | null> {
-   const { raw_data, binary_data } = params;
+   const { raw_data } = params;

-   const checkpointUserId = util.checkpointUserId(syncParams.tokenParameters.user_id as string, params.client_id);
+   const userId = tokenPayload.sub;
+   const checkpointUserId = util.checkpointUserId(userId as string, params.client_id);

    const checksumState = new BucketChecksumState({
      syncContext,
      bucketStorage,
      syncRules,
-     syncParams,
-     initialBucketPositions: params.buckets?.map((bucket) => ({
-       name: bucket.name,
-       after: BigInt(bucket.after)
-     })),
+     tokenPayload,
+     syncRequest: params,
      logger: logger
    });
    const stream = bucketStorage.watchCheckpointChanges({
@@ -223,12 +238,11 @@
          checkpoint: next.value.value.checkpoint,
          bucketsToFetch: buckets,
          checkpointLine: line,
-         raw_data,
-         binary_data,
+         legacyDataLines: !isEncodingAsBson && params.raw_data != true,
          onRowsSent: markOperationsSent,
          abort_connection: signal,
          abort_batch: abortCheckpointSignal,
-         user_id: syncParams.userId,
+         user_id: userId,
          // Passing null here will emit a full sync complete message at the end. If we pass a priority, we'll emit a partial
          // sync complete message instead.
          forPriority: !isLast ? priority : null,
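For reference, `legacyDataLines` collapses the previous `raw_data`/`binary_data` pair into one flag. Based on this hunk and the encoding branch in `bucketDataBatch` below, the matrix works out roughly as follows (comment-only sketch; the JSON/BSON transformer itself lives downstream, presumably in the route layer per the sync-stream.ts changes in this diff):

```ts
// isEncodingAsBson | params.raw_data | legacyDataLines | data line encoding
// -----------------+-----------------+-----------------+---------------------------------------------
// true             | (any)           | false           | object passed through, BSON downstream
// false            | true            | false           | object passed through, JSON downstream
// false            | false/undefined | true            | JSONBig string built inline (legacy clients)
```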
@@ -253,8 +267,8 @@
    checkpointLine: CheckpointLine;
    /** Subset of checkpointLine.bucketsToFetch, filtered by priority. */
    bucketsToFetch: BucketDescription[];
-   raw_data: boolean | undefined;
-   binary_data: boolean | undefined;
+   /** Whether data lines should be encoded in a legacy format where {@link util.OplogEntry.data} is a nested object. */
+   legacyDataLines: boolean;
    /** Signals that the connection was aborted and that streaming should stop ASAP. */
    abort_connection: AbortSignal;
    /**
@@ -315,8 +329,7 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
      checkpoint,
      bucketsToFetch,
      checkpointLine,
-     raw_data,
-     binary_data,
+     legacyDataLines,
      abort_connection,
      abort_batch,
      onRowsSent,
@@ -366,32 +379,21 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
    }
    logger.debug(`Sending data for ${r.bucket}`);

-   let send_data: any;
-   if (binary_data) {
-     // Send the object as is, will most likely be encoded as a BSON document
-     send_data = { data: r };
-   } else if (raw_data) {
-     /**
-      * Data is a raw string - we can use the more efficient JSON.stringify.
-      */
-     const response: util.StreamingSyncData = {
-       data: r
-     };
-     send_data = JSON.stringify(response);
-   } else {
-     // We need to preserve the embedded data exactly, so this uses a JsonContainer
-     // and JSONBig to stringify.
-     const response: util.StreamingSyncData = {
-       data: transformLegacyResponse(r)
-     };
-     send_data = JSONBig.stringify(response);
-   }
-   yield { data: send_data, done: false };
-   if (send_data.length > 50_000) {
-     // IMPORTANT: This does not affect the output stream, but is used to flush
-     // iterator memory in case if large data sent.
-     yield { data: null, done: false };
-   }
+   const line = legacyDataLines
+     ? // We need to preserve the embedded data exactly, so this uses a JsonContainer
+       // and JSONBig to stringify.
+       JSONBig.stringify({
+         data: transformLegacyResponse(r)
+       } satisfies util.StreamingSyncData)
+     : // We can send the object as-is, which will be converted to JSON or BSON by a downstream transformer.
+       ({ data: r } satisfies util.StreamingSyncData);
+
+   yield { data: line, done: false };
+
+   // IMPORTANT: This does not affect the output stream, but is used to flush
+   // iterator memory in case large data is sent.
+   yield { data: null, done: false };

    onRowsSent(statsForBatch(r));

    checkpointLine.updateBucketPosition({ bucket: r.bucket, nextAfter: BigInt(r.next_after), hasMore: r.has_more });
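The reason the legacy branch still goes through `JsonContainer`/`JSONBig`: `OplogEntry.data` is a raw JSON string, and nesting it as a plain object via `JSON.parse` would silently lose precision on large integers. A self-contained illustration (the entry shape is simplified; real entries come from `util.SyncBucketData`):

```ts
// `data` is preserved verbatim as a string in the modern formats.
const entry = {
  op: 'PUT',
  object_type: 'todos',
  object_id: '1',
  data: '{"description":"entry","weight":9007199254740993}'
};

// Modern encoding: the string stays a string, so plain JSON.stringify is safe and fast.
const modern = JSON.stringify({ data: entry });

// Naive legacy encoding: JSON.parse rounds 9007199254740993 (> Number.MAX_SAFE_INTEGER),
// which is exactly what JsonContainer + JSONBig.stringify avoid.
const legacyNaive = JSON.stringify({ data: { ...entry, data: JSON.parse(entry.data) } });

console.log(modern.includes('9007199254740993')); // true
console.log(legacyNaive.includes('9007199254740993')); // false: precision lost
```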
package/src/sync/util.ts CHANGED
@@ -125,7 +125,7 @@ export async function* transformToBytesTracked(
      encoded = data;
    }

-   tracker.addDataSynced(encoded.length);
+   tracker.addPlaintextDataSynced(encoded.length);
    yield encoded;
  }
}