@powersync/service-core 1.13.4 → 1.15.0

This diff represents the changes between publicly available package versions as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (119)
  1. package/CHANGELOG.md +61 -0
  2. package/LICENSE +3 -3
  3. package/dist/api/api-metrics.js +5 -0
  4. package/dist/api/api-metrics.js.map +1 -1
  5. package/dist/api/diagnostics.js +31 -1
  6. package/dist/api/diagnostics.js.map +1 -1
  7. package/dist/auth/KeyStore.d.ts +19 -0
  8. package/dist/auth/KeyStore.js +16 -4
  9. package/dist/auth/KeyStore.js.map +1 -1
  10. package/dist/auth/RemoteJWKSCollector.d.ts +3 -0
  11. package/dist/auth/RemoteJWKSCollector.js +3 -1
  12. package/dist/auth/RemoteJWKSCollector.js.map +1 -1
  13. package/dist/auth/StaticSupabaseKeyCollector.d.ts +2 -1
  14. package/dist/auth/StaticSupabaseKeyCollector.js +1 -1
  15. package/dist/auth/StaticSupabaseKeyCollector.js.map +1 -1
  16. package/dist/auth/utils.d.ts +19 -0
  17. package/dist/auth/utils.js +106 -3
  18. package/dist/auth/utils.js.map +1 -1
  19. package/dist/entry/commands/compact-action.js +10 -1
  20. package/dist/entry/commands/compact-action.js.map +1 -1
  21. package/dist/metrics/open-telemetry/util.d.ts +0 -3
  22. package/dist/metrics/open-telemetry/util.js +19 -12
  23. package/dist/metrics/open-telemetry/util.js.map +1 -1
  24. package/dist/replication/AbstractReplicator.js +2 -2
  25. package/dist/replication/AbstractReplicator.js.map +1 -1
  26. package/dist/routes/compression.d.ts +19 -0
  27. package/dist/routes/compression.js +70 -0
  28. package/dist/routes/compression.js.map +1 -0
  29. package/dist/routes/configure-fastify.d.ts +40 -5
  30. package/dist/routes/configure-fastify.js +2 -1
  31. package/dist/routes/configure-fastify.js.map +1 -1
  32. package/dist/routes/endpoints/socket-route.js +25 -17
  33. package/dist/routes/endpoints/socket-route.js.map +1 -1
  34. package/dist/routes/endpoints/sync-rules.js +1 -27
  35. package/dist/routes/endpoints/sync-rules.js.map +1 -1
  36. package/dist/routes/endpoints/sync-stream.d.ts +80 -10
  37. package/dist/routes/endpoints/sync-stream.js +29 -11
  38. package/dist/routes/endpoints/sync-stream.js.map +1 -1
  39. package/dist/routes/route-register.d.ts +4 -0
  40. package/dist/routes/route-register.js +29 -15
  41. package/dist/routes/route-register.js.map +1 -1
  42. package/dist/storage/BucketStorage.d.ts +1 -1
  43. package/dist/storage/BucketStorage.js.map +1 -1
  44. package/dist/storage/BucketStorageBatch.d.ts +16 -6
  45. package/dist/storage/BucketStorageBatch.js.map +1 -1
  46. package/dist/storage/ChecksumCache.d.ts +4 -19
  47. package/dist/storage/ChecksumCache.js +4 -0
  48. package/dist/storage/ChecksumCache.js.map +1 -1
  49. package/dist/storage/ReplicationEventPayload.d.ts +2 -2
  50. package/dist/storage/SourceEntity.d.ts +5 -4
  51. package/dist/storage/SourceTable.d.ts +22 -20
  52. package/dist/storage/SourceTable.js +34 -30
  53. package/dist/storage/SourceTable.js.map +1 -1
  54. package/dist/storage/SyncRulesBucketStorage.d.ts +19 -4
  55. package/dist/storage/SyncRulesBucketStorage.js.map +1 -1
  56. package/dist/sync/BucketChecksumState.d.ts +41 -11
  57. package/dist/sync/BucketChecksumState.js +155 -19
  58. package/dist/sync/BucketChecksumState.js.map +1 -1
  59. package/dist/sync/RequestTracker.d.ts +7 -1
  60. package/dist/sync/RequestTracker.js +22 -2
  61. package/dist/sync/RequestTracker.js.map +1 -1
  62. package/dist/sync/sync.d.ts +3 -3
  63. package/dist/sync/sync.js +23 -42
  64. package/dist/sync/sync.js.map +1 -1
  65. package/dist/sync/util.d.ts +3 -1
  66. package/dist/sync/util.js +30 -2
  67. package/dist/sync/util.js.map +1 -1
  68. package/dist/util/config/compound-config-collector.js +23 -0
  69. package/dist/util/config/compound-config-collector.js.map +1 -1
  70. package/dist/util/lsn.d.ts +4 -0
  71. package/dist/util/lsn.js +11 -0
  72. package/dist/util/lsn.js.map +1 -0
  73. package/dist/util/protocol-types.d.ts +153 -9
  74. package/dist/util/protocol-types.js +41 -6
  75. package/dist/util/protocol-types.js.map +1 -1
  76. package/dist/util/util-index.d.ts +1 -0
  77. package/dist/util/util-index.js +1 -0
  78. package/dist/util/util-index.js.map +1 -1
  79. package/dist/util/utils.d.ts +18 -3
  80. package/dist/util/utils.js +33 -9
  81. package/dist/util/utils.js.map +1 -1
  82. package/package.json +16 -14
  83. package/src/api/api-metrics.ts +6 -0
  84. package/src/api/diagnostics.ts +33 -1
  85. package/src/auth/KeyStore.ts +28 -4
  86. package/src/auth/RemoteJWKSCollector.ts +5 -2
  87. package/src/auth/StaticSupabaseKeyCollector.ts +1 -1
  88. package/src/auth/utils.ts +123 -3
  89. package/src/entry/commands/compact-action.ts +9 -1
  90. package/src/metrics/open-telemetry/util.ts +23 -19
  91. package/src/replication/AbstractReplicator.ts +2 -2
  92. package/src/routes/compression.ts +75 -0
  93. package/src/routes/configure-fastify.ts +3 -1
  94. package/src/routes/endpoints/socket-route.ts +25 -16
  95. package/src/routes/endpoints/sync-rules.ts +1 -28
  96. package/src/routes/endpoints/sync-stream.ts +37 -26
  97. package/src/routes/route-register.ts +41 -15
  98. package/src/storage/BucketStorage.ts +2 -2
  99. package/src/storage/BucketStorageBatch.ts +23 -6
  100. package/src/storage/ChecksumCache.ts +8 -22
  101. package/src/storage/ReplicationEventPayload.ts +2 -2
  102. package/src/storage/SourceEntity.ts +5 -5
  103. package/src/storage/SourceTable.ts +48 -34
  104. package/src/storage/SyncRulesBucketStorage.ts +26 -7
  105. package/src/sync/BucketChecksumState.ts +194 -31
  106. package/src/sync/RequestTracker.ts +27 -2
  107. package/src/sync/sync.ts +53 -51
  108. package/src/sync/util.ts +32 -3
  109. package/src/util/config/compound-config-collector.ts +24 -0
  110. package/src/util/lsn.ts +8 -0
  111. package/src/util/protocol-types.ts +138 -10
  112. package/src/util/util-index.ts +1 -0
  113. package/src/util/utils.ts +59 -12
  114. package/test/src/auth.test.ts +323 -1
  115. package/test/src/checksum_cache.test.ts +6 -8
  116. package/test/src/routes/mocks.ts +59 -0
  117. package/test/src/routes/stream.test.ts +84 -0
  118. package/test/src/sync/BucketChecksumState.test.ts +375 -76
  119. package/tsconfig.tsbuildinfo +1 -1
package/src/sync/sync.ts CHANGED
@@ -1,5 +1,11 @@
  import { JSONBig, JsonContainer } from '@powersync/service-jsonbig';
- import { BucketDescription, BucketPriority, RequestParameters, SqlSyncRules } from '@powersync/service-sync-rules';
+ import {
+   BucketDescription,
+   BucketPriority,
+   RequestJwtPayload,
+   RequestParameters,
+   SqlSyncRules
+ } from '@powersync/service-sync-rules';

  import { AbortError } from 'ix/aborterror.js';

@@ -8,7 +14,7 @@ import * as storage from '../storage/storage-index.js';
  import * as util from '../util/util-index.js';

  import { Logger, logger as defaultLogger } from '@powersync/lib-services-framework';
- import { BucketChecksumState, CheckpointLine } from './BucketChecksumState.js';
+ import { BucketChecksumState, CheckpointLine, VersionedSyncRules } from './BucketChecksumState.js';
  import { mergeAsyncIterables } from '../streams/streams-index.js';
  import { acquireSemaphoreAbortable, settledPromise, tokenStream, TokenStreamOptions } from './util.js';
  import { SyncContext } from './SyncContext.js';
@@ -17,11 +23,11 @@ import { OperationsSentStats, RequestTracker, statsForBatch } from './RequestTra
  export interface SyncStreamParameters {
    syncContext: SyncContext;
    bucketStorage: storage.SyncRulesBucketStorage;
-   syncRules: SqlSyncRules;
+   syncRules: VersionedSyncRules;
    params: util.StreamingSyncRequest;
-   syncParams: RequestParameters;
    token: auth.JwtPayload;
    logger?: Logger;
+   isEncodingAsBson: boolean;
    /**
     * If this signal is aborted, the stream response ends as soon as possible, without error.
     */
@@ -34,8 +40,17 @@
  export async function* streamResponse(
    options: SyncStreamParameters
  ): AsyncIterable<util.StreamingSyncLine | string | null> {
-   const { syncContext, bucketStorage, syncRules, params, syncParams, token, tokenStreamOptions, tracker, signal } =
-     options;
+   const {
+     syncContext,
+     bucketStorage,
+     syncRules,
+     params,
+     token,
+     tokenStreamOptions,
+     tracker,
+     signal,
+     isEncodingAsBson
+   } = options;
    const logger = options.logger ?? defaultLogger;

    // We also need to be able to abort, so we create our own controller.
@@ -58,10 +73,11 @@
      bucketStorage,
      syncRules,
      params,
-     syncParams,
+     token,
      tracker,
      controller.signal,
-     logger
+     logger,
+     isEncodingAsBson
    );
    // Merge the two streams, and abort as soon as one of the streams end.
    const merged = mergeAsyncIterables([stream, ki], controller.signal);
@@ -84,26 +100,25 @@
  async function* streamResponseInner(
    syncContext: SyncContext,
    bucketStorage: storage.SyncRulesBucketStorage,
-   syncRules: SqlSyncRules,
+   syncRules: VersionedSyncRules,
    params: util.StreamingSyncRequest,
-   syncParams: RequestParameters,
+   tokenPayload: RequestJwtPayload,
    tracker: RequestTracker,
    signal: AbortSignal,
-   logger: Logger
+   logger: Logger,
+   isEncodingAsBson: boolean
  ): AsyncGenerator<util.StreamingSyncLine | string | null> {
-   const { raw_data, binary_data } = params;
+   const { raw_data } = params;

-   const checkpointUserId = util.checkpointUserId(syncParams.tokenParameters.user_id as string, params.client_id);
+   const userId = tokenPayload.sub;
+   const checkpointUserId = util.checkpointUserId(userId as string, params.client_id);

    const checksumState = new BucketChecksumState({
      syncContext,
      bucketStorage,
      syncRules,
-     syncParams,
-     initialBucketPositions: params.buckets?.map((bucket) => ({
-       name: bucket.name,
-       after: BigInt(bucket.after)
-     })),
+     tokenPayload,
+     syncRequest: params,
      logger: logger
    });
    const stream = bucketStorage.watchCheckpointChanges({
@@ -223,12 +238,11 @@
        checkpoint: next.value.value.checkpoint,
        bucketsToFetch: buckets,
        checkpointLine: line,
-       raw_data,
-       binary_data,
+       legacyDataLines: !isEncodingAsBson && params.raw_data != true,
        onRowsSent: markOperationsSent,
        abort_connection: signal,
        abort_batch: abortCheckpointSignal,
-       user_id: syncParams.userId,
+       user_id: userId,
        // Passing null here will emit a full sync complete message at the end. If we pass a priority, we'll emit a partial
        // sync complete message instead.
        forPriority: !isLast ? priority : null,
@@ -253,8 +267,8 @@
    checkpointLine: CheckpointLine;
    /** Subset of checkpointLine.bucketsToFetch, filtered by priority. */
    bucketsToFetch: BucketDescription[];
-   raw_data: boolean | undefined;
-   binary_data: boolean | undefined;
+   /** Whether data lines should be encoded in a legacy format where {@link util.OplogEntry.data} is a nested object. */
+   legacyDataLines: boolean;
    /** Signals that the connection was aborted and that streaming should stop ASAP. */
    abort_connection: AbortSignal;
    /**
@@ -315,8 +329,7 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
      checkpoint,
      bucketsToFetch,
      checkpointLine,
-     raw_data,
-     binary_data,
+     legacyDataLines,
      abort_connection,
      abort_batch,
      onRowsSent,
@@ -366,32 +379,21 @@
      }
      logger.debug(`Sending data for ${r.bucket}`);

-     let send_data: any;
-     if (binary_data) {
-       // Send the object as is, will most likely be encoded as a BSON document
-       send_data = { data: r };
-     } else if (raw_data) {
-       /**
-        * Data is a raw string - we can use the more efficient JSON.stringify.
-        */
-       const response: util.StreamingSyncData = {
-         data: r
-       };
-       send_data = JSON.stringify(response);
-     } else {
-       // We need to preserve the embedded data exactly, so this uses a JsonContainer
-       // and JSONBig to stringify.
-       const response: util.StreamingSyncData = {
-         data: transformLegacyResponse(r)
-       };
-       send_data = JSONBig.stringify(response);
-     }
-     yield { data: send_data, done: false };
-     if (send_data.length > 50_000) {
-       // IMPORTANT: This does not affect the output stream, but is used to flush
-       // iterator memory in case if large data sent.
-       yield { data: null, done: false };
-     }
+     const line = legacyDataLines
+       ? // We need to preserve the embedded data exactly, so this uses a JsonContainer
+         // and JSONBig to stringify.
+         JSONBig.stringify({
+           data: transformLegacyResponse(r)
+         } satisfies util.StreamingSyncData)
+       : // We can send the object as-is, which will be converted to JSON or BSON by a downstream transformer.
+         ({ data: r } satisfies util.StreamingSyncData);
+
+     yield { data: line, done: false };
+
+     // IMPORTANT: This does not affect the output stream, but is used to flush
+     // iterator memory in case if large data sent.
+     yield { data: null, done: false };
+
      onRowsSent(statsForBatch(r));

      checkpointLine.updateBucketPosition({ bucket: r.bucket, nextAfter: BigInt(r.next_after), hasMore: r.has_more });
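
Note on the encoding change above: the per-request binary_data flag is gone, whether a connection uses BSON is now decided by the transport via isEncodingAsBson, and only legacy clients get eagerly-stringified JSONBig lines. A minimal sketch of the three resulting modes (the helper name and types here are illustrative, not part of the package):

// Mirrors the condition `legacyDataLines: !isEncodingAsBson && params.raw_data != true` above.
type DataLineEncoding = 'bson-object' | 'json-object' | 'legacy-jsonbig-string';

function pickDataLineEncoding(isEncodingAsBson: boolean, raw_data: boolean | undefined): DataLineEncoding {
  if (isEncodingAsBson) {
    // Object passed through; a downstream transformer serializes it to BSON.
    return 'bson-object';
  } else if (raw_data) {
    // Object passed through; a downstream transformer serializes it to JSON.
    return 'json-object';
  } else {
    // legacyDataLines == true: stringified inline with JSONBig to preserve
    // embedded data exactly.
    return 'legacy-jsonbig-string';
  }
}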
package/src/sync/util.ts CHANGED
@@ -3,6 +3,7 @@ import * as timers from 'timers/promises';
  import { SemaphoreInterface } from 'async-mutex';
  import * as util from '../util/util-index.js';
  import { RequestTracker } from './RequestTracker.js';
+ import { serialize } from 'bson';

  export type TokenStreamOptions = {
    /**
@@ -76,6 +77,27 @@ export async function* tokenStream(
    }
  }

+ export function syncLineToBson(line: string | Record<string, any>): Buffer {
+   if (typeof line == 'string') {
+     // Should not happen with binary_data: true
+     throw new Error(`Unexpected string data: ${line}`);
+   } else {
+     // On NodeJS, serialize always returns a Buffer
+     return serialize(line) as Buffer;
+   }
+ }
+
+ export async function* bsonLines(iterator: AsyncIterable<string | null | Record<string, any>>): AsyncGenerator<Buffer> {
+   for await (let line of iterator) {
+     if (line == null) {
+       // Empty value just to flush iterator memory
+       continue;
+     } else {
+       yield syncLineToBson(line);
+     }
+   }
+ }
+
  export async function* ndjson(iterator: AsyncIterable<string | null | Record<string, any>>): AsyncGenerator<string> {
    for await (let data of iterator) {
      if (data == null) {
@@ -91,12 +113,19 @@ export async function* ndjson(iterator: AsyncIterable<string | null | Record<str
    }
  }

  export async function* transformToBytesTracked(
-   iterator: AsyncIterable<string>,
+   iterator: AsyncIterable<string | Buffer>,
    tracker: RequestTracker
  ): AsyncGenerator<Buffer> {
    for await (let data of iterator) {
-     const encoded = Buffer.from(data, 'utf8');
-     tracker.addDataSynced(encoded.length);
+     let encoded: Buffer;
+
+     if (typeof data == 'string') {
+       encoded = Buffer.from(data, 'utf8');
+     } else {
+       encoded = data;
+     }
+
+     tracker.addPlaintextDataSynced(encoded.length);
      yield encoded;
    }
  }
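
Note: these helpers give the response pipeline a BSON counterpart to ndjson. A sketch of the assumed wiring (the real wiring lives in the route handlers such as sync-stream.ts and socket-route.ts, which are not shown in full here):

import { RequestTracker } from './RequestTracker.js';
import { bsonLines, ndjson, transformToBytesTracked } from './util.js';

// Illustrative only: encode sync lines for the negotiated transport encoding.
function encodeSyncLines(
  lines: AsyncIterable<string | null | Record<string, any>>,
  isEncodingAsBson: boolean,
  tracker: RequestTracker
): AsyncGenerator<Buffer> {
  // bsonLines yields one BSON document per line; ndjson yields newline-delimited
  // JSON strings. transformToBytesTracked accepts both and records the plaintext
  // byte count - the rename to addPlaintextDataSynced suggests bytes are counted
  // before the response compression added in routes/compression.ts is applied.
  return transformToBytesTracked(isEncodingAsBson ? bsonLines(lines) : ndjson(lines), tracker);
}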
package/src/util/config/compound-config-collector.ts CHANGED
@@ -89,6 +89,7 @@ export class CompoundConfigCollector {
          }
        ])
      );
+     keyStore.supabaseAuthDebug.sharedSecretEnabled = true;
    }

    let jwks_uris = baseConfig.client_auth?.jwks_uri ?? [];
@@ -114,6 +115,29 @@
    for (let uri of jwks_uris) {
      collectors.add(new auth.CachedKeyCollector(new auth.RemoteJWKSCollector(uri, { lookupOptions: jwksLookup })));
    }
+   const supabaseAuthDetails = auth.getSupabaseJwksUrl(baseConfig.replication?.connections?.[0]);
+   keyStore.supabaseAuthDebug.jwksDetails = supabaseAuthDetails;
+
+   if (baseConfig.client_auth?.supabase) {
+     // Automatic support for Supabase signing keys:
+     // https://supabase.com/docs/guides/auth/signing-keys
+     if (supabaseAuthDetails != null) {
+       const collector = new auth.RemoteJWKSCollector(supabaseAuthDetails.url, {
+         lookupOptions: jwksLookup,
+         // Special case aud and max lifetime for Supabase keys
+         keyOptions: auth.SUPABASE_KEY_OPTIONS
+       });
+       collectors.add(new auth.CachedKeyCollector(collector));
+       keyStore.supabaseAuthDebug.jwksEnabled = true;
+       logger.info(`Configured Supabase Auth with ${supabaseAuthDetails.url}`);
+     } else {
+       logger.warn(
+         'Supabase Auth is enabled, but no Supabase connection string found. Skipping Supabase JWKS URL configuration.'
+       );
+     }
+   } else if (supabaseAuthDetails != null) {
+     logger.warn(`Supabase connection string found, but Supabase Auth is not enabled in the config.`);
+   }

    const sync_rules = await this.collectSyncRules(baseConfig, runnerConfig);

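Note: the outcome matrix for the new Supabase JWKS support, as a small sketch built only from the log messages above (the { url: string } | null shape of getSupabaseJwksUrl's result is inferred from its usage; the final label is invented for this sketch):

function supabaseAuthOutcome(supabaseAuthEnabled: boolean, details: { url: string } | null): string {
  if (supabaseAuthEnabled && details != null) {
    return `Configured Supabase Auth with ${details.url}`; // JWKS collector registered
  } else if (supabaseAuthEnabled) {
    return 'Supabase Auth is enabled, but no Supabase connection string found.'; // setup skipped
  } else if (details != null) {
    return 'Supabase connection string found, but Supabase Auth is not enabled in the config.'; // warning only
  } else {
    return 'No Supabase auth configured.'; // no-op case
  }
}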
package/src/util/lsn.ts ADDED
@@ -0,0 +1,8 @@
+ /**
+  * Return the larger of two LSNs.
+  */
+ export function maxLsn(a: string | null | undefined, b: string | null | undefined): string | null {
+   if (a == null) return b ?? null;
+   if (b == null) return a;
+   return a > b ? a : b;
+ }
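
Note: maxLsn uses plain string comparison (a > b), so it assumes LSNs are serialized in a form whose lexicographic order matches their numeric order (e.g. fixed-width, zero-padded segments). A usage sketch with invented, padded Postgres-style values:

import { maxLsn } from './lsn.js';

maxLsn('00000000/016B3748', '00000000/016B3750'); // => '00000000/016B3750'
maxLsn('00000000/016B3748', null);                // => '00000000/016B3748'
maxLsn(undefined, undefined);                     // => null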
package/src/util/protocol-types.ts CHANGED
@@ -13,9 +13,51 @@ export const BucketRequest = t.object({

  export type BucketRequest = t.Decoded<typeof BucketRequest>;

+ /**
+  * A sync stream that a client has expressed interest in by explicitly opening it on the client side.
+  */
+ export const RequestedStreamSubscription = t.object({
+   /**
+    * The defined name of the stream as it appears in sync stream definitions.
+    */
+   stream: t.string,
+   /**
+    * An optional dictionary of parameters to pass to this specific stream.
+    */
+   parameters: t.record(t.any).optional(),
+   /**
+    * Set when the client wishes to re-assign a different priority to this stream.
+    *
+    * Streams and sync rules can also assign a default priority, but clients are allowed to override those. This can be
+    * useful when the priority for partial syncs depends on e.g. the current page opened in a client.
+    */
+   override_priority: t.union(t.number, t.Null)
+ });
+
+ export type RequestedStreamSubscription = t.Decoded<typeof RequestedStreamSubscription>;
+
+ /**
+  * An overview of all subscribed streams as part of a streaming sync request.
+  */
+ export const StreamSubscriptionRequest = t.object({
+   /**
+    * Whether to sync default streams.
+    *
+    * When disabled, only explicitly-opened subscriptions are included.
+    */
+   include_defaults: t.boolean.optional(),
+
+   /**
+    * An array of sync streams the client has opened explicitly.
+    */
+   subscriptions: t.array(RequestedStreamSubscription)
+ });
+
+ export type StreamSubscriptionRequest = t.Decoded<typeof StreamSubscriptionRequest>;
+
  export const StreamingSyncRequest = t.object({
    /**
-    * Existing bucket states.
+    * Existing client-side bucket states.
     */
    buckets: t.array(BucketRequest).optional(),

@@ -34,11 +76,6 @@ export const StreamingSyncRequest = t.object({
     */
    raw_data: t.boolean.optional(),

-   /**
-    * Data is received in a serialized BSON Buffer
-    */
-   binary_data: t.boolean.optional(),
-

    /**
     * Client parameters to be passed to the sync rules.
     */
@@ -47,7 +84,12 @@

    /**
     * Unique client id.
     */
-   client_id: t.string.optional()
+   client_id: t.string.optional(),
+
+   /**
+    * If the client is aware of streams, an array of streams the client has opened.
+    */
+   streams: StreamSubscriptionRequest.optional()
  });

  export type StreamingSyncRequest = t.Decoded<typeof StreamingSyncRequest>;
@@ -60,7 +102,7 @@ export interface StreamingSyncCheckpointDiff {
    checkpoint_diff: {
      last_op_id: ProtocolOpId;
      write_checkpoint?: ProtocolOpId;
-     updated_buckets: BucketChecksumWithDescription[];
+     updated_buckets: CheckpointBucket[];
      removed_buckets: string[];
    };
  }
@@ -99,10 +141,54 @@ export type StreamingSyncLine =
   */
  export type ProtocolOpId = string;

+ export interface StreamDescription {
+   /**
+    * The name of the stream as it appears in the sync configuration.
+    */
+   name: string;
+
+   /**
+    * Whether this stream is subscribed to by default.
+    *
+    * For default streams, this field is still `true` if clients have an explicit subscription to the stream.
+    */
+   is_default: boolean;
+
+   /**
+    * If some subscriptions on this stream could not be resolved, e.g. due to an error, this array contains the faulty
+    * subscriptions along with an error message.
+    */
+   errors: StreamSubscriptionError[];
+ }
+
+ export interface StreamSubscriptionError {
+   /**
+    * The subscription that errored - either the default subscription or some of the explicit subscriptions.
+    */
+   subscription: 'default' | number;
+   /**
+    * A message describing the error on the subscription.
+    */
+   message: string;
+ }
+
  export interface Checkpoint {
    last_op_id: ProtocolOpId;
    write_checkpoint?: ProtocolOpId;
-   buckets: BucketChecksumWithDescription[];
+   buckets: CheckpointBucket[];
+
+   /**
+    * All streams that the client is subscribed to.
+    *
+    * This field has two purposes:
+    *
+    * 1. It allows clients to determine which of their subscriptions actually works. E.g. if a user does
+    *    `db.syncStream('non_existent_stream').subscribe()`, clients don't immediately know that the stream doesn't
+    *    exist. Only after the next `checkpoint` line can they query this field and mark unresolved subscriptions.
+    * 2. It allows clients to learn which default streams they have been subscribed to. This is relevant for APIs
+    *    listing all streams on the client-side.
+    */
+   streams: StreamDescription[];
  }

  export interface BucketState {
@@ -158,4 +244,46 @@ export interface BucketChecksum {
    count: number;
  }

- export interface BucketChecksumWithDescription extends BucketChecksum, BucketDescription {}
+ /**
+  * The reason a particular bucket is included in a checkpoint.
+  *
+  * This information allows clients to associate individual buckets with sync streams they're subscribed to. Having that
+  * association is useful because it enables clients to track progress for individual sync streams.
+  */
+ export type BucketSubscriptionReason = BucketDerivedFromDefaultStream | BucketDerivedFromExplicitSubscription;
+
+ /**
+  * A bucket has been included in a checkpoint because it's part of a default stream.
+  */
+ export type BucketDerivedFromDefaultStream = {
+   /**
+    * The index (into {@link Checkpoint.streams}) of the stream defining the bucket.
+    */
+   default: number;
+ };
+
+ /**
+  * The bucket has been included in a checkpoint because it's part of a stream that a client has explicitly subscribed
+  * to.
+  */
+ export type BucketDerivedFromExplicitSubscription = {
+   /**
+    * The index (into {@link StreamSubscriptionRequest.subscriptions}) of the subscription yielding this bucket.
+    */
+   sub: number;
+ };
+
+ export interface ClientBucketDescription {
+   /**
+    * An opaque id of the bucket.
+    */
+   bucket: string;
+   /**
+    * The priority used to synchronize this bucket, derived from its definition and an optional priority override from
+    * the stream subscription.
+    */
+   priority: BucketPriority;
+   subscriptions: BucketSubscriptionReason[];
+ }
+
+ export interface CheckpointBucket extends BucketChecksum, ClientBucketDescription {}
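
Note: to make the new stream-aware protocol concrete, a hypothetical request and a matching checkpoint line (all concrete values invented; the shapes follow the definitions above):

const request: StreamingSyncRequest = {
  client_id: 'client-a1',
  raw_data: true,
  buckets: [{ name: 'by_user["u1"]', after: '10' }],
  streams: {
    include_defaults: true,
    subscriptions: [
      // Index 0 - referenced by `sub: 0` in the checkpoint buckets below.
      { stream: 'issues', parameters: { project_id: 'p1' }, override_priority: 1 },
      { stream: 'comments', override_priority: null }
    ]
  }
};

const checkpoint: Checkpoint = {
  last_op_id: '42',
  streams: [
    // Indexed by BucketDerivedFromDefaultStream.default.
    { name: 'issues', is_default: false, errors: [] },
    { name: 'comments', is_default: false, errors: [] }
  ],
  buckets: [
    {
      bucket: 'issues["p1"]',
      checksum: 123456789,
      count: 10,
      priority: 1,
      // This bucket exists because of the explicit subscription at index 0.
      subscriptions: [{ sub: 0 }]
    }
  ]
};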
package/src/util/util-index.ts CHANGED
@@ -1,5 +1,6 @@
  export * from './alerting.js';
  export * from './env.js';
+ export * from './lsn.js';
  export * from './memory-tracking.js';
  export * from './Mutex.js';
  export * from './protocol-types.js';
package/src/util/utils.ts CHANGED
@@ -6,11 +6,26 @@ import { BucketChecksum, ProtocolOpId, OplogEntry } from './protocol-types.js';

  import * as storage from '../storage/storage-index.js';

- import { PartialChecksum } from '../storage/ChecksumCache.js';
  import { ServiceAssertionError } from '@powersync/lib-services-framework';

  export type ChecksumMap = Map<string, BucketChecksum>;

+ /**
+  * A partial checksum can never be used on its own - must always be combined with a full BucketChecksum.
+  */
+ export interface PartialChecksum {
+   bucket: string;
+   /**
+    * 32-bit unsigned hash.
+    */
+   partialChecksum: number;
+
+   /**
+    * Count of operations - informational only.
+    */
+   partialCount: number;
+ }
+
  /**
   * op_id as used internally, for individual operations and checkpoints.
   *
@@ -83,20 +98,49 @@ export function addChecksums(a: number, b: number) {
    return (a + b) & 0xffffffff;
  }

- export function addBucketChecksums(a: BucketChecksum, b: PartialChecksum | null): BucketChecksum {
-   if (b == null) {
-     return a;
-   } else if (b.isFullChecksum) {
+ export function isPartialChecksum(c: PartialChecksum | BucketChecksum): c is PartialChecksum {
+   return 'partialChecksum' in c;
+ }
+
+ export function addBucketChecksums(a: BucketChecksum, b: PartialChecksum | BucketChecksum | null): BucketChecksum {
+   const checksum = addPartialChecksums(a.bucket, a, b);
+   if (isPartialChecksum(checksum)) {
+     // Should not happen since a != null
+     throw new ServiceAssertionError('Expected full checksum');
+   }
+   return checksum;
+ }
+
+ export function addPartialChecksums(
+   bucket: string,
+   a: BucketChecksum | null,
+   b: PartialChecksum | BucketChecksum | null
+ ): PartialChecksum | BucketChecksum {
+   if (a != null && b != null) {
+     if (!isPartialChecksum(b)) {
+       // Replaces preState
+       return b;
+     }
+     // merge
+     return {
+       bucket,
+       checksum: addChecksums(a.checksum, b.partialChecksum),
+       count: a.count + b.partialCount
+     };
+   } else if (a != null) {
      return {
-       bucket: b.bucket,
-       count: b.partialCount,
-       checksum: b.partialChecksum
+       bucket,
+       checksum: a.checksum,
+       count: a.count
      };
+   } else if (b != null) {
+     return b;
    } else {
+     // No data found (may still have a previously-cached checksum).
      return {
-       bucket: a.bucket,
-       count: a.count + b.partialCount,
-       checksum: addChecksums(a.checksum, b.partialChecksum)
+       bucket,
+       partialChecksum: 0,
+       partialCount: 0
      };
    }
  }
@@ -148,7 +192,10 @@ export function hasToastedValues(row: sync_rules.ToastableSqliteRow) {
   *
   * If we don't store data, we assume we always have a complete row.
   */
- export function isCompleteRow(storeData: boolean, row: sync_rules.ToastableSqliteRow): row is sync_rules.SqliteRow {
+ export function isCompleteRow(
+   storeData: boolean,
+   row: sync_rules.ToastableSqliteRow
+ ): row is sync_rules.SqliteInputRow {
    if (!storeData) {
      // Assume the row is complete - no need to check
      return true;
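
Note: a worked example of the new merge semantics (values invented): a cached full checksum for a bucket combined with a partial checksum covering newer operations.

const cached: BucketChecksum = { bucket: 'b1', checksum: 100, count: 5 };
const newer: PartialChecksum = { bucket: 'b1', partialChecksum: 23, partialCount: 2 };

addPartialChecksums('b1', cached, newer);
// => { bucket: 'b1', checksum: 123, count: 7 } - merged via addChecksums (32-bit wrap-around)

addPartialChecksums('b1', cached, { bucket: 'b1', checksum: 9, count: 1 });
// => { bucket: 'b1', checksum: 9, count: 1 } - a full checksum replaces the cached one

addPartialChecksums('b1', null, null);
// => { bucket: 'b1', partialChecksum: 0, partialCount: 0 } - still partial; the
//    caller may combine it with a previously-cached checksum later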