@powersync/service-core 1.14.0 → 1.15.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71)
  1. package/CHANGELOG.md +32 -0
  2. package/LICENSE +3 -3
  3. package/dist/api/api-metrics.js +5 -0
  4. package/dist/api/api-metrics.js.map +1 -1
  5. package/dist/api/diagnostics.js +1 -1
  6. package/dist/api/diagnostics.js.map +1 -1
  7. package/dist/metrics/open-telemetry/util.d.ts +0 -3
  8. package/dist/metrics/open-telemetry/util.js +18 -13
  9. package/dist/metrics/open-telemetry/util.js.map +1 -1
  10. package/dist/routes/compression.d.ts +19 -0
  11. package/dist/routes/compression.js +70 -0
  12. package/dist/routes/compression.js.map +1 -0
  13. package/dist/routes/configure-fastify.d.ts +40 -5
  14. package/dist/routes/endpoints/socket-route.js +24 -9
  15. package/dist/routes/endpoints/socket-route.js.map +1 -1
  16. package/dist/routes/endpoints/sync-rules.js +1 -27
  17. package/dist/routes/endpoints/sync-rules.js.map +1 -1
  18. package/dist/routes/endpoints/sync-stream.d.ts +80 -10
  19. package/dist/routes/endpoints/sync-stream.js +17 -12
  20. package/dist/routes/endpoints/sync-stream.js.map +1 -1
  21. package/dist/storage/BucketStorage.d.ts +1 -1
  22. package/dist/storage/BucketStorage.js.map +1 -1
  23. package/dist/storage/BucketStorageBatch.d.ts +4 -4
  24. package/dist/storage/BucketStorageBatch.js.map +1 -1
  25. package/dist/storage/ChecksumCache.d.ts +4 -19
  26. package/dist/storage/ChecksumCache.js +4 -0
  27. package/dist/storage/ChecksumCache.js.map +1 -1
  28. package/dist/storage/ReplicationEventPayload.d.ts +2 -2
  29. package/dist/storage/SyncRulesBucketStorage.d.ts +9 -0
  30. package/dist/storage/SyncRulesBucketStorage.js.map +1 -1
  31. package/dist/sync/BucketChecksumState.d.ts +40 -10
  32. package/dist/sync/BucketChecksumState.js +154 -18
  33. package/dist/sync/BucketChecksumState.js.map +1 -1
  34. package/dist/sync/RequestTracker.d.ts +7 -1
  35. package/dist/sync/RequestTracker.js +22 -2
  36. package/dist/sync/RequestTracker.js.map +1 -1
  37. package/dist/sync/sync.d.ts +3 -3
  38. package/dist/sync/sync.js +23 -42
  39. package/dist/sync/sync.js.map +1 -1
  40. package/dist/sync/util.js +1 -1
  41. package/dist/sync/util.js.map +1 -1
  42. package/dist/util/protocol-types.d.ts +153 -9
  43. package/dist/util/protocol-types.js +41 -6
  44. package/dist/util/protocol-types.js.map +1 -1
  45. package/dist/util/utils.d.ts +18 -3
  46. package/dist/util/utils.js +33 -9
  47. package/dist/util/utils.js.map +1 -1
  48. package/package.json +14 -14
  49. package/src/api/api-metrics.ts +6 -0
  50. package/src/api/diagnostics.ts +1 -1
  51. package/src/metrics/open-telemetry/util.ts +22 -21
  52. package/src/routes/compression.ts +75 -0
  53. package/src/routes/endpoints/socket-route.ts +24 -9
  54. package/src/routes/endpoints/sync-rules.ts +1 -28
  55. package/src/routes/endpoints/sync-stream.ts +18 -15
  56. package/src/storage/BucketStorage.ts +2 -2
  57. package/src/storage/BucketStorageBatch.ts +10 -4
  58. package/src/storage/ChecksumCache.ts +8 -22
  59. package/src/storage/ReplicationEventPayload.ts +2 -2
  60. package/src/storage/SyncRulesBucketStorage.ts +12 -0
  61. package/src/sync/BucketChecksumState.ts +192 -29
  62. package/src/sync/RequestTracker.ts +27 -2
  63. package/src/sync/sync.ts +53 -51
  64. package/src/sync/util.ts +1 -1
  65. package/src/util/protocol-types.ts +138 -10
  66. package/src/util/utils.ts +59 -12
  67. package/test/src/checksum_cache.test.ts +6 -8
  68. package/test/src/routes/mocks.ts +59 -0
  69. package/test/src/routes/stream.test.ts +84 -0
  70. package/test/src/sync/BucketChecksumState.test.ts +340 -42
  71. package/tsconfig.tsbuildinfo +1 -1
package/src/util/protocol-types.ts CHANGED
@@ -13,9 +13,51 @@ export const BucketRequest = t.object({
 
 export type BucketRequest = t.Decoded<typeof BucketRequest>;
 
+/**
+ * A sync stream that a client has expressed interest in by explicitly opening it on the client side.
+ */
+export const RequestedStreamSubscription = t.object({
+  /**
+   * The defined name of the stream as it appears in sync stream definitions.
+   */
+  stream: t.string,
+  /**
+   * An optional dictionary of parameters to pass to this specific stream.
+   */
+  parameters: t.record(t.any).optional(),
+  /**
+   * Set when the client wishes to re-assign a different priority to this stream.
+   *
+   * Streams and sync rules can also assign a default priority, but clients are allowed to override those. This can be
+   * useful when the priority for partial syncs depends on e.g. the current page opened in a client.
+   */
+  override_priority: t.union(t.number, t.Null)
+});
+
+export type RequestedStreamSubscription = t.Decoded<typeof RequestedStreamSubscription>;
+
+/**
+ * An overview of all subscribed streams as part of a streaming sync request.
+ */
+export const StreamSubscriptionRequest = t.object({
+  /**
+   * Whether to sync default streams.
+   *
+   * When disabled, only explicitly-opened subscriptions are included.
+   */
+  include_defaults: t.boolean.optional(),
+
+  /**
+   * An array of sync streams the client has opened explicitly.
+   */
+  subscriptions: t.array(RequestedStreamSubscription)
+});
+
+export type StreamSubscriptionRequest = t.Decoded<typeof StreamSubscriptionRequest>;
+
 export const StreamingSyncRequest = t.object({
   /**
-   * Existing bucket states.
+   * Existing client-side bucket states.
    */
   buckets: t.array(BucketRequest).optional(),
 
@@ -34,11 +76,6 @@ export const StreamingSyncRequest = t.object({
    */
   raw_data: t.boolean.optional(),
 
-  /**
-   * Data is received in a serialized BSON Buffer
-   */
-  binary_data: t.boolean.optional(),
-
   /**
    * Client parameters to be passed to the sync rules.
    */
@@ -47,7 +84,12 @@ export const StreamingSyncRequest = t.object({
   /**
    * Unique client id.
    */
-  client_id: t.string.optional()
+  client_id: t.string.optional(),
+
+  /**
+   * If the client is aware of streams, an array of streams the client has opened.
+   */
+  streams: StreamSubscriptionRequest.optional()
 });
 
 export type StreamingSyncRequest = t.Decoded<typeof StreamingSyncRequest>;
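
As an illustration of the new schema (not taken from the package; the stream name, parameters and values are invented), a streams-aware client might send a request body along these lines:

// Illustrative StreamingSyncRequest payload; unrelated optional fields omitted.
const request: StreamingSyncRequest = {
  client_id: 'client-123',
  raw_data: true,
  streams: {
    // Also subscribe to any streams marked as default in the sync configuration.
    include_defaults: true,
    subscriptions: [
      {
        stream: 'issues_for_project',
        parameters: { project_id: 'project-1' },
        // Override the stream's configured priority; null keeps the default.
        override_priority: 1
      }
    ]
  }
};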
@@ -60,7 +102,7 @@ export interface StreamingSyncCheckpointDiff {
   checkpoint_diff: {
     last_op_id: ProtocolOpId;
     write_checkpoint?: ProtocolOpId;
-    updated_buckets: BucketChecksumWithDescription[];
+    updated_buckets: CheckpointBucket[];
     removed_buckets: string[];
   };
 }
@@ -99,10 +141,54 @@ export type StreamingSyncLine =
  */
 export type ProtocolOpId = string;
 
+export interface StreamDescription {
+  /**
+   * The name of the stream as it appears in the sync configuration.
+   */
+  name: string;
+
+  /**
+   * Whether this stream is subscribed to by default.
+   *
+   * For default streams, this field is still `true` if clients have an explicit subscription to the stream.
+   */
+  is_default: boolean;
+
+  /**
+   * If some subscriptions on this stream could not be resolved, e.g. due to an error, this array contains the faulty
+   * subscriptions along with an error message.
+   */
+  errors: StreamSubscriptionError[];
+}
+
+export interface StreamSubscriptionError {
+  /**
+   * The subscription that errored - either the default subscription or one of the explicit subscriptions.
+   */
+  subscription: 'default' | number;
+  /**
+   * A message describing the error on the subscription.
+   */
+  message: string;
+}
+
 export interface Checkpoint {
   last_op_id: ProtocolOpId;
   write_checkpoint?: ProtocolOpId;
-  buckets: BucketChecksumWithDescription[];
+  buckets: CheckpointBucket[];
+
+  /**
+   * All streams that the client is subscribed to.
+   *
+   * This field has two purposes:
+   *
+   * 1. It allows clients to determine which of their subscriptions actually work. E.g. if a user does
+   *    `db.syncStream('non_existent_stream').subscribe()`, clients don't immediately know that the stream doesn't
+   *    exist. Only after the next `checkpoint` line can they query this field and mark unresolved subscriptions.
+   * 2. It allows clients to learn which default streams they have been subscribed to. This is relevant for APIs
+   *    listing all streams on the client-side.
+   */
+  streams: StreamDescription[];
 }
 
 export interface BucketState {
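
To make the two purposes of `streams` concrete, here is a rough sketch of how a checkpoint might look for a client subscribed to one default stream plus one unresolvable explicit subscription. The names, ids and error message are invented, and exactly how the service words errors is not shown in this diff:

// Hypothetical excerpt of a checkpoint line (illustrative values only).
const checkpoint: Partial<Checkpoint> = {
  last_op_id: '1000',
  streams: [
    // A default stream the client was subscribed to automatically.
    { name: 'todos', is_default: true, errors: [] },
    // An explicit subscription (index 0 in the request's subscriptions array)
    // that could not be resolved.
    {
      name: 'non_existent_stream',
      is_default: false,
      errors: [{ subscription: 0, message: 'stream not defined' }]
    }
  ]
};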
@@ -158,4 +244,46 @@ export interface BucketChecksum {
   count: number;
 }
 
-export interface BucketChecksumWithDescription extends BucketChecksum, BucketDescription {}
+/**
+ * The reason a particular bucket is included in a checkpoint.
+ *
+ * This information allows clients to associate individual buckets with sync streams they're subscribed to. Having that
+ * association is useful because it enables clients to track progress for individual sync streams.
+ */
+export type BucketSubscriptionReason = BucketDerivedFromDefaultStream | BucketDerivedFromExplicitSubscription;
+
+/**
+ * A bucket has been included in a checkpoint because it's part of a default stream.
+ */
+export type BucketDerivedFromDefaultStream = {
+  /**
+   * The index (into {@link Checkpoint.streams}) of the stream defining the bucket.
+   */
+  default: number;
+};
+
+/**
+ * The bucket has been included in a checkpoint because it's part of a stream that a client has explicitly subscribed
+ * to.
+ */
+export type BucketDerivedFromExplicitSubscription = {
+  /**
+   * The index (into {@link StreamSubscriptionRequest.subscriptions}) of the subscription yielding this bucket.
+   */
+  sub: number;
+};
+
+export interface ClientBucketDescription {
+  /**
+   * An opaque id of the bucket.
+   */
+  bucket: string;
+  /**
+   * The priority used to synchronize this bucket, derived from its definition and an optional priority override from
+   * the stream subscription.
+   */
+  priority: BucketPriority;
+  subscriptions: BucketSubscriptionReason[];
+}
+
+export interface CheckpointBucket extends BucketChecksum, ClientBucketDescription {}
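
A client can use these indices to attribute buckets back to streams, e.g. for per-stream progress tracking. A minimal sketch, assuming the client still holds the subscriptions array it sent in its request (the helper name is hypothetical, not part of the package):

// Hypothetical helper mapping a bucket's subscription reasons to stream names.
function streamNamesForBucket(
  checkpoint: Checkpoint,
  bucket: CheckpointBucket,
  requested: RequestedStreamSubscription[]
): string[] {
  return bucket.subscriptions.map((reason) => {
    if ('default' in reason) {
      // BucketDerivedFromDefaultStream: index into checkpoint.streams.
      return checkpoint.streams[reason.default].name;
    }
    // BucketDerivedFromExplicitSubscription: index into the request's subscriptions.
    return requested[reason.sub].stream;
  });
}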
package/src/util/utils.ts CHANGED
@@ -6,11 +6,26 @@ import { BucketChecksum, ProtocolOpId, OplogEntry } from './protocol-types.js';
 
 import * as storage from '../storage/storage-index.js';
 
-import { PartialChecksum } from '../storage/ChecksumCache.js';
 import { ServiceAssertionError } from '@powersync/lib-services-framework';
 
 export type ChecksumMap = Map<string, BucketChecksum>;
 
+/**
+ * A partial checksum can never be used on its own - it must always be combined with a full BucketChecksum.
+ */
+export interface PartialChecksum {
+  bucket: string;
+  /**
+   * 32-bit unsigned hash.
+   */
+  partialChecksum: number;
+
+  /**
+   * Count of operations - informational only.
+   */
+  partialCount: number;
+}
+
 /**
  * op_id as used internally, for individual operations and checkpoints.
  *
@@ -83,20 +98,49 @@ export function addChecksums(a: number, b: number) {
   return (a + b) & 0xffffffff;
 }
 
-export function addBucketChecksums(a: BucketChecksum, b: PartialChecksum | null): BucketChecksum {
-  if (b == null) {
-    return a;
-  } else if (b.isFullChecksum) {
+export function isPartialChecksum(c: PartialChecksum | BucketChecksum): c is PartialChecksum {
+  return 'partialChecksum' in c;
+}
+
+export function addBucketChecksums(a: BucketChecksum, b: PartialChecksum | BucketChecksum | null): BucketChecksum {
+  const checksum = addPartialChecksums(a.bucket, a, b);
+  if (isPartialChecksum(checksum)) {
+    // Should not happen since a != null
+    throw new ServiceAssertionError('Expected full checksum');
+  }
+  return checksum;
+}
+
+export function addPartialChecksums(
+  bucket: string,
+  a: BucketChecksum | null,
+  b: PartialChecksum | BucketChecksum | null
+): PartialChecksum | BucketChecksum {
+  if (a != null && b != null) {
+    if (!isPartialChecksum(b)) {
+      // A full checksum replaces the previous state.
+      return b;
+    }
+    // Merge the partial checksum into the full one.
+    return {
+      bucket,
+      checksum: addChecksums(a.checksum, b.partialChecksum),
+      count: a.count + b.partialCount
+    };
+  } else if (a != null) {
     return {
-      bucket: b.bucket,
-      count: b.partialCount,
-      checksum: b.partialChecksum
+      bucket,
+      checksum: a.checksum,
+      count: a.count
     };
+  } else if (b != null) {
+    return b;
   } else {
+    // No data found (may still have a previously-cached checksum).
     return {
-      bucket: a.bucket,
-      count: a.count + b.partialCount,
-      checksum: addChecksums(a.checksum, b.partialChecksum)
+      bucket,
+      partialChecksum: 0,
+      partialCount: 0
     };
   }
 }
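
The merge semantics of the reworked helpers can be illustrated as follows (bucket name and values are arbitrary examples, grounded in the implementation above):

// A cached full checksum, plus a partial checksum for newer operations:
const full: BucketChecksum = { bucket: 'b1', checksum: 1000, count: 10 };
const partial: PartialChecksum = { bucket: 'b1', partialChecksum: 24, partialCount: 2 };

// Partial checksums are merged: counts add up, hashes combine via addChecksums().
addPartialChecksums('b1', full, partial);
// => { bucket: 'b1', checksum: 1024, count: 12 }

// A full BucketChecksum in the second position replaces the previous state entirely.
addPartialChecksums('b1', full, { bucket: 'b1', checksum: 7, count: 3 });
// => { bucket: 'b1', checksum: 7, count: 3 }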
@@ -148,7 +192,10 @@ export function hasToastedValues(row: sync_rules.ToastableSqliteRow) {
  *
  * If we don't store data, we assume we always have a complete row.
  */
-export function isCompleteRow(storeData: boolean, row: sync_rules.ToastableSqliteRow): row is sync_rules.SqliteRow {
+export function isCompleteRow(
+  storeData: boolean,
+  row: sync_rules.ToastableSqliteRow
+): row is sync_rules.SqliteInputRow {
   if (!storeData) {
     // Assume the row is complete - no need to check
     return true;
package/test/src/checksum_cache.test.ts CHANGED
@@ -1,5 +1,5 @@
-import { ChecksumCache, FetchChecksums, FetchPartialBucketChecksum, PartialChecksum } from '@/storage/ChecksumCache.js';
-import { addChecksums, InternalOpId } from '@/util/util-index.js';
+import { ChecksumCache, FetchChecksums, FetchPartialBucketChecksum } from '@/storage/ChecksumCache.js';
+import { addChecksums, BucketChecksum, InternalOpId, PartialChecksum } from '@/util/util-index.js';
 import * as crypto from 'node:crypto';
 import { describe, expect, it } from 'vitest';
 
@@ -12,22 +12,20 @@ function testHash(bucket: string, checkpoint: InternalOpId) {
   return hash;
 }
 
-function testPartialHash(request: FetchPartialBucketChecksum): PartialChecksum {
+function testPartialHash(request: FetchPartialBucketChecksum): PartialChecksum | BucketChecksum {
   if (request.start) {
     const a = testHash(request.bucket, request.start);
     const b = testHash(request.bucket, request.end);
     return {
       bucket: request.bucket,
       partialCount: Number(request.end) - Number(request.start),
-      partialChecksum: addChecksums(b, -a),
-      isFullChecksum: false
+      partialChecksum: addChecksums(b, -a)
     };
   } else {
     return {
       bucket: request.bucket,
-      partialChecksum: testHash(request.bucket, request.end),
-      partialCount: Number(request.end),
-      isFullChecksum: true
+      checksum: testHash(request.bucket, request.end),
+      count: Number(request.end)
     };
   }
 }
package/test/src/routes/mocks.ts ADDED
@@ -0,0 +1,59 @@
+import {
+  BucketStorageFactory,
+  createCoreAPIMetrics,
+  MetricsEngine,
+  OpenTelemetryMetricsFactory,
+  RouteAPI,
+  RouterEngine,
+  ServiceContext,
+  StorageEngine,
+  SyncContext,
+  SyncRulesBucketStorage
+} from '@/index.js';
+import { MeterProvider } from '@opentelemetry/sdk-metrics';
+
+export function mockServiceContext(storage: Partial<SyncRulesBucketStorage> | null) {
+  // This is very incomplete - just enough to get the current tests passing.
+
+  const storageEngine: StorageEngine = {
+    activeBucketStorage: {
+      async getActiveStorage() {
+        return storage;
+      }
+    } as Partial<BucketStorageFactory>
+  } as any;
+
+  const meterProvider = new MeterProvider({
+    readers: []
+  });
+  const meter = meterProvider.getMeter('powersync-tests');
+  const metricsEngine = new MetricsEngine({
+    disable_telemetry_sharing: true,
+    factory: new OpenTelemetryMetricsFactory(meter)
+  });
+  createCoreAPIMetrics(metricsEngine);
+  const service_context: Partial<ServiceContext> = {
+    syncContext: new SyncContext({ maxBuckets: 1, maxDataFetchConcurrency: 1, maxParameterQueryResults: 1 }),
+    routerEngine: {
+      getAPI() {
+        return {
+          getParseSyncRulesOptions() {
+            return { defaultSchema: 'public' };
+          }
+        } as Partial<RouteAPI>;
+      },
+      addStopHandler() {
+        return () => {};
+      }
+    } as Partial<RouterEngine> as any,
+    storageEngine,
+    metricsEngine: metricsEngine,
+    // Not used
+    configuration: null as any,
+    lifeCycleEngine: null as any,
+    migrations: null as any,
+    replicationEngine: null as any,
+    serviceMode: null as any
+  };
+  return service_context as ServiceContext;
+}
package/test/src/routes/stream.test.ts ADDED
@@ -0,0 +1,84 @@
+import { BasicRouterRequest, Context, SyncRulesBucketStorage } from '@/index.js';
+import { logger, RouterResponse, ServiceError } from '@powersync/lib-services-framework';
+import { SqlSyncRules } from '@powersync/service-sync-rules';
+import { Readable, Writable } from 'stream';
+import { pipeline } from 'stream/promises';
+import { beforeEach, describe, expect, it, vi } from 'vitest';
+import { syncStreamed } from '../../../src/routes/endpoints/sync-stream.js';
+import { mockServiceContext } from './mocks.js';
+
+describe('Stream Route', () => {
+  describe('compressed stream', () => {
+    it('handles missing sync rules', async () => {
+      const context: Context = {
+        logger: logger,
+        service_context: mockServiceContext(null)
+      };
+
+      const request: BasicRouterRequest = {
+        headers: {},
+        hostname: '',
+        protocol: 'http'
+      };
+
+      const error = (await (syncStreamed.handler({ context, params: {}, request }) as Promise<RouterResponse>).catch(
+        (e) => e
+      )) as ServiceError;
+
+      expect(error.errorData.status).toEqual(500);
+      expect(error.errorData.code).toEqual('PSYNC_S2302');
+    });
+
+    it('handles a stream error with compression', async () => {
+      // This primarily tests that an underlying storage error doesn't result in an uncaught error
+      // when compressing the stream.
+
+      const storage = {
+        getParsedSyncRules() {
+          return new SqlSyncRules('bucket_definitions: {}');
+        },
+        watchCheckpointChanges: async function* (options) {
+          throw new Error('Simulated storage error');
+        }
+      } as Partial<SyncRulesBucketStorage>;
+      const serviceContext = mockServiceContext(storage);
+
+      const context: Context = {
+        logger: logger,
+        service_context: serviceContext,
+        token_payload: {
+          exp: new Date().getTime() / 1000 + 10000,
+          iat: new Date().getTime() / 1000 - 10000,
+          sub: 'test-user'
+        }
+      };
+
+      // It may be worth eventually doing this via Fastify to test the full stack.
+
+      const request: BasicRouterRequest = {
+        headers: {
+          'accept-encoding': 'gzip'
+        },
+        hostname: '',
+        protocol: 'http'
+      };
+
+      const response = await (syncStreamed.handler({ context, params: {}, request }) as Promise<RouterResponse>);
+      expect(response.status).toEqual(200);
+      const stream = response.data as Readable;
+      const r = await drainWithTimeout(stream).catch((error) => error);
+      expect(r.message).toContain('Simulated storage error');
+    });
+  });
+});
+
+export async function drainWithTimeout(readable: Readable, ms = 2_000) {
+  const devNull = new Writable({
+    write(_chunk, _enc, cb) {
+      cb();
+    } // discard everything
+  });
+
+  // Throws AbortError if it takes longer than ms, and destroys the stream.
+  await pipeline(readable, devNull, { signal: AbortSignal.timeout(ms) });
+}