@powersync/service-core 1.7.1 → 1.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. package/CHANGELOG.md +16 -0
  2. package/dist/routes/RouterEngine.js.map +1 -1
  3. package/dist/routes/configure-fastify.d.ts +3 -3
  4. package/dist/routes/endpoints/checkpointing.d.ts +6 -6
  5. package/dist/routes/endpoints/socket-route.js +2 -1
  6. package/dist/routes/endpoints/socket-route.js.map +1 -1
  7. package/dist/routes/endpoints/sync-stream.js +2 -1
  8. package/dist/routes/endpoints/sync-stream.js.map +1 -1
  9. package/dist/storage/BucketStorageBatch.d.ts +2 -1
  10. package/dist/storage/BucketStorageBatch.js.map +1 -1
  11. package/dist/storage/ChecksumCache.d.ts +6 -6
  12. package/dist/storage/ChecksumCache.js +5 -6
  13. package/dist/storage/ChecksumCache.js.map +1 -1
  14. package/dist/storage/SyncRulesBucketStorage.d.ts +9 -9
  15. package/dist/sync/BucketChecksumState.d.ts +8 -4
  16. package/dist/sync/BucketChecksumState.js +16 -8
  17. package/dist/sync/BucketChecksumState.js.map +1 -1
  18. package/dist/sync/SyncContext.d.ts +17 -0
  19. package/dist/sync/SyncContext.js +23 -0
  20. package/dist/sync/SyncContext.js.map +1 -0
  21. package/dist/sync/sync-index.d.ts +1 -0
  22. package/dist/sync/sync-index.js +1 -0
  23. package/dist/sync/sync-index.js.map +1 -1
  24. package/dist/sync/sync.d.ts +4 -2
  25. package/dist/sync/sync.js +16 -24
  26. package/dist/sync/sync.js.map +1 -1
  27. package/dist/system/ServiceContext.d.ts +3 -0
  28. package/dist/system/ServiceContext.js +7 -0
  29. package/dist/system/ServiceContext.js.map +1 -1
  30. package/dist/util/config/compound-config-collector.js +13 -1
  31. package/dist/util/config/compound-config-collector.js.map +1 -1
  32. package/dist/util/config/defaults.d.ts +5 -0
  33. package/dist/util/config/defaults.js +6 -0
  34. package/dist/util/config/defaults.js.map +1 -0
  35. package/dist/util/config/types.d.ts +7 -2
  36. package/dist/util/config/types.js.map +1 -1
  37. package/dist/util/protocol-types.d.ts +10 -10
  38. package/dist/util/utils.d.ts +12 -2
  39. package/dist/util/utils.js +5 -1
  40. package/dist/util/utils.js.map +1 -1
  41. package/package.json +3 -3
  42. package/src/routes/RouterEngine.ts +1 -0
  43. package/src/routes/endpoints/socket-route.ts +2 -1
  44. package/src/routes/endpoints/sync-stream.ts +2 -1
  45. package/src/storage/BucketStorageBatch.ts +2 -1
  46. package/src/storage/ChecksumCache.ts +13 -14
  47. package/src/storage/SyncRulesBucketStorage.ts +10 -10
  48. package/src/sync/BucketChecksumState.ts +38 -12
  49. package/src/sync/SyncContext.ts +36 -0
  50. package/src/sync/sync-index.ts +1 -0
  51. package/src/sync/sync.ts +31 -32
  52. package/src/system/ServiceContext.ts +9 -0
  53. package/src/util/config/compound-config-collector.ts +24 -1
  54. package/src/util/config/defaults.ts +5 -0
  55. package/src/util/config/types.ts +8 -2
  56. package/src/util/protocol-types.ts +10 -10
  57. package/src/util/utils.ts +13 -2
  58. package/test/src/checksum_cache.test.ts +83 -84
  59. package/test/src/sync/BucketChecksumState.test.ts +62 -41
  60. package/tsconfig.tsbuildinfo +1 -1
@@ -1,12 +1,11 @@
1
- import { BucketChecksum, OpId } from '../util/protocol-types.js';
2
- import { ChecksumMap, addBucketChecksums } from '../util/utils.js';
3
- import { LRUCache } from 'lru-cache/min';
4
1
  import { OrderedSet } from '@js-sdsl/ordered-set';
5
- import { BucketPriority } from '@powersync/service-sync-rules';
2
+ import { LRUCache } from 'lru-cache/min';
3
+ import { BucketChecksum } from '../util/protocol-types.js';
4
+ import { addBucketChecksums, ChecksumMap, InternalOpId } from '../util/utils.js';
6
5
 
7
6
  interface ChecksumFetchContext {
8
7
  fetch(bucket: string): Promise<BucketChecksum>;
9
- checkpoint: bigint;
8
+ checkpoint: InternalOpId;
10
9
  }
11
10
 
12
11
  export interface PartialChecksum {
@@ -28,10 +27,11 @@ export interface PartialChecksum {
28
27
  */
29
28
  isFullChecksum: boolean;
30
29
  }
30
+
31
31
  export interface FetchPartialBucketChecksum {
32
32
  bucket: string;
33
- start?: OpId;
34
- end: OpId;
33
+ start?: InternalOpId;
34
+ end: InternalOpId;
35
35
  }
36
36
 
37
37
  export type PartialChecksumMap = Map<string, PartialChecksum>;
@@ -101,8 +101,7 @@ export class ChecksumCache {
101
101
 
102
102
  dispose: (value, key) => {
103
103
  // Remove from the set of cached checkpoints for the bucket
104
- const { checkpointString } = parseCacheKey(key);
105
- const checkpoint = BigInt(checkpointString);
104
+ const { checkpoint } = parseCacheKey(key);
106
105
  const checkpointSet = this.bucketCheckpoints.get(value.bucket);
107
106
  if (checkpointSet == null) {
108
107
  return;
@@ -128,7 +127,7 @@ export class ChecksumCache {
128
127
  });
129
128
  }
130
129
 
131
- async getChecksums(checkpoint: OpId, buckets: string[]): Promise<BucketChecksum[]> {
130
+ async getChecksums(checkpoint: InternalOpId, buckets: string[]): Promise<BucketChecksum[]> {
132
131
  const checksums = await this.getChecksumMap(checkpoint, buckets);
133
132
  // Return results in the same order as the request
134
133
  return buckets.map((bucket) => checksums.get(bucket)!);
@@ -141,7 +140,7 @@ export class ChecksumCache {
141
140
  *
142
141
  * @returns a Map with exactly one entry for each bucket requested
143
142
  */
144
- async getChecksumMap(checkpoint: OpId, buckets: string[]): Promise<ChecksumMap> {
143
+ async getChecksumMap(checkpoint: InternalOpId, buckets: string[]): Promise<ChecksumMap> {
145
144
  // Buckets that don't have a cached checksum for this checkpoint yet
146
145
  let toFetch = new Set<string>();
147
146
 
@@ -235,7 +234,7 @@ export class ChecksumCache {
235
234
  // Partial checksum found - make a partial checksum request
236
235
  bucketRequest = {
237
236
  bucket,
238
- start: cp.toString(),
237
+ start: cp,
239
238
  end: checkpoint
240
239
  };
241
240
  add.set(bucket, cached);
@@ -315,11 +314,11 @@ export class ChecksumCache {
315
314
  }
316
315
  }
317
316
 
318
- function makeCacheKey(checkpoint: bigint | string, bucket: string) {
317
+ function makeCacheKey(checkpoint: InternalOpId | string, bucket: string) {
319
318
  return `${checkpoint}/${bucket}`;
320
319
  }
321
320
 
322
321
  function parseCacheKey(key: string) {
323
322
  const index = key.indexOf('/');
324
- return { checkpointString: key.substring(0, index), bucket: key.substring(index + 1) };
323
+ return { checkpoint: BigInt(key.substring(0, index)), bucket: key.substring(index + 1) };
325
324
  }
@@ -71,7 +71,7 @@ export interface SyncRulesBucketStorage
71
71
  /**
72
72
  * Used to resolve "dynamic" parameter queries.
73
73
  */
74
- getParameterSets(checkpoint: util.OpId, lookups: SqliteJsonValue[][]): Promise<SqliteJsonRow[]>;
74
+ getParameterSets(checkpoint: util.InternalOpId, lookups: SqliteJsonValue[][]): Promise<SqliteJsonRow[]>;
75
75
 
76
76
  getCheckpointChanges(options: GetCheckpointChangesOptions): Promise<CheckpointChanges>;
77
77
 
@@ -94,8 +94,8 @@ export interface SyncRulesBucketStorage
94
94
  * @param options batch size options
95
95
  */
96
96
  getBucketDataBatch(
97
- checkpoint: util.OpId,
98
- dataBuckets: Map<string, string>,
97
+ checkpoint: util.InternalOpId,
98
+ dataBuckets: Map<string, util.InternalOpId>,
99
99
  options?: BucketDataBatchOptions
100
100
  ): AsyncIterable<SyncBucketDataBatch>;
101
101
 
@@ -104,7 +104,7 @@ export interface SyncRulesBucketStorage
104
104
  *
105
105
  * Returns zero checksums for any buckets not found.
106
106
  */
107
- getChecksums(checkpoint: util.OpId, buckets: string[]): Promise<util.ChecksumMap>;
107
+ getChecksums(checkpoint: util.InternalOpId, buckets: string[]): Promise<util.ChecksumMap>;
108
108
  }
109
109
 
110
110
  export interface SyncRulesBucketStorageListener {
@@ -169,7 +169,7 @@ export interface CompactOptions {
169
169
  * This can also be used to create a "safe buffer" of recent operations that should
170
170
  * not be compacted, to avoid invalidating checkpoints in use.
171
171
  */
172
- maxOpId?: bigint;
172
+ maxOpId?: util.InternalOpId;
173
173
 
174
174
  /**
175
175
  * If specified, compact only the specific buckets.
@@ -215,11 +215,11 @@ export interface BucketDataBatchOptions {
215
215
 
216
216
  export interface SyncBucketDataBatch {
217
217
  batch: util.SyncBucketData;
218
- targetOp: bigint | null;
218
+ targetOp: util.InternalOpId | null;
219
219
  }
220
220
 
221
221
  export interface ReplicationCheckpoint {
222
- readonly checkpoint: util.OpId;
222
+ readonly checkpoint: util.InternalOpId;
223
223
  readonly lsn: string | null;
224
224
  }
225
225
 
@@ -238,7 +238,7 @@ export interface WatchFilterEvent {
238
238
 
239
239
  export interface WriteCheckpoint {
240
240
  base: ReplicationCheckpoint;
241
- writeCheckpoint: bigint | null;
241
+ writeCheckpoint: util.InternalOpId | null;
242
242
  }
243
243
 
244
244
  export interface StorageCheckpointUpdate extends WriteCheckpoint {
@@ -246,8 +246,8 @@ export interface StorageCheckpointUpdate extends WriteCheckpoint {
246
246
  }
247
247
 
248
248
  export interface GetCheckpointChangesOptions {
249
- lastCheckpoint: util.OpId;
250
- nextCheckpoint: util.OpId;
249
+ lastCheckpoint: util.InternalOpId;
250
+ nextCheckpoint: util.InternalOpId;
251
251
  }
252
252
 
253
253
  export interface CheckpointChanges {
@@ -6,12 +6,14 @@ import * as util from '../util/util-index.js';
6
6
  import { ErrorCode, logger, ServiceAssertionError, ServiceError } from '@powersync/lib-services-framework';
7
7
  import { BucketParameterQuerier } from '@powersync/service-sync-rules/src/BucketParameterQuerier.js';
8
8
  import { BucketSyncState } from './sync.js';
9
+ import { SyncContext } from './SyncContext.js';
9
10
 
10
11
  export interface BucketChecksumStateOptions {
12
+ syncContext: SyncContext;
11
13
  bucketStorage: BucketChecksumStateStorage;
12
14
  syncRules: SqlSyncRules;
13
15
  syncParams: RequestParameters;
14
- initialBucketPositions?: { name: string; after: string }[];
16
+ initialBucketPositions?: { name: string; after: util.InternalOpId }[];
15
17
  }
16
18
 
17
19
  /**
@@ -20,6 +22,7 @@ export interface BucketChecksumStateOptions {
20
22
  * Handles incrementally re-computing checkpoints.
21
23
  */
22
24
  export class BucketChecksumState {
25
+ private readonly context: SyncContext;
23
26
  private readonly bucketStorage: BucketChecksumStateStorage;
24
27
 
25
28
  /**
@@ -43,8 +46,14 @@ export class BucketChecksumState {
43
46
  private pendingBucketDownloads = new Set<string>();
44
47
 
45
48
  constructor(options: BucketChecksumStateOptions) {
49
+ this.context = options.syncContext;
46
50
  this.bucketStorage = options.bucketStorage;
47
- this.parameterState = new BucketParameterState(options.bucketStorage, options.syncRules, options.syncParams);
51
+ this.parameterState = new BucketParameterState(
52
+ options.syncContext,
53
+ options.bucketStorage,
54
+ options.syncRules,
55
+ options.syncParams
56
+ );
48
57
  this.bucketDataPositions = new Map();
49
58
 
50
59
  for (let { name, after: start } of options.initialBucketPositions ?? []) {
@@ -69,10 +78,16 @@ export class BucketChecksumState {
69
78
  for (let bucket of allBuckets) {
70
79
  dataBucketsNew.set(bucket.bucket, {
71
80
  description: bucket,
72
- start_op_id: this.bucketDataPositions.get(bucket.bucket)?.start_op_id ?? '0'
81
+ start_op_id: this.bucketDataPositions.get(bucket.bucket)?.start_op_id ?? 0n
73
82
  });
74
83
  }
75
84
  this.bucketDataPositions = dataBucketsNew;
85
+ if (dataBucketsNew.size > this.context.maxBuckets) {
86
+ throw new ServiceError(
87
+ ErrorCode.PSYNC_S2305,
88
+ `Too many buckets: ${dataBucketsNew.size} (limit of ${this.context.maxBuckets})`
89
+ );
90
+ }
76
91
 
77
92
  let checksumMap: util.ChecksumMap;
78
93
  if (updatedBuckets != null) {
@@ -165,7 +180,7 @@ export class BucketChecksumState {
165
180
 
166
181
  checkpointLine = {
167
182
  checkpoint_diff: {
168
- last_op_id: base.checkpoint,
183
+ last_op_id: util.internalToExternalOpId(base.checkpoint),
169
184
  write_checkpoint: writeCheckpoint ? String(writeCheckpoint) : undefined,
170
185
  removed_buckets: diff.removedBuckets,
171
186
  updated_buckets: updatedBucketDescriptions
@@ -178,7 +193,7 @@ export class BucketChecksumState {
178
193
  bucketsToFetch = allBuckets;
179
194
  checkpointLine = {
180
195
  checkpoint: {
181
- last_op_id: base.checkpoint,
196
+ last_op_id: util.internalToExternalOpId(base.checkpoint),
182
197
  write_checkpoint: writeCheckpoint ? String(writeCheckpoint) : undefined,
183
198
  buckets: [...checksumMap.values()].map((e) => ({
184
199
  ...e,
@@ -204,8 +219,8 @@ export class BucketChecksumState {
204
219
  * @param bucketsToFetch List of buckets to fetch, typically from buildNextCheckpointLine, or a subset of that
205
220
  * @returns
206
221
  */
207
- getFilteredBucketPositions(bucketsToFetch: BucketDescription[]): Map<string, string> {
208
- const filtered = new Map<string, string>();
222
+ getFilteredBucketPositions(bucketsToFetch: BucketDescription[]): Map<string, util.InternalOpId> {
223
+ const filtered = new Map<string, util.InternalOpId>();
209
224
  for (let bucket of bucketsToFetch) {
210
225
  const state = this.bucketDataPositions.get(bucket.bucket);
211
226
  if (state) {
@@ -221,7 +236,7 @@ export class BucketChecksumState {
221
236
  * @param bucket the bucket name
222
237
  * @param nextAfter sync operations >= this value in the next batch
223
238
  */
224
- updateBucketPosition(options: { bucket: string; nextAfter: string; hasMore: boolean }) {
239
+ updateBucketPosition(options: { bucket: string; nextAfter: util.InternalOpId; hasMore: boolean }) {
225
240
  const state = this.bucketDataPositions.get(options.bucket);
226
241
  if (state) {
227
242
  state.start_op_id = options.nextAfter;
@@ -247,13 +262,20 @@ export interface CheckpointUpdate {
247
262
  }
248
263
 
249
264
  export class BucketParameterState {
265
+ private readonly context: SyncContext;
250
266
  public readonly bucketStorage: BucketChecksumStateStorage;
251
267
  public readonly syncRules: SqlSyncRules;
252
268
  public readonly syncParams: RequestParameters;
253
269
  private readonly querier: BucketParameterQuerier;
254
270
  private readonly staticBuckets: Map<string, BucketDescription>;
255
271
 
256
- constructor(bucketStorage: BucketChecksumStateStorage, syncRules: SqlSyncRules, syncParams: RequestParameters) {
272
+ constructor(
273
+ context: SyncContext,
274
+ bucketStorage: BucketChecksumStateStorage,
275
+ syncRules: SqlSyncRules,
276
+ syncParams: RequestParameters
277
+ ) {
278
+ this.context = context;
257
279
  this.bucketStorage = bucketStorage;
258
280
  this.syncRules = syncRules;
259
281
  this.syncParams = syncParams;
@@ -275,9 +297,13 @@ export class BucketParameterState {
275
297
  return null;
276
298
  }
277
299
 
278
- if (update.buckets.length > 1000) {
279
- // TODO: Limit number of buckets even before we get to this point
280
- const error = new ServiceError(ErrorCode.PSYNC_S2305, `Too many buckets: ${update.buckets.length}`);
300
+ if (update.buckets.length > this.context.maxParameterQueryResults) {
301
+ // TODO: Limit number of results even before we get to this point
302
+ // This limit applies _before_ we get the unique set
303
+ const error = new ServiceError(
304
+ ErrorCode.PSYNC_S2305,
305
+ `Too many parameter query results: ${update.buckets.length} (limit of ${this.context.maxParameterQueryResults})`
306
+ );
281
307
  logger.error(error.message, {
282
308
  checkpoint: checkpoint,
283
309
  user_id: this.syncParams.user_id,
@@ -0,0 +1,36 @@
1
+ import { Semaphore, SemaphoreInterface, withTimeout } from 'async-mutex';
2
+
3
+ export interface SyncContextOptions {
4
+ maxBuckets: number;
5
+ maxParameterQueryResults: number;
6
+ maxDataFetchConcurrency: number;
7
+ }
8
+
9
+ /**
10
+ * Maximum duration to wait for the mutex to become available.
11
+ *
12
+ * This gives an explicit error if there are mutex issues, rather than just hanging.
13
+ */
14
+ const MUTEX_ACQUIRE_TIMEOUT = 30_000;
15
+
16
+ /**
17
+ * Represents the context in which sync happens.
18
+ *
19
+ * This is global to all sync requests, not per request.
20
+ */
21
+ export class SyncContext {
22
+ readonly maxBuckets: number;
23
+ readonly maxParameterQueryResults: number;
24
+
25
+ readonly syncSemaphore: SemaphoreInterface;
26
+
27
+ constructor(options: SyncContextOptions) {
28
+ this.maxBuckets = options.maxBuckets;
29
+ this.maxParameterQueryResults = options.maxParameterQueryResults;
30
+ this.syncSemaphore = withTimeout(
31
+ new Semaphore(options.maxDataFetchConcurrency),
32
+ MUTEX_ACQUIRE_TIMEOUT,
33
+ new Error(`Timeout while waiting for data`)
34
+ );
35
+ }
36
+ }
@@ -6,3 +6,4 @@ export * from './safeRace.js';
6
6
  export * from './sync.js';
7
7
  export * from './util.js';
8
8
  export * from './BucketChecksumState.js';
9
+ export * from './SyncContext.js';
package/src/sync/sync.ts CHANGED
@@ -1,6 +1,5 @@
1
1
  import { JSONBig, JsonContainer } from '@powersync/service-jsonbig';
2
2
  import { BucketDescription, BucketPriority, RequestParameters, SqlSyncRules } from '@powersync/service-sync-rules';
3
- import { Semaphore, withTimeout } from 'async-mutex';
4
3
 
5
4
  import { AbortError } from 'ix/aborterror.js';
6
5
 
@@ -11,28 +10,12 @@ import * as util from '../util/util-index.js';
11
10
  import { logger } from '@powersync/lib-services-framework';
12
11
  import { BucketChecksumState } from './BucketChecksumState.js';
13
12
  import { mergeAsyncIterables } from './merge.js';
14
- import { RequestTracker } from './RequestTracker.js';
15
13
  import { acquireSemaphoreAbortable, settledPromise, tokenStream, TokenStreamOptions } from './util.js';
16
-
17
- /**
18
- * Maximum number of connections actively fetching data.
19
- */
20
- const MAX_ACTIVE_CONNECTIONS = 10;
21
-
22
- /**
23
- * Maximum duration to wait for the mutex to become available.
24
- *
25
- * This gives an explicit error if there are mutex issues, rather than just hanging.
26
- */
27
- const MUTEX_ACQUIRE_TIMEOUT = 30_000;
28
-
29
- const syncSemaphore = withTimeout(
30
- new Semaphore(MAX_ACTIVE_CONNECTIONS),
31
- MUTEX_ACQUIRE_TIMEOUT,
32
- new Error(`Timeout while waiting for data`)
33
- );
14
+ import { SyncContext } from './SyncContext.js';
15
+ import { RequestTracker } from './RequestTracker.js';
34
16
 
35
17
  export interface SyncStreamParameters {
18
+ syncContext: SyncContext;
36
19
  bucketStorage: storage.SyncRulesBucketStorage;
37
20
  syncRules: SqlSyncRules;
38
21
  params: util.StreamingSyncRequest;
@@ -50,7 +33,8 @@ export interface SyncStreamParameters {
50
33
  export async function* streamResponse(
51
34
  options: SyncStreamParameters
52
35
  ): AsyncIterable<util.StreamingSyncLine | string | null> {
53
- const { bucketStorage, syncRules, params, syncParams, token, tokenStreamOptions, tracker, signal } = options;
36
+ const { syncContext, bucketStorage, syncRules, params, syncParams, token, tokenStreamOptions, tracker, signal } =
37
+ options;
54
38
  // We also need to be able to abort, so we create our own controller.
55
39
  const controller = new AbortController();
56
40
  if (signal) {
@@ -66,7 +50,15 @@ export async function* streamResponse(
66
50
  }
67
51
  }
68
52
  const ki = tokenStream(token, controller.signal, tokenStreamOptions);
69
- const stream = streamResponseInner(bucketStorage, syncRules, params, syncParams, tracker, controller.signal);
53
+ const stream = streamResponseInner(
54
+ syncContext,
55
+ bucketStorage,
56
+ syncRules,
57
+ params,
58
+ syncParams,
59
+ tracker,
60
+ controller.signal
61
+ );
70
62
  // Merge the two streams, and abort as soon as one of the streams end.
71
63
  const merged = mergeAsyncIterables([stream, ki], controller.signal);
72
64
 
@@ -87,10 +79,11 @@ export async function* streamResponse(
87
79
 
88
80
  export type BucketSyncState = {
89
81
  description?: BucketDescription; // Undefined if the bucket has not yet been resolved by us.
90
- start_op_id: string;
82
+ start_op_id: util.InternalOpId;
91
83
  };
92
84
 
93
85
  async function* streamResponseInner(
86
+ syncContext: SyncContext,
94
87
  bucketStorage: storage.SyncRulesBucketStorage,
95
88
  syncRules: SqlSyncRules,
96
89
  params: util.StreamingSyncRequest,
@@ -103,10 +96,14 @@ async function* streamResponseInner(
103
96
  const checkpointUserId = util.checkpointUserId(syncParams.token_parameters.user_id as string, params.client_id);
104
97
 
105
98
  const checksumState = new BucketChecksumState({
99
+ syncContext,
106
100
  bucketStorage,
107
101
  syncRules,
108
102
  syncParams,
109
- initialBucketPositions: params.buckets
103
+ initialBucketPositions: params.buckets?.map((bucket) => ({
104
+ name: bucket.name,
105
+ after: BigInt(bucket.after)
106
+ }))
110
107
  });
111
108
  const stream = bucketStorage.watchWriteCheckpoint({
112
109
  user_id: checkpointUserId,
@@ -195,6 +192,7 @@ async function* streamResponseInner(
195
192
  }
196
193
 
197
194
  yield* bucketDataInBatches({
195
+ syncContext: syncContext,
198
196
  bucketStorage: bucketStorage,
199
197
  checkpoint: next.value.value.base.checkpoint,
200
198
  bucketsToFetch: buckets,
@@ -221,8 +219,9 @@ async function* streamResponseInner(
221
219
  }
222
220
 
223
221
  interface BucketDataRequest {
222
+ syncContext: SyncContext;
224
223
  bucketStorage: storage.SyncRulesBucketStorage;
225
- checkpoint: string;
224
+ checkpoint: util.InternalOpId;
226
225
  bucketsToFetch: BucketDescription[];
227
226
  /** Contains current bucket state. Modified by the request as data is sent. */
228
227
  checksumState: BucketChecksumState;
@@ -282,6 +281,7 @@ interface BucketDataBatchResult {
282
281
  */
283
282
  async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<BucketDataBatchResult, void> {
284
283
  const {
284
+ syncContext,
285
285
  bucketStorage: storage,
286
286
  checkpoint,
287
287
  bucketsToFetch,
@@ -293,13 +293,12 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
293
293
  onRowsSent
294
294
  } = request;
295
295
 
296
- const checkpointOp = BigInt(checkpoint);
297
296
  let checkpointInvalidated = false;
298
297
 
299
- if (syncSemaphore.isLocked()) {
298
+ if (syncContext.syncSemaphore.isLocked()) {
300
299
  logger.info('Sync concurrency limit reached, waiting for lock', { user_id: request.user_id });
301
300
  }
302
- const acquired = await acquireSemaphoreAbortable(syncSemaphore, AbortSignal.any([abort_batch]));
301
+ const acquired = await acquireSemaphoreAbortable(syncContext.syncSemaphore, AbortSignal.any([abort_batch]));
303
302
  if (acquired === 'aborted') {
304
303
  return;
305
304
  }
@@ -329,7 +328,7 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
329
328
  if (r.has_more) {
330
329
  has_more = true;
331
330
  }
332
- if (targetOp != null && targetOp > checkpointOp) {
331
+ if (targetOp != null && targetOp > checkpoint) {
333
332
  checkpointInvalidated = true;
334
333
  }
335
334
  if (r.data.length == 0) {
@@ -365,7 +364,7 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
365
364
  }
366
365
  onRowsSent(r.data.length);
367
366
 
368
- checksumState.updateBucketPosition({ bucket: r.bucket, nextAfter: r.next_after, hasMore: r.has_more });
367
+ checksumState.updateBucketPosition({ bucket: r.bucket, nextAfter: BigInt(r.next_after), hasMore: r.has_more });
369
368
 
370
369
  // Check if syncing bucket data is supposed to stop before fetching more data
371
370
  // from storage.
@@ -384,7 +383,7 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
384
383
  if (request.forPriority != null) {
385
384
  const line: util.StreamingSyncCheckpointPartiallyComplete = {
386
385
  partial_checkpoint_complete: {
387
- last_op_id: checkpoint,
386
+ last_op_id: util.internalToExternalOpId(checkpoint),
388
387
  priority: request.forPriority
389
388
  }
390
389
  };
@@ -392,7 +391,7 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
392
391
  } else {
393
392
  const line: util.StreamingSyncCheckpointComplete = {
394
393
  checkpoint_complete: {
395
- last_op_id: checkpoint
394
+ last_op_id: util.internalToExternalOpId(checkpoint)
396
395
  }
397
396
  };
398
397
  yield { data: line, done: true };
@@ -7,6 +7,7 @@ import * as replication from '../replication/replication-index.js';
7
7
  import * as routes from '../routes/routes-index.js';
8
8
  import * as storage from '../storage/storage-index.js';
9
9
  import * as utils from '../util/util-index.js';
10
+ import { SyncContext } from '../sync/SyncContext.js';
10
11
 
11
12
  export interface ServiceContext {
12
13
  configuration: utils.ResolvedPowerSyncConfig;
@@ -16,6 +17,7 @@ export interface ServiceContext {
16
17
  routerEngine: routes.RouterEngine | null;
17
18
  storageEngine: storage.StorageEngine;
18
19
  migrations: PowerSyncMigrationManager;
20
+ syncContext: SyncContext;
19
21
  }
20
22
 
21
23
  /**
@@ -26,6 +28,7 @@ export interface ServiceContext {
26
28
  export class ServiceContextContainer implements ServiceContext {
27
29
  lifeCycleEngine: LifeCycledSystem;
28
30
  storageEngine: storage.StorageEngine;
31
+ syncContext: SyncContext;
29
32
 
30
33
  constructor(public configuration: utils.ResolvedPowerSyncConfig) {
31
34
  this.lifeCycleEngine = new LifeCycledSystem();
@@ -34,6 +37,12 @@ export class ServiceContextContainer implements ServiceContext {
34
37
  configuration
35
38
  });
36
39
 
40
+ this.syncContext = new SyncContext({
41
+ maxDataFetchConcurrency: configuration.api_parameters.max_data_fetch_concurrency,
42
+ maxBuckets: configuration.api_parameters.max_buckets_per_connection,
43
+ maxParameterQueryResults: configuration.api_parameters.max_parameter_query_results
44
+ });
45
+
37
46
  const migrationManager = new MigrationManager();
38
47
  container.register(framework.ContainerImplementation.MIGRATION_MANAGER, migrationManager);
39
48
 
@@ -10,6 +10,13 @@ import { FileSystemSyncRulesCollector } from './sync-rules/impl/filesystem-sync-
10
10
  import { InlineSyncRulesCollector } from './sync-rules/impl/inline-sync-rules-collector.js';
11
11
  import { SyncRulesCollector } from './sync-rules/sync-collector.js';
12
12
  import { ResolvedPowerSyncConfig, RunnerConfig, SyncRulesConfig } from './types.js';
13
+ import {
14
+ DEFAULT_MAX_BUCKETS_PER_CONNECTION,
15
+ DEFAULT_MAX_CONCURRENT_CONNECTIONS,
16
+ DEFAULT_MAX_DATA_FETCH_CONCURRENCY,
17
+ DEFAULT_MAX_PARAMETER_QUERY_RESULTS,
18
+ DEFAULT_MAX_POOL_SIZE
19
+ } from './defaults.js';
13
20
 
14
21
  export type CompoundConfigCollectorOptions = {
15
22
  /**
@@ -124,7 +131,12 @@ export class CompoundConfigCollector {
124
131
  let config: ResolvedPowerSyncConfig = {
125
132
  base_config: baseConfig,
126
133
  connections: baseConfig.replication?.connections || [],
127
- storage: baseConfig.storage,
134
+ storage: {
135
+ ...baseConfig.storage,
136
+ parameters: {
137
+ max_pool_size: baseConfig.storage?.parameters?.max_pool_size ?? DEFAULT_MAX_POOL_SIZE
138
+ }
139
+ },
128
140
  client_keystore: keyStore,
129
141
  // Dev tokens only use the static keys, no external key sources
130
142
  // We may restrict this even further to only the powersync-dev key.
@@ -146,6 +158,17 @@ export class CompoundConfigCollector {
146
158
  internal_service_endpoint:
147
159
  baseConfig.telemetry?.internal_service_endpoint ?? 'https://pulse.journeyapps.com/v1/metrics'
148
160
  },
161
+ api_parameters: {
162
+ max_buckets_per_connection:
163
+ baseConfig.api?.parameters?.max_buckets_per_connection ?? DEFAULT_MAX_BUCKETS_PER_CONNECTION,
164
+
165
+ max_parameter_query_results:
166
+ baseConfig.api?.parameters?.max_parameter_query_results ?? DEFAULT_MAX_PARAMETER_QUERY_RESULTS,
167
+ max_concurrent_connections:
168
+ baseConfig.api?.parameters?.max_concurrent_connections ?? DEFAULT_MAX_CONCURRENT_CONNECTIONS,
169
+ max_data_fetch_concurrency:
170
+ baseConfig.api?.parameters?.max_data_fetch_concurrency ?? DEFAULT_MAX_DATA_FETCH_CONCURRENCY
171
+ },
149
172
  // TODO maybe move this out of the connection or something
150
173
  // slot_name_prefix: connections[0]?.slot_name_prefix ?? 'powersync_'
151
174
  slot_name_prefix: 'powersync_',
@@ -0,0 +1,5 @@
1
+ export const DEFAULT_MAX_POOL_SIZE = 8;
2
+ export const DEFAULT_MAX_CONCURRENT_CONNECTIONS = 200;
3
+ export const DEFAULT_MAX_DATA_FETCH_CONCURRENCY = 10;
4
+ export const DEFAULT_MAX_BUCKETS_PER_CONNECTION = 1000;
5
+ export const DEFAULT_MAX_PARAMETER_QUERY_RESULTS = 1000;
@@ -1,5 +1,4 @@
1
1
  import { configFile } from '@powersync/service-types';
2
- import { PowerSyncConfig } from '@powersync/service-types/src/config/PowerSyncConfig.js';
3
2
  import { CompoundKeyCollector } from '../../auth/CompoundKeyCollector.js';
4
3
  import { KeySpec } from '../../auth/KeySpec.js';
5
4
  import { KeyStore } from '../../auth/KeyStore.js';
@@ -30,7 +29,7 @@ export type SyncRulesConfig = {
30
29
  };
31
30
 
32
31
  export type ResolvedPowerSyncConfig = {
33
- base_config: PowerSyncConfig;
32
+ base_config: configFile.PowerSyncConfig;
34
33
  connections?: configFile.GenericDataSourceConfig[];
35
34
  storage: configFile.GenericStorageConfig;
36
35
  dev: {
@@ -60,6 +59,13 @@ export type ResolvedPowerSyncConfig = {
60
59
  internal_service_endpoint: string;
61
60
  };
62
61
 
62
+ api_parameters: {
63
+ max_concurrent_connections: number;
64
+ max_data_fetch_concurrency: number;
65
+ max_buckets_per_connection: number;
66
+ max_parameter_query_results: number;
67
+ };
68
+
63
69
  /** Prefix for postgres replication slot names. May eventually be connection-specific. */
64
70
  slot_name_prefix: string;
65
71
  parameters: Record<string, number | string | boolean | null>;
@@ -57,8 +57,8 @@ export interface StreamingSyncCheckpoint {
57
57
 
58
58
  export interface StreamingSyncCheckpointDiff {
59
59
  checkpoint_diff: {
60
- last_op_id: OpId;
61
- write_checkpoint?: OpId;
60
+ last_op_id: ProtocolOpId;
61
+ write_checkpoint?: ProtocolOpId;
62
62
  updated_buckets: BucketChecksumWithDescription[];
63
63
  removed_buckets: string[];
64
64
  };
@@ -70,13 +70,13 @@ export interface StreamingSyncData {
70
70
 
71
71
  export interface StreamingSyncCheckpointComplete {
72
72
  checkpoint_complete: {
73
- last_op_id: OpId;
73
+ last_op_id: ProtocolOpId;
74
74
  };
75
75
  }
76
76
 
77
77
  export interface StreamingSyncCheckpointPartiallyComplete {
78
78
  partial_checkpoint_complete: {
79
- last_op_id: OpId;
79
+ last_op_id: ProtocolOpId;
80
80
  priority: BucketPriority;
81
81
  };
82
82
  }
@@ -96,11 +96,11 @@ export type StreamingSyncLine =
96
96
  /**
97
97
  * 64-bit unsigned number, as a base-10 string.
98
98
  */
99
- export type OpId = string;
99
+ export type ProtocolOpId = string;
100
100
 
101
101
  export interface Checkpoint {
102
- last_op_id: OpId;
103
- write_checkpoint?: OpId;
102
+ last_op_id: ProtocolOpId;
103
+ write_checkpoint?: ProtocolOpId;
104
104
  buckets: BucketChecksumWithDescription[];
105
105
  }
106
106
 
@@ -123,15 +123,15 @@ export interface SyncBucketData {
123
123
  /**
124
124
  * The `after` specified in the request.
125
125
  */
126
- after: OpId;
126
+ after: ProtocolOpId;
127
127
  /**
128
128
  * Use this for the next request.
129
129
  */
130
- next_after: OpId;
130
+ next_after: ProtocolOpId;
131
131
  }
132
132
 
133
133
  export interface OplogEntry {
134
- op_id: OpId;
134
+ op_id: ProtocolOpId;
135
135
  op: 'PUT' | 'REMOVE' | 'MOVE' | 'CLEAR';
136
136
  object_type?: string;
137
137
  object_id?: string;