@powersync/service-core 0.0.0-dev-20250303114151 → 0.0.0-dev-20250304151813

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. package/CHANGELOG.md +12 -1
  2. package/dist/routes/RouterEngine.js.map +1 -1
  3. package/dist/routes/endpoints/socket-route.js +2 -1
  4. package/dist/routes/endpoints/socket-route.js.map +1 -1
  5. package/dist/routes/endpoints/sync-stream.js +2 -1
  6. package/dist/routes/endpoints/sync-stream.js.map +1 -1
  7. package/dist/sync/BucketChecksumState.d.ts +5 -1
  8. package/dist/sync/BucketChecksumState.js +13 -5
  9. package/dist/sync/BucketChecksumState.js.map +1 -1
  10. package/dist/sync/SyncContext.d.ts +17 -0
  11. package/dist/sync/SyncContext.js +23 -0
  12. package/dist/sync/SyncContext.js.map +1 -0
  13. package/dist/sync/sync-index.d.ts +1 -0
  14. package/dist/sync/sync-index.js +1 -0
  15. package/dist/sync/sync-index.js.map +1 -1
  16. package/dist/sync/sync.d.ts +3 -1
  17. package/dist/sync/sync.js +8 -18
  18. package/dist/sync/sync.js.map +1 -1
  19. package/dist/system/ServiceContext.d.ts +3 -0
  20. package/dist/system/ServiceContext.js +7 -0
  21. package/dist/system/ServiceContext.js.map +1 -1
  22. package/dist/util/config/compound-config-collector.js +13 -1
  23. package/dist/util/config/compound-config-collector.js.map +1 -1
  24. package/dist/util/config/defaults.d.ts +5 -0
  25. package/dist/util/config/defaults.js +6 -0
  26. package/dist/util/config/defaults.js.map +1 -0
  27. package/dist/util/config/types.d.ts +7 -2
  28. package/dist/util/config/types.js.map +1 -1
  29. package/package.json +2 -2
  30. package/src/routes/RouterEngine.ts +1 -0
  31. package/src/routes/endpoints/socket-route.ts +2 -1
  32. package/src/routes/endpoints/sync-stream.ts +2 -1
  33. package/src/sync/BucketChecksumState.ts +31 -5
  34. package/src/sync/SyncContext.ts +36 -0
  35. package/src/sync/sync-index.ts +1 -0
  36. package/src/sync/sync.ts +21 -24
  37. package/src/system/ServiceContext.ts +9 -0
  38. package/src/util/config/compound-config-collector.ts +24 -1
  39. package/src/util/config/defaults.ts +5 -0
  40. package/src/util/config/types.ts +8 -2
  41. package/test/src/sync/BucketChecksumState.test.ts +15 -0
  42. package/tsconfig.tsbuildinfo +1 -1
package/dist/util/config/types.d.ts CHANGED
@@ -1,5 +1,4 @@
  import { configFile } from '@powersync/service-types';
- import { PowerSyncConfig } from '@powersync/service-types/src/config/PowerSyncConfig.js';
  import { CompoundKeyCollector } from '../../auth/CompoundKeyCollector.js';
  import { KeySpec } from '../../auth/KeySpec.js';
  import { KeyStore } from '../../auth/KeyStore.js';
@@ -24,7 +23,7 @@ export type SyncRulesConfig = {
    exit_on_error: boolean;
  };
  export type ResolvedPowerSyncConfig = {
-   base_config: PowerSyncConfig;
+   base_config: configFile.PowerSyncConfig;
    connections?: configFile.GenericDataSourceConfig[];
    storage: configFile.GenericStorageConfig;
    dev: {
@@ -52,6 +51,12 @@ export type ResolvedPowerSyncConfig = {
    disable_telemetry_sharing: boolean;
    internal_service_endpoint: string;
  };
+ api_parameters: {
+   max_concurrent_connections: number;
+   max_data_fetch_concurrency: number;
+   max_buckets_per_connection: number;
+   max_parameter_query_results: number;
+ };
  /** Prefix for postgres replication slot names. May eventually be connection-specific. */
  slot_name_prefix: string;
  parameters: Record<string, number | string | boolean | null>;
package/dist/util/config/types.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"types.js","sourceRoot":"","sources":["../../../src/util/config/types.ts"],"names":[],"mappings":"AAMA,MAAM,CAAN,IAAY,aAIX;AAJD,WAAY,aAAa;IACvB,oCAAmB,CAAA;IACnB,4BAAW,CAAA;IACX,8BAAa,CAAA;AACf,CAAC,EAJW,aAAa,KAAb,aAAa,QAIxB"}
+ {"version":3,"file":"types.js","sourceRoot":"","sources":["../../../src/util/config/types.ts"],"names":[],"mappings":"AAKA,MAAM,CAAN,IAAY,aAIX;AAJD,WAAY,aAAa;IACvB,oCAAmB,CAAA;IACnB,4BAAW,CAAA;IACX,8BAAa,CAAA;AACf,CAAC,EAJW,aAAa,KAAb,aAAa,QAIxB"}
package/package.json CHANGED
@@ -5,7 +5,7 @@
    "publishConfig": {
      "access": "public"
    },
- "version": "0.0.0-dev-20250303114151",
+ "version": "0.0.0-dev-20250304151813",
    "main": "dist/index.js",
    "license": "FSL-1.1-Apache-2.0",
    "type": "module",
@@ -36,7 +36,7 @@
    "@powersync/service-jsonbig": "0.17.10",
    "@powersync/service-rsocket-router": "0.0.20",
    "@powersync/service-sync-rules": "0.24.0",
- "@powersync/service-types": "0.8.0"
+ "@powersync/service-types": "0.0.0-dev-20250304151813"
  },
  "devDependencies": {
    "@types/async": "^3.2.24",
package/src/routes/RouterEngine.ts CHANGED
@@ -10,6 +10,7 @@ import { SYNC_RULES_ROUTES } from './endpoints/sync-rules.js';
  import { SYNC_STREAM_ROUTES } from './endpoints/sync-stream.js';
  import { SocketRouteGenerator } from './router-socket.js';
  import { RouteDefinition } from './router.js';
+ import { SyncContext } from '../sync/SyncContext.js';
 
  export type RouterSetupResponse = {
    onShutdown: () => Promise<void>;
package/src/routes/endpoints/socket-route.ts CHANGED
@@ -13,7 +13,7 @@ export const syncStreamReactive: SocketRouteGenerator = (router) =>
    validator: schema.createTsCodecValidator(util.StreamingSyncRequest, { allowAdditional: true }),
    handler: async ({ context, params, responder, observer, initialN, signal: upstreamSignal }) => {
      const { service_context } = context;
-     const { routerEngine } = service_context;
+     const { routerEngine, syncContext } = service_context;
 
      // Create our own controller that we can abort directly
      const controller = new AbortController();
@@ -73,6 +73,7 @@ export const syncStreamReactive: SocketRouteGenerator = (router) =>
      const tracker = new sync.RequestTracker();
      try {
        for await (const data of sync.streamResponse({
+         syncContext: syncContext,
          bucketStorage: bucketStorage,
          syncRules: syncRules,
          params: {
package/src/routes/endpoints/sync-stream.ts CHANGED
@@ -20,7 +20,7 @@ export const syncStreamed = routeDefinition({
    validator: schema.createTsCodecValidator(util.StreamingSyncRequest, { allowAdditional: true }),
    handler: async (payload) => {
      const { service_context } = payload.context;
-     const { routerEngine, storageEngine } = service_context;
+     const { routerEngine, storageEngine, syncContext } = service_context;
      const headers = payload.request.headers;
      const userAgent = headers['x-user-agent'] ?? headers['user-agent'];
      const clientId = payload.params.client_id;
@@ -56,6 +56,7 @@ export const syncStreamed = routeDefinition({
      sync.transformToBytesTracked(
        sync.ndjson(
          sync.streamResponse({
+           syncContext: syncContext,
            bucketStorage,
            syncRules: syncRules,
            params,
package/src/sync/BucketChecksumState.ts CHANGED
@@ -6,8 +6,10 @@ import * as util from '../util/util-index.js';
  import { ErrorCode, logger, ServiceAssertionError, ServiceError } from '@powersync/lib-services-framework';
  import { BucketParameterQuerier } from '@powersync/service-sync-rules/src/BucketParameterQuerier.js';
  import { BucketSyncState } from './sync.js';
+ import { SyncContext } from './SyncContext.js';
 
  export interface BucketChecksumStateOptions {
+   syncContext: SyncContext;
    bucketStorage: BucketChecksumStateStorage;
    syncRules: SqlSyncRules;
    syncParams: RequestParameters;
@@ -20,6 +22,7 @@ export interface BucketChecksumStateOptions {
   * Handles incrementally re-computing checkpoints.
   */
  export class BucketChecksumState {
+   private readonly context: SyncContext;
    private readonly bucketStorage: BucketChecksumStateStorage;
 
    /**
@@ -43,8 +46,14 @@ export class BucketChecksumState {
    private pendingBucketDownloads = new Set<string>();
 
    constructor(options: BucketChecksumStateOptions) {
+     this.context = options.syncContext;
      this.bucketStorage = options.bucketStorage;
-     this.parameterState = new BucketParameterState(options.bucketStorage, options.syncRules, options.syncParams);
+     this.parameterState = new BucketParameterState(
+       options.syncContext,
+       options.bucketStorage,
+       options.syncRules,
+       options.syncParams
+     );
      this.bucketDataPositions = new Map();
 
      for (let { name, after: start } of options.initialBucketPositions ?? []) {
@@ -73,6 +82,12 @@ export class BucketChecksumState {
      });
    }
    this.bucketDataPositions = dataBucketsNew;
+   if (dataBucketsNew.size > this.context.maxBuckets) {
+     throw new ServiceError(
+       ErrorCode.PSYNC_S2305,
+       `Too many buckets: ${dataBucketsNew.size} (limit of ${this.context.maxBuckets})`
+     );
+   }
 
    let checksumMap: util.ChecksumMap;
    if (updatedBuckets != null) {
@@ -247,13 +262,20 @@ export interface CheckpointUpdate {
  }
 
  export class BucketParameterState {
+   private readonly context: SyncContext;
    public readonly bucketStorage: BucketChecksumStateStorage;
    public readonly syncRules: SqlSyncRules;
    public readonly syncParams: RequestParameters;
    private readonly querier: BucketParameterQuerier;
    private readonly staticBuckets: Map<string, BucketDescription>;
 
-   constructor(bucketStorage: BucketChecksumStateStorage, syncRules: SqlSyncRules, syncParams: RequestParameters) {
+   constructor(
+     context: SyncContext,
+     bucketStorage: BucketChecksumStateStorage,
+     syncRules: SqlSyncRules,
+     syncParams: RequestParameters
+   ) {
+     this.context = context;
      this.bucketStorage = bucketStorage;
      this.syncRules = syncRules;
      this.syncParams = syncParams;
@@ -275,9 +297,13 @@ export class BucketParameterState {
      return null;
    }
 
-   if (update.buckets.length > 1000) {
-     // TODO: Limit number of buckets even before we get to this point
-     const error = new ServiceError(ErrorCode.PSYNC_S2305, `Too many buckets: ${update.buckets.length}`);
+   if (update.buckets.length > this.context.maxParameterQueryResults) {
+     // TODO: Limit number of results even before we get to this point
+     // This limit applies _before_ we get the unique set
+     const error = new ServiceError(
+       ErrorCode.PSYNC_S2305,
+       `Too many parameter query results: ${update.buckets.length} (limit of ${this.context.maxParameterQueryResults})`
+     );
      logger.error(error.message, {
        checkpoint: checkpoint,
        user_id: this.syncParams.user_id,
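Note: the two checks in this file enforce related but distinct limits. A rough sketch of where each applies (the helper and field names below are hypothetical, not service code):

  // max_parameter_query_results caps the raw rows returned by parameter
  // queries, checked before deduplication into unique buckets.
  const results = await queryParameterBuckets(checkpoint); // hypothetical helper
  if (results.length > syncContext.maxParameterQueryResults) throw tooManyError();

  // max_buckets_per_connection caps the deduplicated bucket set for the
  // checkpoint, checked in BucketChecksumState above.
  const unique = new Set(results.map((r) => r.bucket)); // field name assumed
  if (unique.size > syncContext.maxBuckets) throw tooManyError();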
package/src/sync/SyncContext.ts ADDED
@@ -0,0 +1,36 @@
+ import { Semaphore, SemaphoreInterface, withTimeout } from 'async-mutex';
+
+ export interface SyncContextOptions {
+   maxBuckets: number;
+   maxParameterQueryResults: number;
+   maxDataFetchConcurrency: number;
+ }
+
+ /**
+  * Maximum duration to wait for the mutex to become available.
+  *
+  * This gives an explicit error if there are mutex issues, rather than just hanging.
+  */
+ const MUTEX_ACQUIRE_TIMEOUT = 30_000;
+
+ /**
+  * Represents the context in which sync happens.
+  *
+  * This is global to all sync requests, not per request.
+  */
+ export class SyncContext {
+   readonly maxBuckets: number;
+   readonly maxParameterQueryResults: number;
+
+   readonly syncSemaphore: SemaphoreInterface;
+
+   constructor(options: SyncContextOptions) {
+     this.maxBuckets = options.maxBuckets;
+     this.maxParameterQueryResults = options.maxParameterQueryResults;
+     this.syncSemaphore = withTimeout(
+       new Semaphore(options.maxDataFetchConcurrency),
+       MUTEX_ACQUIRE_TIMEOUT,
+       new Error(`Timeout while waiting for data`)
+     );
+   }
+ }
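For reference, a minimal sketch of how an async-mutex semaphore gates concurrent work, using the library's standard API (the service itself acquires the semaphore through its acquireSemaphoreAbortable helper, as the sync.ts diff below shows; fetchWithLimit is a made-up wrapper):

  import { Semaphore, withTimeout } from 'async-mutex';

  // Same shape as SyncContext.syncSemaphore: at most 10 concurrent holders,
  // and acquisition rejects after 30s instead of hanging indefinitely.
  const semaphore = withTimeout(new Semaphore(10), 30_000, new Error('Timeout while waiting for data'));

  async function fetchWithLimit<T>(work: () => Promise<T>): Promise<T> {
    // runExclusive acquires a slot, runs the callback, and releases the
    // slot even if the callback throws.
    return semaphore.runExclusive(work);
  }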
package/src/sync/sync-index.ts CHANGED
@@ -6,3 +6,4 @@ export * from './safeRace.js';
  export * from './sync.js';
  export * from './util.js';
  export * from './BucketChecksumState.js';
+ export * from './SyncContext.js';
package/src/sync/sync.ts CHANGED
@@ -1,6 +1,5 @@
  import { JSONBig, JsonContainer } from '@powersync/service-jsonbig';
  import { BucketDescription, BucketPriority, RequestParameters, SqlSyncRules } from '@powersync/service-sync-rules';
- import { Semaphore, withTimeout } from 'async-mutex';
 
  import { AbortError } from 'ix/aborterror.js';
 
@@ -11,28 +10,12 @@ import * as util from '../util/util-index.js';
  import { logger } from '@powersync/lib-services-framework';
  import { BucketChecksumState } from './BucketChecksumState.js';
  import { mergeAsyncIterables } from './merge.js';
- import { RequestTracker } from './RequestTracker.js';
  import { acquireSemaphoreAbortable, settledPromise, tokenStream, TokenStreamOptions } from './util.js';
-
- /**
-  * Maximum number of connections actively fetching data.
-  */
- const MAX_ACTIVE_CONNECTIONS = 10;
-
- /**
-  * Maximum duration to wait for the mutex to become available.
-  *
-  * This gives an explicit error if there are mutex issues, rather than just hanging.
-  */
- const MUTEX_ACQUIRE_TIMEOUT = 30_000;
-
- const syncSemaphore = withTimeout(
-   new Semaphore(MAX_ACTIVE_CONNECTIONS),
-   MUTEX_ACQUIRE_TIMEOUT,
-   new Error(`Timeout while waiting for data`)
- );
+ import { SyncContext } from './SyncContext.js';
+ import { RequestTracker } from './RequestTracker.js';
 
  export interface SyncStreamParameters {
+   syncContext: SyncContext;
    bucketStorage: storage.SyncRulesBucketStorage;
    syncRules: SqlSyncRules;
    params: util.StreamingSyncRequest;
@@ -50,7 +33,8 @@ export interface SyncStreamParameters {
  export async function* streamResponse(
    options: SyncStreamParameters
  ): AsyncIterable<util.StreamingSyncLine | string | null> {
-   const { bucketStorage, syncRules, params, syncParams, token, tokenStreamOptions, tracker, signal } = options;
+   const { syncContext, bucketStorage, syncRules, params, syncParams, token, tokenStreamOptions, tracker, signal } =
+     options;
    // We also need to be able to abort, so we create our own controller.
    const controller = new AbortController();
    if (signal) {
@@ -66,7 +50,15 @@ export async function* streamResponse(
      }
    }
    const ki = tokenStream(token, controller.signal, tokenStreamOptions);
-   const stream = streamResponseInner(bucketStorage, syncRules, params, syncParams, tracker, controller.signal);
+   const stream = streamResponseInner(
+     syncContext,
+     bucketStorage,
+     syncRules,
+     params,
+     syncParams,
+     tracker,
+     controller.signal
+   );
    // Merge the two streams, and abort as soon as one of the streams end.
    const merged = mergeAsyncIterables([stream, ki], controller.signal);
 
@@ -91,6 +83,7 @@ export type BucketSyncState = {
  };
 
  async function* streamResponseInner(
+   syncContext: SyncContext,
    bucketStorage: storage.SyncRulesBucketStorage,
    syncRules: SqlSyncRules,
    params: util.StreamingSyncRequest,
@@ -103,6 +96,7 @@ async function* streamResponseInner(
    const checkpointUserId = util.checkpointUserId(syncParams.token_parameters.user_id as string, params.client_id);
 
    const checksumState = new BucketChecksumState({
+     syncContext,
      bucketStorage,
      syncRules,
      syncParams,
@@ -195,6 +189,7 @@ async function* streamResponseInner(
    }
 
    yield* bucketDataInBatches({
+     syncContext: syncContext,
      bucketStorage: bucketStorage,
      checkpoint: next.value.value.base.checkpoint,
      bucketsToFetch: buckets,
@@ -221,6 +216,7 @@ async function* streamResponseInner(
  }
 
  interface BucketDataRequest {
+   syncContext: SyncContext;
    bucketStorage: storage.SyncRulesBucketStorage;
    checkpoint: string;
    bucketsToFetch: BucketDescription[];
@@ -282,6 +278,7 @@ interface BucketDataBatchResult {
   */
  async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<BucketDataBatchResult, void> {
    const {
+     syncContext,
      bucketStorage: storage,
      checkpoint,
      bucketsToFetch,
@@ -296,10 +293,10 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
    const checkpointOp = BigInt(checkpoint);
    let checkpointInvalidated = false;
 
-   if (syncSemaphore.isLocked()) {
+   if (syncContext.syncSemaphore.isLocked()) {
      logger.info('Sync concurrency limit reached, waiting for lock', { user_id: request.user_id });
    }
-   const acquired = await acquireSemaphoreAbortable(syncSemaphore, AbortSignal.any([abort_batch]));
+   const acquired = await acquireSemaphoreAbortable(syncContext.syncSemaphore, AbortSignal.any([abort_batch]));
    if (acquired === 'aborted') {
      return;
    }
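With this change, every caller passes the shared SyncContext in explicitly rather than relying on a module-level semaphore. A condensed sketch of the resulting call shape (values elided; see the socket-route.ts and sync-stream.ts diffs above for the real call sites):

  for await (const data of sync.streamResponse({
    syncContext, // the process-wide SyncContext from service_context
    bucketStorage,
    syncRules,
    params,
    syncParams,
    token,
    tracker,
    signal
  })) {
    // forward each streaming sync line to the client
  }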
package/src/system/ServiceContext.ts CHANGED
@@ -7,6 +7,7 @@ import * as replication from '../replication/replication-index.js';
  import * as routes from '../routes/routes-index.js';
  import * as storage from '../storage/storage-index.js';
  import * as utils from '../util/util-index.js';
+ import { SyncContext } from '../sync/SyncContext.js';
 
  export interface ServiceContext {
    configuration: utils.ResolvedPowerSyncConfig;
@@ -16,6 +17,7 @@ export interface ServiceContext {
    routerEngine: routes.RouterEngine | null;
    storageEngine: storage.StorageEngine;
    migrations: PowerSyncMigrationManager;
+   syncContext: SyncContext;
  }
 
  /**
@@ -26,6 +28,7 @@ export interface ServiceContext {
  export class ServiceContextContainer implements ServiceContext {
    lifeCycleEngine: LifeCycledSystem;
    storageEngine: storage.StorageEngine;
+   syncContext: SyncContext;
 
    constructor(public configuration: utils.ResolvedPowerSyncConfig) {
      this.lifeCycleEngine = new LifeCycledSystem();
@@ -34,6 +37,12 @@ export class ServiceContextContainer implements ServiceContext {
      configuration
    });
 
+   this.syncContext = new SyncContext({
+     maxDataFetchConcurrency: configuration.api_parameters.max_data_fetch_concurrency,
+     maxBuckets: configuration.api_parameters.max_buckets_per_connection,
+     maxParameterQueryResults: configuration.api_parameters.max_parameter_query_results
+   });
+
    const migrationManager = new MigrationManager();
    container.register(framework.ContainerImplementation.MIGRATION_MANAGER, migrationManager);
package/src/util/config/compound-config-collector.ts CHANGED
@@ -10,6 +10,13 @@ import { FileSystemSyncRulesCollector } from './sync-rules/impl/filesystem-sync-
  import { InlineSyncRulesCollector } from './sync-rules/impl/inline-sync-rules-collector.js';
  import { SyncRulesCollector } from './sync-rules/sync-collector.js';
  import { ResolvedPowerSyncConfig, RunnerConfig, SyncRulesConfig } from './types.js';
+ import {
+   DEFAULT_MAX_BUCKETS_PER_CONNECTION,
+   DEFAULT_MAX_CONCURRENT_CONNECTIONS,
+   DEFAULT_MAX_DATA_FETCH_CONCURRENCY,
+   DEFAULT_MAX_PARAMETER_QUERY_RESULTS,
+   DEFAULT_MAX_POOL_SIZE
+ } from './defaults.js';
 
  export type CompoundConfigCollectorOptions = {
    /**
@@ -124,7 +131,12 @@ export class CompoundConfigCollector {
    let config: ResolvedPowerSyncConfig = {
      base_config: baseConfig,
      connections: baseConfig.replication?.connections || [],
-     storage: baseConfig.storage,
+     storage: {
+       ...baseConfig.storage,
+       parameters: {
+         max_pool_size: baseConfig.storage?.parameters?.max_pool_size ?? DEFAULT_MAX_POOL_SIZE
+       }
+     },
      client_keystore: keyStore,
      // Dev tokens only use the static keys, no external key sources
      // We may restrict this even further to only the powersync-dev key.
@@ -146,6 +158,17 @@ export class CompoundConfigCollector {
      internal_service_endpoint:
        baseConfig.telemetry?.internal_service_endpoint ?? 'https://pulse.journeyapps.com/v1/metrics'
    },
+   api_parameters: {
+     max_buckets_per_connection:
+       baseConfig.api?.parameters?.max_buckets_per_connection ?? DEFAULT_MAX_BUCKETS_PER_CONNECTION,
+
+     max_parameter_query_results:
+       baseConfig.api?.parameters?.max_parameter_query_results ?? DEFAULT_MAX_PARAMETER_QUERY_RESULTS,
+     max_concurrent_connections:
+       baseConfig.api?.parameters?.max_concurrent_connections ?? DEFAULT_MAX_CONCURRENT_CONNECTIONS,
+     max_data_fetch_concurrency:
+       baseConfig.api?.parameters?.max_data_fetch_concurrency ?? DEFAULT_MAX_DATA_FETCH_CONCURRENCY
+   },
    // TODO maybe move this out of the connection or something
    // slot_name_prefix: connections[0]?.slot_name_prefix ?? 'powersync_'
    slot_name_prefix: 'powersync_',
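Note on the fallback pattern above: optional chaining plus the ?? operator means any value set under api.parameters in the base config wins, and anything left unset falls back to the defaults added below, which line up with the previously hardcoded values (MAX_ACTIVE_CONNECTIONS = 10 in sync.ts and the 1000-bucket check in BucketChecksumState.ts). A standalone sketch of the same resolution, treating baseConfig as a hypothetical input:

  import { DEFAULT_MAX_BUCKETS_PER_CONNECTION } from './defaults.js';

  // ?? only falls back on null/undefined, so an explicit 0 in the config
  // would be kept rather than replaced by the default.
  const maxBuckets =
    baseConfig.api?.parameters?.max_buckets_per_connection ?? DEFAULT_MAX_BUCKETS_PER_CONNECTION;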
package/src/util/config/defaults.ts ADDED
@@ -0,0 +1,5 @@
+ export const DEFAULT_MAX_POOL_SIZE = 8;
+ export const DEFAULT_MAX_CONCURRENT_CONNECTIONS = 200;
+ export const DEFAULT_MAX_DATA_FETCH_CONCURRENCY = 10;
+ export const DEFAULT_MAX_BUCKETS_PER_CONNECTION = 1000;
+ export const DEFAULT_MAX_PARAMETER_QUERY_RESULTS = 1000;
package/src/util/config/types.ts CHANGED
@@ -1,5 +1,4 @@
  import { configFile } from '@powersync/service-types';
- import { PowerSyncConfig } from '@powersync/service-types/src/config/PowerSyncConfig.js';
  import { CompoundKeyCollector } from '../../auth/CompoundKeyCollector.js';
  import { KeySpec } from '../../auth/KeySpec.js';
  import { KeyStore } from '../../auth/KeyStore.js';
@@ -30,7 +29,7 @@ export type SyncRulesConfig = {
  };
 
  export type ResolvedPowerSyncConfig = {
-   base_config: PowerSyncConfig;
+   base_config: configFile.PowerSyncConfig;
    connections?: configFile.GenericDataSourceConfig[];
    storage: configFile.GenericStorageConfig;
    dev: {
@@ -60,6 +59,13 @@ export type ResolvedPowerSyncConfig = {
    internal_service_endpoint: string;
  };
 
+ api_parameters: {
+   max_concurrent_connections: number;
+   max_data_fetch_concurrency: number;
+   max_buckets_per_connection: number;
+   max_parameter_query_results: number;
+ };
+
  /** Prefix for postgres replication slot names. May eventually be connection-specific. */
  slot_name_prefix: string;
  parameters: Record<string, number | string | boolean | null>;
package/test/src/sync/BucketChecksumState.test.ts CHANGED
@@ -5,6 +5,7 @@ import {
    CHECKPOINT_INVALIDATE_ALL,
    ChecksumMap,
    OpId,
+   SyncContext,
    WatchFilterEvent
  } from '@/index.js';
  import { RequestParameters, SqliteJsonRow, SqliteJsonValue, SqlSyncRules } from '@powersync/service-sync-rules';
@@ -46,12 +47,19 @@ bucket_definitions:
    { defaultSchema: 'public' }
  );
 
+ const syncContext = new SyncContext({
+   maxBuckets: 100,
+   maxParameterQueryResults: 100,
+   maxDataFetchConcurrency: 10
+ });
+
  test('global bucket with update', async () => {
    const storage = new MockBucketChecksumStateStorage();
    // Set intial state
    storage.updateTestChecksum({ bucket: 'global[]', checksum: 1, count: 1 });
 
    const state = new BucketChecksumState({
+     syncContext,
      syncParams: new RequestParameters({ sub: '' }, {}),
      syncRules: SYNC_RULES_GLOBAL,
      bucketStorage: storage
@@ -115,6 +123,7 @@ bucket_definitions:
    storage.updateTestChecksum({ bucket: 'global[]', checksum: 1, count: 1 });
 
    const state = new BucketChecksumState({
+     syncContext,
      // Client sets the initial state here
      initialBucketPositions: [{ name: 'global[]', after: '1' }],
      syncParams: new RequestParameters({ sub: '' }, {}),
@@ -151,6 +160,7 @@ bucket_definitions:
    storage.updateTestChecksum({ bucket: 'global[2]', checksum: 1, count: 1 });
 
    const state = new BucketChecksumState({
+     syncContext,
      syncParams: new RequestParameters({ sub: '' }, {}),
      syncRules: SYNC_RULES_GLOBAL_TWO,
      bucketStorage: storage
@@ -214,6 +224,7 @@ bucket_definitions:
    const storage = new MockBucketChecksumStateStorage();
 
    const state = new BucketChecksumState({
+     syncContext,
      // Client sets the initial state here
      initialBucketPositions: [{ name: 'something_here[]', after: '1' }],
      syncParams: new RequestParameters({ sub: '' }, {}),
@@ -253,6 +264,7 @@ bucket_definitions:
    storage.updateTestChecksum({ bucket: 'global[2]', checksum: 1, count: 1 });
 
    const state = new BucketChecksumState({
+     syncContext,
      syncParams: new RequestParameters({ sub: '' }, {}),
      syncRules: SYNC_RULES_GLOBAL_TWO,
      bucketStorage: storage
@@ -304,6 +316,7 @@ bucket_definitions:
    const storage = new MockBucketChecksumStateStorage();
 
    const state = new BucketChecksumState({
+     syncContext,
      syncParams: new RequestParameters({ sub: '' }, {}),
      syncRules: SYNC_RULES_GLOBAL_TWO,
      bucketStorage: storage
@@ -355,6 +368,7 @@ bucket_definitions:
    storage.updateTestChecksum({ bucket: 'global[2]', checksum: 3, count: 3 });
 
    const state = new BucketChecksumState({
+     syncContext,
      syncParams: new RequestParameters({ sub: '' }, {}),
      syncRules: SYNC_RULES_GLOBAL_TWO,
      bucketStorage: storage
@@ -452,6 +466,7 @@ bucket_definitions:
    storage.updateTestChecksum({ bucket: 'by_project[3]', checksum: 1, count: 1 });
 
    const state = new BucketChecksumState({
+     syncContext,
      syncParams: new RequestParameters({ sub: 'u1' }, {}),
      syncRules: SYNC_RULES_DYNAMIC,
      bucketStorage: storage