@powersync/service-core 0.0.0-dev-20250507151436 → 0.0.0-dev-20250611110033

This diff shows the changes between publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.
Files changed (126)
  1. package/CHANGELOG.md +47 -7
  2. package/dist/api/RouteAPI.d.ts +1 -5
  3. package/dist/api/diagnostics.js +1 -1
  4. package/dist/api/diagnostics.js.map +1 -1
  5. package/dist/auth/CachedKeyCollector.js +2 -7
  6. package/dist/auth/CachedKeyCollector.js.map +1 -1
  7. package/dist/auth/CompoundKeyCollector.js.map +1 -1
  8. package/dist/auth/KeyCollector.d.ts +2 -2
  9. package/dist/auth/KeyStore.js +32 -14
  10. package/dist/auth/KeyStore.js.map +1 -1
  11. package/dist/auth/RemoteJWKSCollector.d.ts +1 -0
  12. package/dist/auth/RemoteJWKSCollector.js +39 -16
  13. package/dist/auth/RemoteJWKSCollector.js.map +1 -1
  14. package/dist/auth/auth-index.d.ts +1 -0
  15. package/dist/auth/auth-index.js +1 -0
  16. package/dist/auth/auth-index.js.map +1 -1
  17. package/dist/auth/utils.d.ts +6 -0
  18. package/dist/auth/utils.js +97 -0
  19. package/dist/auth/utils.js.map +1 -0
  20. package/dist/index.d.ts +1 -0
  21. package/dist/index.js +1 -0
  22. package/dist/index.js.map +1 -1
  23. package/dist/metrics/open-telemetry/OpenTelemetryMetricsFactory.d.ts +1 -1
  24. package/dist/metrics/open-telemetry/OpenTelemetryMetricsFactory.js.map +1 -1
  25. package/dist/replication/AbstractReplicationJob.d.ts +4 -0
  26. package/dist/replication/AbstractReplicationJob.js.map +1 -1
  27. package/dist/replication/AbstractReplicator.d.ts +23 -0
  28. package/dist/replication/AbstractReplicator.js +45 -0
  29. package/dist/replication/AbstractReplicator.js.map +1 -1
  30. package/dist/replication/RelationCache.d.ts +9 -0
  31. package/dist/replication/RelationCache.js +20 -0
  32. package/dist/replication/RelationCache.js.map +1 -0
  33. package/dist/replication/replication-index.d.ts +1 -0
  34. package/dist/replication/replication-index.js +1 -0
  35. package/dist/replication/replication-index.js.map +1 -1
  36. package/dist/replication/replication-metrics.js +6 -0
  37. package/dist/replication/replication-metrics.js.map +1 -1
  38. package/dist/routes/RouterEngine.js +1 -1
  39. package/dist/routes/RouterEngine.js.map +1 -1
  40. package/dist/routes/auth.d.ts +5 -16
  41. package/dist/routes/auth.js +6 -4
  42. package/dist/routes/auth.js.map +1 -1
  43. package/dist/routes/configure-fastify.d.ts +3 -21
  44. package/dist/routes/configure-fastify.js +3 -2
  45. package/dist/routes/configure-fastify.js.map +1 -1
  46. package/dist/routes/configure-rsocket.js +28 -11
  47. package/dist/routes/configure-rsocket.js.map +1 -1
  48. package/dist/routes/endpoints/admin.js +2 -0
  49. package/dist/routes/endpoints/admin.js.map +1 -1
  50. package/dist/routes/endpoints/checkpointing.d.ts +4 -28
  51. package/dist/routes/endpoints/socket-route.js +22 -8
  52. package/dist/routes/endpoints/socket-route.js.map +1 -1
  53. package/dist/routes/endpoints/sync-rules.js +6 -6
  54. package/dist/routes/endpoints/sync-rules.js.map +1 -1
  55. package/dist/routes/endpoints/sync-stream.d.ts +2 -14
  56. package/dist/routes/endpoints/sync-stream.js +28 -9
  57. package/dist/routes/endpoints/sync-stream.js.map +1 -1
  58. package/dist/routes/route-register.js +10 -6
  59. package/dist/routes/route-register.js.map +1 -1
  60. package/dist/routes/router.d.ts +7 -3
  61. package/dist/routes/router.js.map +1 -1
  62. package/dist/storage/BucketStorageBatch.d.ts +17 -1
  63. package/dist/storage/BucketStorageBatch.js +2 -1
  64. package/dist/storage/BucketStorageBatch.js.map +1 -1
  65. package/dist/storage/PersistedSyncRulesContent.d.ts +5 -0
  66. package/dist/storage/SourceTable.d.ts +17 -1
  67. package/dist/storage/SourceTable.js +28 -0
  68. package/dist/storage/SourceTable.js.map +1 -1
  69. package/dist/storage/SyncRulesBucketStorage.d.ts +11 -2
  70. package/dist/storage/SyncRulesBucketStorage.js.map +1 -1
  71. package/dist/storage/bson.js +4 -1
  72. package/dist/storage/bson.js.map +1 -1
  73. package/dist/sync/BucketChecksumState.d.ts +40 -18
  74. package/dist/sync/BucketChecksumState.js +122 -74
  75. package/dist/sync/BucketChecksumState.js.map +1 -1
  76. package/dist/sync/RequestTracker.d.ts +22 -1
  77. package/dist/sync/RequestTracker.js +51 -2
  78. package/dist/sync/RequestTracker.js.map +1 -1
  79. package/dist/sync/sync.d.ts +3 -5
  80. package/dist/sync/sync.js +49 -34
  81. package/dist/sync/sync.js.map +1 -1
  82. package/dist/util/config/collectors/config-collector.js +2 -5
  83. package/dist/util/config/collectors/config-collector.js.map +1 -1
  84. package/dist/util/protocol-types.d.ts +9 -9
  85. package/dist/util/protocol-types.js.map +1 -1
  86. package/dist/util/utils.d.ts +1 -1
  87. package/package.json +6 -7
  88. package/src/api/RouteAPI.ts +1 -6
  89. package/src/api/diagnostics.ts +1 -1
  90. package/src/auth/CachedKeyCollector.ts +4 -6
  91. package/src/auth/CompoundKeyCollector.ts +2 -1
  92. package/src/auth/KeyCollector.ts +2 -2
  93. package/src/auth/KeyStore.ts +45 -20
  94. package/src/auth/RemoteJWKSCollector.ts +39 -16
  95. package/src/auth/auth-index.ts +1 -0
  96. package/src/auth/utils.ts +102 -0
  97. package/src/index.ts +2 -0
  98. package/src/metrics/open-telemetry/OpenTelemetryMetricsFactory.ts +3 -3
  99. package/src/replication/AbstractReplicationJob.ts +5 -0
  100. package/src/replication/AbstractReplicator.ts +47 -0
  101. package/src/replication/RelationCache.ts +25 -0
  102. package/src/replication/replication-index.ts +1 -0
  103. package/src/replication/replication-metrics.ts +7 -0
  104. package/src/routes/RouterEngine.ts +1 -1
  105. package/src/routes/auth.ts +7 -6
  106. package/src/routes/configure-fastify.ts +6 -3
  107. package/src/routes/configure-rsocket.ts +33 -14
  108. package/src/routes/endpoints/admin.ts +2 -0
  109. package/src/routes/endpoints/socket-route.ts +24 -8
  110. package/src/routes/endpoints/sync-rules.ts +6 -6
  111. package/src/routes/endpoints/sync-stream.ts +31 -8
  112. package/src/routes/route-register.ts +10 -7
  113. package/src/routes/router.ts +9 -3
  114. package/src/storage/BucketStorageBatch.ts +22 -2
  115. package/src/storage/PersistedSyncRulesContent.ts +6 -0
  116. package/src/storage/SourceTable.ts +44 -1
  117. package/src/storage/SyncRulesBucketStorage.ts +14 -2
  118. package/src/storage/bson.ts +4 -1
  119. package/src/sync/BucketChecksumState.ts +162 -77
  120. package/src/sync/RequestTracker.ts +70 -3
  121. package/src/sync/sync.ts +72 -49
  122. package/src/util/config/collectors/config-collector.ts +3 -7
  123. package/src/util/protocol-types.ts +15 -10
  124. package/test/src/auth.test.ts +29 -11
  125. package/test/src/sync/BucketChecksumState.test.ts +32 -18
  126. package/tsconfig.tsbuildinfo +1 -1
package/src/sync/sync.ts CHANGED
@@ -7,12 +7,12 @@ import * as auth from '../auth/auth-index.js';
 import * as storage from '../storage/storage-index.js';
 import * as util from '../util/util-index.js';
 
-import { logger } from '@powersync/lib-services-framework';
-import { BucketChecksumState } from './BucketChecksumState.js';
+import { Logger, logger as defaultLogger } from '@powersync/lib-services-framework';
+import { BucketChecksumState, CheckpointLine } from './BucketChecksumState.js';
 import { mergeAsyncIterables } from '../streams/streams-index.js';
 import { acquireSemaphoreAbortable, settledPromise, tokenStream, TokenStreamOptions } from './util.js';
 import { SyncContext } from './SyncContext.js';
-import { RequestTracker } from './RequestTracker.js';
+import { OperationsSentStats, RequestTracker, statsForBatch } from './RequestTracker.js';
 
 export interface SyncStreamParameters {
   syncContext: SyncContext;
@@ -21,6 +21,7 @@ export interface SyncStreamParameters {
   params: util.StreamingSyncRequest;
   syncParams: RequestParameters;
   token: auth.JwtPayload;
+  logger?: Logger;
   /**
    * If this signal is aborted, the stream response ends as soon as possible, without error.
    */
@@ -35,6 +36,8 @@ export async function* streamResponse(
 ): AsyncIterable<util.StreamingSyncLine | string | null> {
   const { syncContext, bucketStorage, syncRules, params, syncParams, token, tokenStreamOptions, tracker, signal } =
     options;
+  const logger = options.logger ?? defaultLogger;
+
   // We also need to be able to abort, so we create our own controller.
   const controller = new AbortController();
   if (signal) {
@@ -57,7 +60,8 @@
     params,
     syncParams,
     tracker,
-    controller.signal
+    controller.signal,
+    logger
   );
   // Merge the two streams, and abort as soon as one of the streams end.
   const merged = mergeAsyncIterables([stream, ki], controller.signal);
@@ -77,11 +81,6 @@
   }
 }
 
-export type BucketSyncState = {
-  description?: BucketDescription; // Undefined if the bucket has not yet been resolved by us.
-  start_op_id: util.InternalOpId;
-};
-
 async function* streamResponseInner(
   syncContext: SyncContext,
   bucketStorage: storage.SyncRulesBucketStorage,
@@ -89,11 +88,12 @@
   params: util.StreamingSyncRequest,
   syncParams: RequestParameters,
   tracker: RequestTracker,
-  signal: AbortSignal
+  signal: AbortSignal,
+  logger: Logger
 ): AsyncGenerator<util.StreamingSyncLine | string | null> {
   const { raw_data, binary_data } = params;
 
-  const checkpointUserId = util.checkpointUserId(syncParams.token_parameters.user_id as string, params.client_id);
+  const checkpointUserId = util.checkpointUserId(syncParams.tokenParameters.user_id as string, params.client_id);
 
   const checksumState = new BucketChecksumState({
     syncContext,
@@ -103,7 +103,8 @@
     initialBucketPositions: params.buckets?.map((bucket) => ({
       name: bucket.name,
       after: BigInt(bucket.after)
-    }))
+    })),
+    logger: logger
   });
   const stream = bucketStorage.watchCheckpointChanges({
     user_id: checkpointUserId,
@@ -111,16 +112,29 @@
   });
   const newCheckpoints = stream[Symbol.asyncIterator]();
 
+  type CheckpointAndLine = {
+    checkpoint: bigint;
+    line: CheckpointLine | null;
+  };
+
+  async function waitForNewCheckpointLine(): Promise<IteratorResult<CheckpointAndLine>> {
+    const next = await newCheckpoints.next();
+    if (next.done) {
+      return { done: true, value: undefined };
+    }
+
+    const line = await checksumState.buildNextCheckpointLine(next.value);
+    return { done: false, value: { checkpoint: next.value.base.checkpoint, line } };
+  }
+
   try {
-    let nextCheckpointPromise:
-      | Promise<PromiseSettledResult<IteratorResult<storage.StorageCheckpointUpdate>>>
-      | undefined;
+    let nextCheckpointPromise: Promise<PromiseSettledResult<IteratorResult<CheckpointAndLine>>> | undefined;
 
     do {
       if (!nextCheckpointPromise) {
        // Wrap in a settledPromise, so that abort errors after the parent stopped iterating
        // does not result in uncaught errors.
-        nextCheckpointPromise = settledPromise(newCheckpoints.next());
+        nextCheckpointPromise = settledPromise(waitForNewCheckpointLine());
      }
      const next = await nextCheckpointPromise;
      nextCheckpointPromise = undefined;
@@ -130,7 +144,7 @@
      if (next.value.done) {
        break;
      }
-      const line = await checksumState.buildNextCheckpointLine(next.value.value);
+      const line = next.value.value.line;
      if (line == null) {
        // No update to sync
        continue;
@@ -138,7 +152,10 @@
 
      const { checkpointLine, bucketsToFetch } = line;
 
+      // Since yielding can block, we update the state just before yielding the line.
+      line.advance();
      yield checkpointLine;
+
      // Start syncing data for buckets up to the checkpoint. As soon as we have completed at least one priority and
      // at least 1000 operations, we also start listening for new checkpoints concurrently. When a new checkpoint comes
      // in while we're still busy syncing data for lower priorities, interrupt the current operation and start syncing
@@ -164,30 +181,32 @@
      function maybeRaceForNewCheckpoint() {
        if (syncedOperations >= 1000 && nextCheckpointPromise === undefined) {
          nextCheckpointPromise = (async () => {
-            const next = await settledPromise(newCheckpoints.next());
-            if (next.status == 'rejected') {
-              abortCheckpointController.abort();
-            } else if (!next.value.done) {
-              // Stop the running bucketDataInBatches() iterations, making the main flow reach the new checkpoint.
-              abortCheckpointController.abort();
+            while (true) {
+              const next = await settledPromise(waitForNewCheckpointLine());
+              if (next.status == 'rejected') {
+                abortCheckpointController.abort();
+              } else if (!next.value.done) {
+                if (next.value.value.line == null) {
+                  // There's a new checkpoint that doesn't affect this sync stream. Keep listening, but don't
+                  // interrupt this batch.
+                  continue;
+                }
+
+                // A new sync line can be emitted. Stop running the bucketDataInBatches() iterations, making the
+                // main flow reach the new checkpoint.
+                abortCheckpointController.abort();
+              }
+
+              return next;
            }
-
-            return next;
          })();
        }
      }
 
-      function markOperationsSent(operations: number) {
-        syncedOperations += operations;
-        tracker.addOperationsSynced(operations);
-        // Disable interrupting checkpoints for now.
-        // There is a bug with interrupting checkpoints where:
-        // 1. User is in the middle of syncing a large batch of data (for example initial sync).
-        // 2. A new checkpoint comes in, which interrupts the batch.
-        // 3. However, the new checkpoint does not contain any new data for this connection, so nothing further is synced.
-        // This then causes the client to wait indefinitely for the remaining data or checkpoint_complete message. That is
-        // only resolved when a new checkpoint comes in that does have data for this connection, or the connection is restarted.
-        // maybeRaceForNewCheckpoint();
+      function markOperationsSent(stats: OperationsSentStats) {
+        syncedOperations += stats.total;
+        tracker.addOperationsSynced(stats);
+        maybeRaceForNewCheckpoint();
      }
 
      // This incrementally updates dataBuckets with each individual bucket position.
@@ -201,18 +220,19 @@
        yield* bucketDataInBatches({
          syncContext: syncContext,
          bucketStorage: bucketStorage,
-          checkpoint: next.value.value.base.checkpoint,
+          checkpoint: next.value.value.checkpoint,
          bucketsToFetch: buckets,
-          checksumState,
+          checkpointLine: line,
          raw_data,
          binary_data,
          onRowsSent: markOperationsSent,
          abort_connection: signal,
          abort_batch: abortCheckpointSignal,
-          user_id: syncParams.user_id,
+          user_id: syncParams.userId,
          // Passing null here will emit a full sync complete message at the end. If we pass a priority, we'll emit a partial
          // sync complete message instead.
-          forPriority: !isLast ? priority : null
+          forPriority: !isLast ? priority : null,
+          logger
        });
      }
 
@@ -229,9 +249,10 @@ interface BucketDataRequest {
   syncContext: SyncContext;
   bucketStorage: storage.SyncRulesBucketStorage;
   checkpoint: util.InternalOpId;
+  /** Contains current bucket state. Modified by the request as data is sent. */
+  checkpointLine: CheckpointLine;
+  /** Subset of checkpointLine.bucketsToFetch, filtered by priority. */
   bucketsToFetch: BucketDescription[];
-  /** Contains current bucket state. Modified by the request as data is sent. */
-  checksumState: BucketChecksumState;
   raw_data: boolean | undefined;
   binary_data: boolean | undefined;
   /** Signals that the connection was aborted and that streaming should stop ASAP. */
@@ -243,7 +264,8 @@
   abort_batch: AbortSignal;
   user_id?: string;
   forPriority: BucketPriority | null;
-  onRowsSent: (amount: number) => void;
+  onRowsSent: (stats: OperationsSentStats) => void;
+  logger: Logger;
 }
 
 async function* bucketDataInBatches(request: BucketDataRequest) {
@@ -292,12 +314,13 @@ async function* bucketDataBatch(request: BucketDataRequest): AsyncGenerator<Buck
     bucketStorage: storage,
     checkpoint,
     bucketsToFetch,
-    checksumState,
+    checkpointLine,
     raw_data,
     binary_data,
     abort_connection,
     abort_batch,
-    onRowsSent
+    onRowsSent,
+    logger
   } = request;
 
   let checkpointInvalidated = false;
@@ -322,7 +345,7 @@
     }
     // Optimization: Only fetch buckets for which the checksums have changed since the last checkpoint
     // For the first batch, this will be all buckets.
-    const filteredBuckets = checksumState.getFilteredBucketPositions(bucketsToFetch);
+    const filteredBuckets = checkpointLine.getFilteredBucketPositions(bucketsToFetch);
    const dataBatches = storage.getBucketDataBatch(checkpoint, filteredBuckets);
 
    let has_more = false;
@@ -369,9 +392,9 @@
      // iterator memory in case if large data sent.
      yield { data: null, done: false };
    }
-    onRowsSent(r.data.length);
+    onRowsSent(statsForBatch(r));
 
-    checksumState.updateBucketPosition({ bucket: r.bucket, nextAfter: BigInt(r.next_after), hasMore: r.has_more });
+    checkpointLine.updateBucketPosition({ bucket: r.bucket, nextAfter: BigInt(r.next_after), hasMore: r.has_more });
 
    // Check if syncing bucket data is supposed to stop before fetching more data
    // from storage.
@@ -417,7 +440,7 @@
   }
 }
 
-function transformLegacyResponse(bucketData: util.SyncBucketData): any {
+function transformLegacyResponse(bucketData: util.SyncBucketData): util.SyncBucketData<util.ProtocolOplogData> {
   return {
     ...bucketData,
     data: bucketData.data.map((entry) => {
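Note: the comment block removed from markOperationsSent() above documented why checkpoint interruption used to be disabled: a new checkpoint with no data for this connection would interrupt the batch and then emit nothing, leaving the client waiting indefinitely for the remaining data or checkpoint_complete message. The new waitForNewCheckpointLine() wrapper resolves that by building the checkpoint line before deciding to interrupt, so irrelevant checkpoints are skipped. A minimal sketch of the pattern, using hypothetical update/line types rather than the actual service types:

type CheckpointUpdate = { checkpoint: bigint };
type Line = { checkpointLine: unknown };

// Builds a line for an update, or returns null when the update is irrelevant to this stream.
type LineBuilder = (update: CheckpointUpdate) => Promise<Line | null>;

async function raceForRelevantLine(
  updates: AsyncIterator<CheckpointUpdate>,
  buildLine: LineBuilder,
  abortBatch: AbortController
): Promise<IteratorResult<{ checkpoint: bigint; line: Line }>> {
  while (true) {
    const next = await updates.next();
    if (next.done) {
      return { done: true, value: undefined };
    }
    const line = await buildLine(next.value);
    if (line == null) {
      // New checkpoint, but nothing to send on this stream: keep listening
      // instead of interrupting the in-progress batch (the old bug).
      continue;
    }
    // Only a checkpoint that will actually emit a sync line interrupts the batch.
    abortBatch.abort();
    return { done: false, value: { checkpoint: next.value.checkpoint, line } };
  }
}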
package/src/util/config/collectors/config-collector.ts CHANGED
@@ -1,8 +1,8 @@
-import { configFile } from '@powersync/service-types';
-import * as t from 'ts-codec';
 import * as yaml from 'yaml';
 
 import { schema } from '@powersync/lib-services-framework';
+import { configFile } from '@powersync/service-types';
+
 import { RunnerConfig } from '../types.js';
 import { YamlEnvTag } from './impl/yaml-env.js';
 
@@ -12,11 +12,7 @@ export enum ConfigFileFormat {
 }
 
 // ts-codec itself doesn't give great validation errors, so we use json schema for that
-const configSchemaValidator = schema
-  .parseJSONSchema(
-    t.generateJSONSchema(configFile.powerSyncConfig, { allowAdditional: true, parsers: [configFile.portParser] })
-  )
-  .validator();
+const configSchemaValidator = schema.parseJSONSchema(configFile.PowerSyncConfigJSONSchema).validator();
 
 export abstract class ConfigCollector {
   abstract get name(): string;
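The hunk above replaces runtime JSON-schema generation from the ts-codec definition with the pre-generated configFile.PowerSyncConfigJSONSchema, while keeping schema-based validation for its better error messages. A rough sketch of the same validate-against-a-static-schema approach, using Ajv as a stand-in for the framework's schema.parseJSONSchema(...).validator() helper:

import Ajv from 'ajv';

// A tiny checked-in schema, standing in for configFile.PowerSyncConfigJSONSchema.
const configSchema = {
  type: 'object',
  properties: {
    port: { type: 'integer' },
    storage: { type: 'object' }
  },
  additionalProperties: true
};

const ajv = new Ajv();
const validateConfig = ajv.compile(configSchema);

export function assertValidConfig(config: unknown): void {
  if (!validateConfig(config)) {
    // Schema validators report per-field paths, which is the readability win
    // over plain codec decode errors mentioned in the comment above.
    throw new Error(`Invalid config: ${ajv.errorsText(validateConfig.errors)}`);
  }
}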
package/src/util/protocol-types.ts CHANGED
@@ -1,5 +1,6 @@
 import * as t from 'ts-codec';
-import { BucketDescription, BucketPriority, SqliteJsonValue } from '@powersync/service-sync-rules';
+import { BucketDescription, BucketPriority, SqliteJsonRow } from '@powersync/service-sync-rules';
+import { JsonContainer } from '@powersync/service-jsonbig';
 
 export const BucketRequest = t.object({
   name: t.string,
@@ -65,7 +66,7 @@ export interface StreamingSyncCheckpointDiff {
 }
 
 export interface StreamingSyncData {
-  data: SyncBucketData;
+  data: SyncBucketData<ProtocolOplogData>;
 }
 
 export interface StreamingSyncCheckpointComplete {
@@ -109,13 +110,9 @@ export interface BucketState {
   op_id: string;
 }
 
-export interface SyncDataBatch {
-  buckets: SyncBucketData[];
-}
-
-export interface SyncBucketData {
+export interface SyncBucketData<Data extends ProtocolOplogData = StoredOplogData> {
   bucket: string;
-  data: OplogEntry[];
+  data: OplogEntry<Data>[];
   /**
    * True if there _could_ be more data for this bucket, and another request must be made.
    */
@@ -130,12 +127,20 @@ export interface SyncBucketData {
   next_after: ProtocolOpId;
 }
 
-export interface OplogEntry {
+export type StoredOplogData = string | null;
+
+// Note: When clients have both raw_data and binary_data disabled (this only affects legacy
+// clients), data is actually a `Record<string, SqliteJsonValue>`. Oplog entries are always
+// stored as a serialized (JSON) string so that they don't have to be parsed in the sync service;
+// this representation only exists on the way out for legacy clients.
+export type ProtocolOplogData = SqliteJsonRow | JsonContainer | StoredOplogData;
+
+export interface OplogEntry<Data extends ProtocolOplogData = StoredOplogData> {
   op_id: ProtocolOpId;
   op: 'PUT' | 'REMOVE' | 'MOVE' | 'CLEAR';
   object_type?: string;
   object_id?: string;
-  data?: Record<string, SqliteJsonValue> | string | null;
+  data?: Data;
   checksum: number | bigint;
   subkey?: string;
 }
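The generics above keep oplog entries as stored JSON strings (StoredOplogData) inside the service and only widen them to ProtocolOplogData on the way out. A self-contained sketch of the idea, with simplified types (JsonContainer omitted) and a hypothetical toLegacyEntry() in the spirit of transformLegacyResponse():

type SqliteJsonValue = string | number | bigint | boolean | null;
type SqliteJsonRow = Record<string, SqliteJsonValue>;
type StoredOplogData = string | null; // serialized JSON, as stored
type ProtocolOplogData = SqliteJsonRow | StoredOplogData;

interface OplogEntry<Data extends ProtocolOplogData = StoredOplogData> {
  op_id: string;
  op: 'PUT' | 'REMOVE' | 'MOVE' | 'CLEAR';
  data?: Data;
  checksum: number | bigint;
}

// Legacy clients (raw_data and binary_data both disabled) expect parsed rows,
// so the stored string form is only decoded here, on the way out.
function toLegacyEntry(entry: OplogEntry): OplogEntry<ProtocolOplogData> {
  return {
    ...entry,
    data: typeof entry.data == 'string' ? (JSON.parse(entry.data) as SqliteJsonRow) : entry.data
  };
}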
package/test/src/auth.test.ts CHANGED
@@ -75,21 +75,21 @@ describe('JWT Auth', () => {
        defaultAudiences: ['other'],
        maxAge: '6m'
      })
-    ).rejects.toThrow('unexpected "aud" claim value');
+    ).rejects.toThrow('[PSYNC_S2105] Unexpected "aud" claim value: "tests"');
 
    await expect(
      store.verifyJwt(signedJwt, {
        defaultAudiences: [],
        maxAge: '6m'
      })
-    ).rejects.toThrow('unexpected "aud" claim value');
+    ).rejects.toThrow('[PSYNC_S2105] Unexpected "aud" claim value: "tests"');
 
    await expect(
      store.verifyJwt(signedJwt, {
        defaultAudiences: ['tests'],
        maxAge: '1m'
      })
-    ).rejects.toThrow('Token must expire in a maximum of');
+    ).rejects.toThrow('[PSYNC_S2104] Token must expire in a maximum of 60 seconds, got 300s');
 
    const signedJwt2 = await new jose.SignJWT({})
      .setProtectedHeader({ alg: 'HS256', kid: 'k1' })
@@ -104,7 +104,25 @@ describe('JWT Auth', () => {
        defaultAudiences: ['tests'],
        maxAge: '5m'
      })
-    ).rejects.toThrow('missing required "sub" claim');
+    ).rejects.toThrow('[PSYNC_S2101] JWT payload is missing a required claim "sub"');
+
+    // expired token
+    const d = Math.round(Date.now() / 1000);
+    const signedJwt3 = await new jose.SignJWT({})
+      .setProtectedHeader({ alg: 'HS256', kid: 'k1' })
+      .setSubject('f1')
+      .setIssuedAt(d - 500)
+      .setIssuer('tester')
+      .setAudience('tests')
+      .setExpirationTime(d - 400)
+      .sign(signKey);
+
+    await expect(
+      store.verifyJwt(signedJwt3, {
+        defaultAudiences: ['tests'],
+        maxAge: '5m'
+      })
+    ).rejects.toThrow('[PSYNC_S2103] JWT has expired');
  });
 
  test('Algorithm validation', async () => {
@@ -159,7 +177,7 @@ describe('JWT Auth', () => {
        maxAge: '6m'
      })
    ).rejects.toThrow(
-      'Could not find an appropriate key in the keystore. The key is missing or no key matched the token KID'
+      '[PSYNC_S2101] Could not find an appropriate key in the keystore. The key is missing or no key matched the token KID'
    );
 
    // Wrong kid
@@ -178,7 +196,7 @@
        maxAge: '6m'
      })
    ).rejects.toThrow(
-      'Could not find an appropriate key in the keystore. The key is missing or no key matched the token KID'
+      '[PSYNC_S2101] Could not find an appropriate key in the keystore. The key is missing or no key matched the token KID'
    );
 
    // No kid, matches sharedKey2
@@ -255,7 +273,7 @@
        defaultAudiences: ['tests'],
        maxAge: '6m'
      })
-    ).rejects.toThrow('unexpected "aud" claim value');
+    ).rejects.toThrow('[PSYNC_S2105] Unexpected "aud" claim value: "tests"');
 
    const signedJwt3 = await new jose.SignJWT({})
      .setProtectedHeader({ alg: 'HS256', kid: 'k1' })
@@ -290,7 +308,7 @@
        reject_ip_ranges: ['local']
      }
    });
-    await expect(invalid.getKeys()).rejects.toThrow('IPs in this range are not supported');
+    await expect(invalid.getKeys()).rejects.toThrow('[PSYNC_S2204] JWKS request failed');
 
    // IPs throw an error immediately
    expect(
@@ -345,7 +363,7 @@
    expect(key.kid).toEqual(publicKeyRSA.kid!);
 
    cached.addTimeForTests(301_000);
-    currentResponse = Promise.reject('refresh failed');
+    currentResponse = Promise.reject(new Error('refresh failed'));
 
    // Uses the promise, refreshes in the background
    let response = await cached.getKeys();
@@ -357,14 +375,14 @@
    response = await cached.getKeys();
    // Still have the cached key, but also have the error
    expect(response.keys[0].kid).toEqual(publicKeyRSA.kid!);
-    expect(response.errors[0].message).toMatch('Failed to fetch');
+    expect(response.errors[0].message).toMatch('[PSYNC_S2201] refresh failed');
 
    await cached.addTimeForTests(3601_000);
    response = await cached.getKeys();
 
    // Now the keys have expired, and the request still fails
    expect(response.keys).toEqual([]);
-    expect(response.errors[0].message).toMatch('Failed to fetch');
+    expect(response.errors[0].message).toMatch('[PSYNC_S2201] refresh failed');
 
    currentResponse = Promise.resolve({
      errors: [],
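The updated assertions pin stable [PSYNC_*] error codes in the messages. The actual error classes live in @powersync/lib-services-framework; purely as a hedged sketch, an error type producing messages of this shape could look like:

// Hedged sketch only: the real classes may be structured differently.
class CodedError extends Error {
  constructor(
    public readonly code: string, // e.g. 'PSYNC_S2105'
    description: string
  ) {
    super(`[${code}] ${description}`);
  }
}

// Produces: [PSYNC_S2105] Unexpected "aud" claim value: "tests"
new CodedError('PSYNC_S2105', 'Unexpected "aud" claim value: "tests"');

Because toThrow(string) and toMatch(string) match substrings, asserting the bracketed prefix pins the error code without coupling the test to trailing message details.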
package/test/src/sync/BucketChecksumState.test.ts CHANGED
@@ -71,6 +71,7 @@ bucket_definitions:
    writeCheckpoint: null,
    update: CHECKPOINT_INVALIDATE_ALL
  }))!;
+  line.advance();
  expect(line.checkpointLine).toEqual({
    checkpoint: {
      buckets: [{ bucket: 'global[]', checksum: 1, count: 1, priority: 3 }],
@@ -85,10 +86,11 @@
    }
  ]);
  // This is the bucket data to be fetched
-  expect(state.getFilteredBucketPositions(line.bucketsToFetch)).toEqual(new Map([['global[]', 0n]]));
+  expect(line.getFilteredBucketPositions()).toEqual(new Map([['global[]', 0n]]));
 
  // This simulates the bucket data being sent
-  state.updateBucketPosition({ bucket: 'global[]', nextAfter: 1n, hasMore: false });
+  line.advance();
+  line.updateBucketPosition({ bucket: 'global[]', nextAfter: 1n, hasMore: false });
 
  // Update bucket storage state
  storage.updateTestChecksum({ bucket: 'global[]', checksum: 2, count: 2 });
@@ -104,6 +106,7 @@
      invalidateParameterBuckets: false
    }
  }))!;
+  line2.advance();
  expect(line2.checkpointLine).toEqual({
    checkpoint_diff: {
      removed_buckets: [],
@@ -112,7 +115,7 @@
      write_checkpoint: undefined
    }
  });
-  expect(state.getFilteredBucketPositions(line2.bucketsToFetch)).toEqual(new Map([['global[]', 1n]]));
+  expect(line2.getFilteredBucketPositions()).toEqual(new Map([['global[]', 1n]]));
 });
 
 test('global bucket with initial state', async () => {
@@ -137,6 +140,7 @@
    writeCheckpoint: null,
    update: CHECKPOINT_INVALIDATE_ALL
  }))!;
+  line.advance();
  expect(line.checkpointLine).toEqual({
    checkpoint: {
      buckets: [{ bucket: 'global[]', checksum: 1, count: 1, priority: 3 }],
@@ -151,7 +155,7 @@
    }
  ]);
  // This is the main difference between this and the previous test
-  expect(state.getFilteredBucketPositions(line.bucketsToFetch)).toEqual(new Map([['global[]', 1n]]));
+  expect(line.getFilteredBucketPositions()).toEqual(new Map([['global[]', 1n]]));
 });
 
 test('multiple static buckets', async () => {
@@ -192,6 +196,7 @@
      priority: 3
    }
  ]);
+  line.advance();
 
  storage.updateTestChecksum({ bucket: 'global[1]', checksum: 2, count: 2 });
  storage.updateTestChecksum({ bucket: 'global[2]', checksum: 2, count: 2 });
@@ -240,6 +245,7 @@
    writeCheckpoint: null,
    update: CHECKPOINT_INVALIDATE_ALL
  }))!;
+  line.advance();
  expect(line.checkpointLine).toEqual({
    checkpoint: {
      buckets: [{ bucket: 'global[]', checksum: 1, count: 1, priority: 3 }],
@@ -253,7 +259,7 @@
      priority: 3
    }
  ]);
-  expect(state.getFilteredBucketPositions(line.bucketsToFetch)).toEqual(new Map([['global[]', 0n]]));
+  expect(line.getFilteredBucketPositions()).toEqual(new Map([['global[]', 0n]]));
 });
 
 test('invalidating individual bucket', async () => {
@@ -274,14 +280,14 @@
  // We specifically do not set this here, so that we have manual control over the events.
  // storage.filter = state.checkpointFilter;
 
-  await state.buildNextCheckpointLine({
+  const line = await state.buildNextCheckpointLine({
    base: { checkpoint: 1n, lsn: '1' },
    writeCheckpoint: null,
    update: CHECKPOINT_INVALIDATE_ALL
  });
-
-  state.updateBucketPosition({ bucket: 'global[1]', nextAfter: 1n, hasMore: false });
-  state.updateBucketPosition({ bucket: 'global[2]', nextAfter: 1n, hasMore: false });
+  line!.advance();
+  line!.updateBucketPosition({ bucket: 'global[1]', nextAfter: 1n, hasMore: false });
+  line!.updateBucketPosition({ bucket: 'global[2]', nextAfter: 1n, hasMore: false });
 
  storage.updateTestChecksum({ bucket: 'global[1]', checksum: 2, count: 2 });
  storage.updateTestChecksum({ bucket: 'global[2]', checksum: 2, count: 2 });
@@ -330,12 +336,14 @@
  storage.updateTestChecksum({ bucket: 'global[1]', checksum: 1, count: 1 });
  storage.updateTestChecksum({ bucket: 'global[2]', checksum: 1, count: 1 });
 
-  await state.buildNextCheckpointLine({
+  const line = await state.buildNextCheckpointLine({
    base: { checkpoint: 1n, lsn: '1' },
    writeCheckpoint: null,
    update: CHECKPOINT_INVALIDATE_ALL
  });
 
+  line!.advance();
+
  storage.updateTestChecksum({ bucket: 'global[1]', checksum: 2, count: 2 });
  storage.updateTestChecksum({ bucket: 'global[2]', checksum: 2, count: 2 });
@@ -380,6 +388,7 @@
    writeCheckpoint: null,
    update: CHECKPOINT_INVALIDATE_ALL
  }))!;
+  line.advance();
  expect(line.checkpointLine).toEqual({
    checkpoint: {
      buckets: [
@@ -402,7 +411,7 @@
  ]);
 
  // This is the bucket data to be fetched
-  expect(state.getFilteredBucketPositions(line.bucketsToFetch)).toEqual(
+  expect(line.getFilteredBucketPositions()).toEqual(
    new Map([
      ['global[1]', 0n],
      ['global[2]', 0n]
@@ -411,8 +420,9 @@
 
  // No data changes here.
  // We simulate partial data sent, before a checkpoint is interrupted.
-  state.updateBucketPosition({ bucket: 'global[1]', nextAfter: 3n, hasMore: false });
-  state.updateBucketPosition({ bucket: 'global[2]', nextAfter: 1n, hasMore: true });
+  line.advance();
+  line.updateBucketPosition({ bucket: 'global[1]', nextAfter: 3n, hasMore: false });
+  line.updateBucketPosition({ bucket: 'global[2]', nextAfter: 1n, hasMore: true });
  storage.updateTestChecksum({ bucket: 'global[1]', checksum: 4, count: 4 });
 
  const line2 = (await state.buildNextCheckpointLine({
@@ -424,6 +434,7 @@
      updatedDataBuckets: new Set(['global[1]'])
    }
  }))!;
+  line2.advance();
  expect(line2.checkpointLine).toEqual({
    checkpoint_diff: {
      removed_buckets: [],
@@ -451,7 +462,7 @@
    }
  ]);
 
-  expect(state.getFilteredBucketPositions(line2.bucketsToFetch)).toEqual(
+  expect(line2.getFilteredBucketPositions()).toEqual(
    new Map([
      ['global[1]', 3n],
      ['global[2]', 1n]
@@ -507,16 +518,18 @@
      priority: 3
    }
  ]);
+  line.advance();
  // This is the bucket data to be fetched
-  expect(state.getFilteredBucketPositions(line.bucketsToFetch)).toEqual(
+  expect(line.getFilteredBucketPositions()).toEqual(
    new Map([
      ['by_project[1]', 0n],
      ['by_project[2]', 0n]
    ])
  );
 
-  state.updateBucketPosition({ bucket: 'by_project[1]', nextAfter: 1n, hasMore: false });
-  state.updateBucketPosition({ bucket: 'by_project[2]', nextAfter: 1n, hasMore: false });
+  line.advance();
+  line.updateBucketPosition({ bucket: 'by_project[1]', nextAfter: 1n, hasMore: false });
+  line.updateBucketPosition({ bucket: 'by_project[2]', nextAfter: 1n, hasMore: false });
 
  storage.getParameterSets = async (
    checkpoint: InternalOpId,
@@ -538,6 +551,7 @@
      invalidateParameterBuckets: false
    }
  }))!;
+  line2.advance();
  expect(line2.checkpointLine).toEqual({
    checkpoint_diff: {
      removed_buckets: [],
@@ -546,7 +560,7 @@
      write_checkpoint: undefined
    }
  });
-  expect(state.getFilteredBucketPositions(line2.bucketsToFetch)).toEqual(new Map([['by_project[3]', 0n]]));
+  expect(line2.getFilteredBucketPositions()).toEqual(new Map([['by_project[3]', 0n]]));
 });
 });
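These tests exercise the new CheckpointLine lifecycle from the sync.ts diff: buildNextCheckpointLine() returns a line (or null) without mutating shared state, advance() commits the line's bucket positions just before the line is sent (see the "Since yielding can block" comment above), and updateBucketPosition()/getFilteredBucketPositions() moved from BucketChecksumState onto the line itself. A hypothetical minimal model of that lifecycle, ignoring checksums, priorities, and checkpoint diffs:

class SketchCheckpointLine {
  // Positions captured when the line was built; not applied until advance(),
  // so a line that is never sent (e.g. superseded during the race) has no effect.
  private pending: Map<string, bigint>;
  private positions = new Map<string, bigint>();

  constructor(initialPositions: Map<string, bigint>) {
    this.pending = initialPositions;
  }

  // Commit the captured positions just before the line is yielded to the client.
  advance(): void {
    for (const [bucket, after] of this.pending) {
      this.positions.set(bucket, after);
    }
    this.pending.clear();
  }

  // Record progress as bucket data batches are streamed out.
  updateBucketPosition(update: { bucket: string; nextAfter: bigint; hasMore: boolean }): void {
    this.positions.set(update.bucket, update.nextAfter);
  }

  // Start positions for the buckets that still need to be fetched.
  getFilteredBucketPositions(): Map<string, bigint> {
    return new Map(this.positions);
  }
}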