@powersync/service-core 0.12.1 → 0.13.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -347,9 +347,9 @@ export class MongoBucketStorage
       .catch(ignoreNotExiting);
 
     return {
-      operations_size_bytes: operations_aggregate[0].storageStats.size,
-      parameters_size_bytes: parameters_aggregate[0].storageStats.size,
-      replication_size_bytes: replication_aggregate[0].storageStats.size
+      operations_size_bytes: Number(operations_aggregate[0].storageStats.size),
+      parameters_size_bytes: Number(parameters_aggregate[0].storageStats.size),
+      replication_size_bytes: Number(replication_aggregate[0].storageStats.size)
     };
   }
 
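The only change in this hunk is wrapping the `$collStats` sizes in `Number(...)`: the MongoDB Node driver can surface these values as BSON types such as `Long` rather than plain JavaScript numbers, which then serialize awkwardly in the returned metrics. A minimal sketch of the same coercion, using a hypothetical `normalizeSize` helper that is not part of the package:

```ts
import { Long } from 'bson';

// Hypothetical helper: coerce a $collStats size value into a plain number.
function normalizeSize(size: number | Long | undefined): number {
  if (size == null) {
    return 0;
  }
  // BSON Long exposes toNumber(); plain numbers pass through unchanged.
  return typeof size === 'number' ? size : size.toNumber();
}

console.log(normalizeSize(Long.fromNumber(123_456))); // 123456
console.log(normalizeSize(987)); // 987
console.log(normalizeSize(undefined)); // 0
```

An explicit `toNumber()` spells out the intent a little more than `Number(...)`, but both produce a plain number for values within the safe integer range.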
@@ -48,6 +48,8 @@ export class SourceTable {
   }
 
   /**
+   * Use for postgres only.
+   *
    * Usage: db.query({statement: `SELECT $1::regclass`, params: [{type: 'varchar', value: table.qualifiedName}]})
    */
   get qualifiedName() {
@@ -55,6 +57,8 @@ export class SourceTable {
   }
 
   /**
+   * Use for postgres and logs only.
+   *
    * Usage: db.query(`SELECT * FROM ${table.escapedIdentifier}`)
    */
   get escapedIdentifier() {
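The added doc comments draw a line between the two getters: `qualifiedName` is meant to be passed as a bind parameter (for example, cast to `regclass` on the Postgres side), while `escapedIdentifier` is already quoted and safe to interpolate into SQL text or log messages. A rough sketch of that split, assuming a node-postgres style client rather than the package's internal `db.query` wrapper:

```ts
import { Client } from 'pg';

// `table` is assumed to expose the two getters from SourceTable above.
async function describeTable(client: Client, table: { qualifiedName: string; escapedIdentifier: string }) {
  // qualifiedName travels as a bind parameter and is resolved server-side via ::regclass.
  const oid = await client.query('SELECT $1::regclass::oid AS oid', [table.qualifiedName]);

  // escapedIdentifier is already quoted, so it can be interpolated into SQL text and log lines.
  const count = await client.query(`SELECT count(*) AS n FROM ${table.escapedIdentifier}`);

  console.log(`Table ${table.escapedIdentifier} (oid ${oid.rows[0].oid}) has ${count.rows[0].n} rows`);
}
```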
@@ -609,6 +609,8 @@ export class MongoBucketBatch extends DisposableObserver<BucketBatchStorageListe
     super[Symbol.dispose]();
   }
 
+  private lastWaitingLogThottled = 0;
+
   async commit(lsn: string): Promise<boolean> {
     await this.flush();
 
@@ -619,9 +621,12 @@ export class MongoBucketBatch extends DisposableObserver<BucketBatchStorageListe
       return false;
     }
     if (lsn < this.no_checkpoint_before_lsn) {
-      logger.info(
-        `Waiting until ${this.no_checkpoint_before_lsn} before creating checkpoint, currently at ${lsn}. Persisted op: ${this.persisted_op}`
-      );
+      if (Date.now() - this.lastWaitingLogThottled > 5_000) {
+        logger.info(
+          `Waiting until ${this.no_checkpoint_before_lsn} before creating checkpoint, currently at ${lsn}. Persisted op: ${this.persisted_op}`
+        );
+        this.lastWaitingLogThottled = Date.now();
+      }
 
       // Edge case: During initial replication, we have a no_checkpoint_before_lsn set,
       // and don't actually commit the snapshot.
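The new `lastWaitingLogThottled` field throttles the "Waiting until ..." message to at most one line every 5 seconds, since `commit()` can be called repeatedly while replication catches up to `no_checkpoint_before_lsn`. The same pattern, pulled out into a reusable helper (hypothetical, not part of the package):

```ts
// Minimal sketch of time-based log throttling.
function makeThrottledLogger(intervalMs: number, log: (message: string) => void) {
  let lastEmitted = 0;
  return (message: string) => {
    const now = Date.now();
    if (now - lastEmitted > intervalMs) {
      log(message);
      lastEmitted = now;
    }
  };
}

// Usage: at most one "waiting" line every 5 seconds, even in a tight loop.
const logWaiting = makeThrottledLogger(5_000, (msg) => console.info(msg));
for (let i = 0; i < 3; i++) {
  logWaiting(`Waiting for checkpoint, attempt ${i}`);
}
```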
@@ -265,7 +265,7 @@ export class MongoSyncBucketStorage
       },
       {
         $group: {
-          _id: '$key',
+          _id: { key: '$key', lookup: '$lookup' },
           bucket_parameters: {
             $first: '$bucket_parameters'
           }
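Grouping on `_id: '$key'` alone collapses parameter documents that share a replica key but belong to different lookups, so a removal recorded under one lookup could hide a still-valid entry under another. Grouping on the `{ key, lookup }` pair keeps the latest document per lookup; the new `getParameterSets` test further down in this diff exercises exactly that case. An illustrative plain-JavaScript analogy of the two groupings (sample document shapes only, not the real collection schema):

```ts
// Two documents share the same replica key but come from different lookups.
const docs = [
  { key: 'todo2', lookup: ['mybucket', '1', 'list1'], bucket_parameters: [{ todo_id: 'todo2' }] },
  { key: 'todo2', lookup: ['mybucket', '1', 'list2'], bucket_parameters: [] } // removal entry
];

// Rough analogue of { $group: { _id: '$key' } }: both lookups collapse into one group.
const byKey = new Map(docs.map((d) => [d.key, d]));

// Rough analogue of { $group: { _id: { key: '$key', lookup: '$lookup' } } }:
// each (key, lookup) pair keeps its own latest document.
const byKeyAndLookup = new Map(docs.map((d) => [`${d.key}:${d.lookup.join('/')}`, d]));

console.log(byKey.size); // 1
console.log(byKeyAndLookup.size); // 2
```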
@@ -522,11 +522,13 @@ export class MongoSyncBucketStorage
     while (true) {
       try {
         await this.clearIteration();
+
+        logger.info(`${this.slot_name} Done clearing data`);
         return;
       } catch (e: unknown) {
         if (e instanceof mongo.MongoServerError && e.codeName == 'MaxTimeMSExpired') {
           logger.info(
-            `Clearing took longer than ${db.mongo.MONGO_CLEAR_OPERATION_TIMEOUT_MS}ms, waiting and triggering another iteration.`
+            `${this.slot_name} Cleared batch of data in ${db.mongo.MONGO_CLEAR_OPERATION_TIMEOUT_MS}ms, continuing...`
           );
           await timers.setTimeout(db.mongo.MONGO_CLEAR_OPERATION_TIMEOUT_MS / 5);
           continue;
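Clearing runs in bounded iterations: each batch is capped at `MONGO_CLEAR_OPERATION_TIMEOUT_MS`, and a `MaxTimeMSExpired` error is treated as progress rather than failure. The updated messages tag each line with the slot name and log when clearing finishes. A sketch of the bounded-iteration pattern, with a hypothetical `clearBatch` and error type standing in for the Mongo-specific pieces:

```ts
import { setTimeout as delay } from 'node:timers/promises';

// Hypothetical stand-in for the MaxTimeMSExpired server error.
class TimeBudgetExceeded extends Error {}

async function clearAll(clearBatch: () => Promise<void>, budgetMs: number): Promise<void> {
  while (true) {
    try {
      await clearBatch();
      console.info('Done clearing data');
      return;
    } catch (e) {
      if (e instanceof TimeBudgetExceeded) {
        // The batch made partial progress; back off briefly and run another iteration.
        console.info(`Cleared batch of data in ${budgetMs}ms, continuing...`);
        await delay(budgetMs / 5);
        continue;
      }
      throw e;
    }
  }
}
```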
@@ -40,6 +40,17 @@ const privateKeyEdDSA: jose.JWK = {
   alg: 'EdDSA'
 };
 
+const privateKeyECDSA: jose.JWK = {
+  use: 'sig',
+  kty: 'EC',
+  crv: 'P-256',
+  kid: 'k3',
+  x: 'Y37HQjG1YvlQZ16CzO7UQxgkY_us-NfPxMPcHUDN-PE',
+  y: 'W3Jqs5_qlIh2UH79l8L3ApqNu14aFetM5oc9oCjAEaw',
+  d: 'p2HQaJApdgaAemVuVsL1hscCFOTd0r9uGxRnzvAelFU',
+  alg: 'ES256'
+};
+
 describe('JWT Auth', () => {
   test('KeyStore basics', async () => {
     const keys = await StaticKeyCollector.importKeys([sharedKey]);
@@ -372,4 +383,26 @@ describe('JWT Auth', () => {
 
     expect(verified.claim).toEqual('test-claim');
   });
+
+  test('signing with ECDSA', async () => {
+    const keys = await StaticKeyCollector.importKeys([privateKeyECDSA]);
+    const store = new KeyStore(keys);
+    const signKey = (await jose.importJWK(privateKeyECDSA)) as jose.KeyLike;
+
+    const signedJwt = await new jose.SignJWT({ claim: 'test-claim-2' })
+      .setProtectedHeader({ alg: 'ES256', kid: 'k3' })
+      .setSubject('f1')
+      .setIssuedAt()
+      .setIssuer('tester')
+      .setAudience('tests')
+      .setExpirationTime('5m')
+      .sign(signKey);
+
+    const verified = (await store.verifyJwt(signedJwt, {
+      defaultAudiences: ['tests'],
+      maxAge: '6m'
+    })) as JwtPayload & { claim: string };
+
+    expect(verified.claim).toEqual('test-claim-2');
+  });
 });
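The new fixture and test add ES256 (ECDSA over P-256) coverage alongside the existing EdDSA key. For reference, a comparable JWK fixture could be generated with jose's `generateKeyPair` and `exportJWK`; this is a sketch of one way to produce such a key, not how the key above was created:

```ts
import * as jose from 'jose';

async function makeEs256Jwk(kid: string): Promise<jose.JWK> {
  // extractable so the private key can be exported in runtimes that use WebCrypto.
  const { privateKey } = await jose.generateKeyPair('ES256', { extractable: true });
  const jwk = await jose.exportJWK(privateKey);
  return { ...jwk, use: 'sig', kid, alg: 'ES256' };
}

// Usage:
// const jwk = await makeEs256Jwk('k3');
// console.log(jwk.crv); // 'P-256'
```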
@@ -119,6 +119,76 @@ bucket_definitions:
     ]);
   });
 
+  test('it should use the latest version after updates', async () => {
+    const sync_rules = testRules(
+      `
+      bucket_definitions:
+        mybucket:
+          parameters:
+            - SELECT id AS todo_id
+              FROM todos
+              WHERE list_id IN token_parameters.list_id
+          data: []
+      `
+    );
+
+    const storage = (await factory()).getInstance(sync_rules);
+
+    const table = makeTestTable('todos', ['id', 'list_id']);
+
+    await storage.startBatch(BATCH_OPTIONS, async (batch) => {
+      // Create two todos which initially belong to different lists
+      await batch.save({
+        sourceTable: table,
+        tag: SaveOperationTag.INSERT,
+        after: {
+          id: 'todo1',
+          list_id: 'list1'
+        },
+        afterReplicaId: rid('todo1')
+      });
+      await batch.save({
+        sourceTable: table,
+        tag: SaveOperationTag.INSERT,
+        after: {
+          id: 'todo2',
+          list_id: 'list2'
+        },
+        afterReplicaId: rid('todo2')
+      });
+    });
+
+    const result2 = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
+      // Update the second todo item to now belong to list 1
+      await batch.save({
+        sourceTable: table,
+        tag: SaveOperationTag.UPDATE,
+        after: {
+          id: 'todo2',
+          list_id: 'list1'
+        },
+        afterReplicaId: rid('todo2')
+      });
+    });
+
+    // We specifically request the todo_ids for both lists.
+    // The removal operation for the association of `list2`::`todo2` should not interfere with the new
+    // association of `list1`::`todo2`
+    const parameters = await storage.getParameterSets(BigInt(result2!.flushed_op).toString(), [
+      ['mybucket', '1', 'list1'],
+      ['mybucket', '1', 'list2']
+    ]);
+
+    expect(parameters.sort((a, b) => (a.todo_id as string).localeCompare(b.todo_id as string))).toEqual([
+      {
+        todo_id: 'todo1'
+      },
+      {
+        todo_id: 'todo2'
+      }
+    ]);
+  });
+
   test('save and load parameters with different number types', async () => {
     const sync_rules = testRules(
       `
@@ -20,7 +20,12 @@ vi.mock('@powersync/lib-services-framework', () => ({
     afterSend: () => Promise<void>;
     __micro_router_response = true;
 
-    constructor({ status, data, headers, afterSend }: {
+    constructor({
+      status,
+      data,
+      headers,
+      afterSend
+    }: {
       status?: number;
       data: any;
       headers?: Record<string, string>;