@powersync/service-module-postgres-storage 0.3.1 → 0.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -2,7 +2,7 @@
   "name": "@powersync/service-module-postgres-storage",
   "repository": "https://github.com/powersync-ja/powersync-service",
   "types": "dist/@types/index.d.ts",
-  "version": "0.3.1",
+  "version": "0.4.1",
   "main": "dist/index.js",
   "type": "module",
   "publishConfig": {
@@ -29,13 +29,13 @@
     "p-defer": "^4.0.1",
     "ts-codec": "^1.3.0",
     "uuid": "^9.0.1",
-    "@powersync/lib-services-framework": "0.5.1",
-    "@powersync/lib-service-postgres": "0.3.1",
-    "@powersync/service-core": "0.18.1",
-    "@powersync/service-core-tests": "0.4.1",
+    "@powersync/lib-services-framework": "0.5.3",
+    "@powersync/lib-service-postgres": "0.3.3",
+    "@powersync/service-core": "1.7.1",
+    "@powersync/service-core-tests": "0.6.0",
     "@powersync/service-jpgwire": "0.19.0",
-    "@powersync/service-sync-rules": "0.23.4",
-    "@powersync/service-types": "0.8.0"
+    "@powersync/service-types": "0.8.0",
+    "@powersync/service-sync-rules": "0.24.0"
   },
   "devDependencies": {
     "@types/uuid": "^9.0.4",
@@ -1,11 +1,8 @@
 import * as framework from '@powersync/lib-services-framework';
-import { storage, sync, UpdateSyncRulesOptions, utils } from '@powersync/service-core';
+import { GetIntanceOptions, storage, SyncRulesBucketStorage, UpdateSyncRulesOptions } from '@powersync/service-core';
 import * as pg_wire from '@powersync/service-jpgwire';
 import * as sync_rules from '@powersync/service-sync-rules';
 import crypto from 'crypto';
-import { wrapWithAbort } from 'ix/asynciterable/operators/withabort.js';
-import { LRUCache } from 'lru-cache/min';
-import * as timers from 'timers/promises';
 import * as uuid from 'uuid';
 
 import * as lib_postgres from '@powersync/lib-service-postgres';
@@ -22,38 +19,13 @@ export type PostgresBucketStorageOptions = {
 };
 
 export class PostgresBucketStorageFactory
-  extends framework.DisposableObserver<storage.BucketStorageFactoryListener>
+  extends framework.BaseObserver<storage.BucketStorageFactoryListener>
   implements storage.BucketStorageFactory
 {
   readonly db: lib_postgres.DatabaseClient;
   public readonly slot_name_prefix: string;
 
-  private sharedIterator = new sync.BroadcastIterable((signal) => this.watchActiveCheckpoint(signal));
-
-  private readonly storageCache = new LRUCache<number, storage.SyncRulesBucketStorage>({
-    max: 3,
-    fetchMethod: async (id) => {
-      const syncRulesRow = await this.db.sql`
-        SELECT
-          *
-        FROM
-          sync_rules
-        WHERE
-          id = ${{ value: id, type: 'int4' }}
-      `
-        .decoded(models.SyncRules)
-        .first();
-      if (syncRulesRow == null) {
-        // Deleted in the meantime?
-        return undefined;
-      }
-      const rules = new PostgresPersistedSyncRulesContent(this.db, syncRulesRow);
-      return this.getInstance(rules);
-    },
-    dispose: (storage) => {
-      storage[Symbol.dispose]();
-    }
-  });
+  private activeStorageCache: storage.SyncRulesBucketStorage | undefined;
 
   constructor(protected options: PostgresBucketStorageOptions) {
     super();
@@ -70,7 +42,6 @@ export class PostgresBucketStorageFactory
   }
 
   async [Symbol.asyncDispose]() {
-    super[Symbol.dispose]();
     await this.db[Symbol.asyncDispose]();
   }
 
@@ -79,18 +50,22 @@ export class PostgresBucketStorageFactory
     // This has not been implemented yet.
   }
 
-  getInstance(syncRules: storage.PersistedSyncRulesContent): storage.SyncRulesBucketStorage {
+  getInstance(
+    syncRules: storage.PersistedSyncRulesContent,
+    options?: GetIntanceOptions
+  ): storage.SyncRulesBucketStorage {
     const storage = new PostgresSyncRulesStorage({
       factory: this,
       db: this.db,
       sync_rules: syncRules,
      batchLimits: this.options.config.batch_limits
     });
-    this.iterateListeners((cb) => cb.syncStorageCreated?.(storage));
+    if (!options?.skipLifecycleHooks) {
+      this.iterateListeners((cb) => cb.syncStorageCreated?.(storage));
+    }
     storage.registerListener({
       batchStarted: (batch) => {
-        // This nested listener will be automatically disposed when the storage is disposed
-        batch.registerManagedListener(storage, {
+        batch.registerListener({
          replicationEvent: (payload) => this.iterateListeners((cb) => cb.replicationEvent?.(payload))
        });
      }
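
The new optional `GetIntanceOptions` parameter (the typo is in the published identifier) lets callers construct a storage instance without firing the `syncStorageCreated` lifecycle listeners. A minimal usage sketch, assuming an already-configured factory; the variable names are illustrative and `getNextSyncRulesContent` is the factory method shown later in this diff:

```ts
// Illustrative only: obtain persisted sync rules content, then build a
// storage instance without notifying syncStorageCreated listeners.
const rules = await factory.getNextSyncRulesContent();
if (rules != null) {
  const internal = factory.getInstance(rules, { skipLifecycleHooks: true });
  // internal behaves like any other SyncRulesBucketStorage; only the
  // creation lifecycle hook was skipped.
}
```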
@@ -255,13 +230,13 @@ export class PostgresBucketStorageFactory
     });
   }
 
-  async slotRemoved(slot_name: string): Promise<void> {
+  async restartReplication(sync_rules_group_id: number): Promise<void> {
     const next = await this.getNextSyncRulesContent();
     const active = await this.getActiveSyncRulesContent();
 
     // In both the below cases, we create a new sync rules instance.
-    // The current one will continue erroring until the next one has finished processing.
-    if (next != null && next.slot_name == slot_name) {
+    // The current one will continue serving sync requests until the next one has finished processing.
+    if (next != null && next.id == sync_rules_group_id) {
       // We need to redo the "next" sync rules
       await this.updateSyncRules({
         content: next.sync_rules_content,
@@ -276,18 +251,30 @@ export class PostgresBucketStorageFactory
           id = ${{ value: next.id, type: 'int4' }}
           AND state = ${{ value: storage.SyncRuleState.PROCESSING, type: 'varchar' }}
       `.execute();
-    } else if (next == null && active?.slot_name == slot_name) {
+    } else if (next == null && active?.id == sync_rules_group_id) {
       // Slot removed for "active" sync rules, while there is no "next" one.
       await this.updateSyncRules({
         content: active.sync_rules_content,
         validate: false
       });
 
-      // Pro-actively stop replicating
+      // Pro-actively stop replicating, but still serve clients with existing data
       await this.db.sql`
         UPDATE sync_rules
         SET
-          state = ${{ value: storage.SyncRuleState.STOP, type: 'varchar' }}
+          state = ${{ value: storage.SyncRuleState.ERRORED, type: 'varchar' }}
+        WHERE
+          id = ${{ value: active.id, type: 'int4' }}
+          AND state = ${{ value: storage.SyncRuleState.ACTIVE, type: 'varchar' }}
+      `.execute();
+    } else if (next != null && active?.id == sync_rules_group_id) {
+      // Already have "next" sync rules - don't update any.
+
+      // Pro-actively stop replicating, but still serve clients with existing data
+      await this.db.sql`
+        UPDATE sync_rules
+        SET
+          state = ${{ value: storage.SyncRuleState.ERRORED, type: 'varchar' }}
         WHERE
           id = ${{ value: active.id, type: 'int4' }}
           AND state = ${{ value: storage.SyncRuleState.ACTIVE, type: 'varchar' }}
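
`slotRemoved(slot_name)` becomes `restartReplication(sync_rules_group_id)`: restarts are keyed by the sync rules group id instead of the Postgres replication slot name, and a failed active group is marked ERRORED (still serving existing data) rather than STOP. A hedged sketch of a possible caller, assuming a replication job that knows its storage's `group_id`; `replicateLoop` and `isFatalReplicationError` are hypothetical helpers, not part of this diff:

```ts
// Sketch: a replication job reacting to an unrecoverable error, e.g. its
// replication slot being dropped on the source database.
try {
  await replicateLoop();
} catch (e) {
  if (isFatalReplicationError(e)) {
    // Reprocess sync rules from scratch; the old group keeps serving reads.
    await factory.restartReplication(storage.group_id);
  }
  throw e;
}
```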
@@ -309,6 +296,7 @@ export class PostgresBucketStorageFactory
         sync_rules
       WHERE
         state = ${{ value: storage.SyncRuleState.ACTIVE, type: 'varchar' }}
+        OR state = ${{ value: storage.SyncRuleState.ERRORED, type: 'varchar' }}
       ORDER BY
         id DESC
       LIMIT
@@ -382,126 +370,21 @@ export class PostgresBucketStorageFactory
     return rows.map((row) => new PostgresPersistedSyncRulesContent(this.db, row));
   }
 
-  async getActiveCheckpoint(): Promise<storage.ActiveCheckpoint> {
-    const activeCheckpoint = await this.db.sql`
-      SELECT
-        id,
-        last_checkpoint,
-        last_checkpoint_lsn
-      FROM
-        sync_rules
-      WHERE
-        state = ${{ value: storage.SyncRuleState.ACTIVE, type: 'varchar' }}
-      ORDER BY
-        id DESC
-      LIMIT
-        1
-    `
-      .decoded(models.ActiveCheckpoint)
-      .first();
-
-    return this.makeActiveCheckpoint(activeCheckpoint);
-  }
-
-  async *watchWriteCheckpoint(user_id: string, signal: AbortSignal): AsyncIterable<storage.WriteCheckpoint> {
-    let lastCheckpoint: utils.OpId | null = null;
-    let lastWriteCheckpoint: bigint | null = null;
-
-    const iter = wrapWithAbort(this.sharedIterator, signal);
-    for await (const cp of iter) {
-      const { checkpoint, lsn } = cp;
-
-      // lsn changes are not important by itself.
-      // What is important is:
-      // 1. checkpoint (op_id) changes.
-      // 2. write checkpoint changes for the specific user
-      const bucketStorage = await cp.getBucketStorage();
-      if (!bucketStorage) {
-        continue;
-      }
-
-      const lsnFilters: Record<string, string> = lsn ? { 1: lsn } : {};
-
-      const currentWriteCheckpoint = await bucketStorage.lastWriteCheckpoint({
-        user_id,
-        heads: {
-          ...lsnFilters
-        }
-      });
-
-      if (currentWriteCheckpoint == lastWriteCheckpoint && checkpoint == lastCheckpoint) {
-        // No change - wait for next one
-        // In some cases, many LSNs may be produced in a short time.
-        // Add a delay to throttle the write checkpoint lookup a bit.
-        await timers.setTimeout(20 + 10 * Math.random());
-        continue;
-      }
-
-      lastWriteCheckpoint = currentWriteCheckpoint;
-      lastCheckpoint = checkpoint;
-
-      yield { base: cp, writeCheckpoint: currentWriteCheckpoint };
+  async getActiveStorage(): Promise<SyncRulesBucketStorage | null> {
+    const content = await this.getActiveSyncRulesContent();
+    if (content == null) {
+      return null;
     }
-  }
-
-  protected async *watchActiveCheckpoint(signal: AbortSignal): AsyncIterable<storage.ActiveCheckpoint> {
-    const doc = await this.db.sql`
-      SELECT
-        id,
-        last_checkpoint,
-        last_checkpoint_lsn
-      FROM
-        sync_rules
-      WHERE
-        state = ${{ type: 'varchar', value: storage.SyncRuleState.ACTIVE }}
-      LIMIT
-        1
-    `
-      .decoded(models.ActiveCheckpoint)
-      .first();
-
-    const sink = new sync.LastValueSink<string>(undefined);
-
-    const disposeListener = this.db.registerListener({
-      notification: (notification) => sink.next(notification.payload)
-    });
 
-    signal.addEventListener('aborted', async () => {
-      disposeListener();
-      sink.complete();
-    });
-
-    yield this.makeActiveCheckpoint(doc);
-
-    let lastOp: storage.ActiveCheckpoint | null = null;
-    for await (const payload of sink.withSignal(signal)) {
-      if (signal.aborted) {
-        return;
-      }
-
-      const notification = models.ActiveCheckpointNotification.decode(payload);
-      const activeCheckpoint = this.makeActiveCheckpoint(notification.active_checkpoint);
-
-      if (lastOp == null || activeCheckpoint.lsn != lastOp.lsn || activeCheckpoint.checkpoint != lastOp.checkpoint) {
-        lastOp = activeCheckpoint;
-        yield activeCheckpoint;
-      }
+    // It is important that this instance is cached.
+    // Not for the instance construction itself, but to ensure that internal caches on the instance
+    // are re-used properly.
+    if (this.activeStorageCache?.group_id == content.id) {
+      return this.activeStorageCache;
+    } else {
+      const instance = this.getInstance(content);
+      this.activeStorageCache = instance;
+      return instance;
     }
   }
-
-  private makeActiveCheckpoint(row: models.ActiveCheckpointDecoded | null) {
-    return {
-      checkpoint: utils.timestampToOpId(row?.last_checkpoint ?? 0n),
-      lsn: row?.last_checkpoint_lsn ?? null,
-      hasSyncRules() {
-        return row != null;
-      },
-      getBucketStorage: async () => {
-        if (row == null) {
-          return null;
-        }
-        return (await this.storageCache.fetch(Number(row.id))) ?? null;
-      }
-    } satisfies storage.ActiveCheckpoint;
-  }
 }
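
The checkpoint-watching machinery (`getActiveCheckpoint`, `watchWriteCheckpoint`, `watchActiveCheckpoint`, `makeActiveCheckpoint`) is removed from the factory, consistent with the dropped `ix`, `lru-cache`, and `timers` imports earlier in this diff; that responsibility presumably now lives elsewhere (likely in `@powersync/service-core` 1.x, though this diff does not show it). What remains is `getActiveStorage` with a single-slot cache keyed by `group_id`. A minimal sketch of that cache contract, assuming a configured factory:

```ts
// While the same sync rules group stays active, repeated calls return the
// identical cached instance, so its internal caches are reused.
const first = await factory.getActiveStorage();
const second = await factory.getActiveStorage();
if (first != null && second != null) {
  console.assert(first === second, 'same group_id -> same cached instance');
}
```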
@@ -35,14 +35,7 @@ interface CurrentBucketState {
 /**
  * Additional options, primarily for testing.
  */
-export interface PostgresCompactOptions extends storage.CompactOptions {
-  /** Minimum of 2 */
-  clearBatchLimit?: number;
-  /** Minimum of 1 */
-  moveBatchLimit?: number;
-  /** Minimum of 1 */
-  moveBatchQueryLimit?: number;
-}
+export interface PostgresCompactOptions extends storage.CompactOptions {}
 
 const DEFAULT_CLEAR_BATCH_LIMIT = 5000;
 const DEFAULT_MOVE_BATCH_LIMIT = 2000;
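
The Postgres-specific batch-limit fields are dropped, leaving `PostgresCompactOptions` as a pure alias of `storage.CompactOptions`. A type-level sketch under the assumption (not shown in this diff) that the limits are now declared on the shared `storage.CompactOptions`:

```ts
// Assumes clearBatchLimit/moveBatchLimit/moveBatchQueryLimit are now inherited
// from storage.CompactOptions; if not, these fields would no longer type-check.
const options: PostgresCompactOptions = {
  clearBatchLimit: 5000, // matches DEFAULT_CLEAR_BATCH_LIMIT above
  moveBatchLimit: 2000, // matches DEFAULT_MOVE_BATCH_LIMIT above
  moveBatchQueryLimit: 2000 // illustrative value
};
```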
@@ -99,15 +92,19 @@ export class PostgresCompactor {
 
     let bucketLower: string | null = null;
     let bucketUpper: string | null = null;
+    const MAX_CHAR = String.fromCodePoint(0xffff);
 
-    if (bucket?.includes('[')) {
+    if (bucket == null) {
+      bucketLower = '';
+      bucketUpper = MAX_CHAR;
+    } else if (bucket?.includes('[')) {
       // Exact bucket name
       bucketLower = bucket;
       bucketUpper = bucket;
     } else if (bucket) {
       // Bucket definition name
       bucketLower = `${bucket}[`;
-      bucketUpper = `${bucket}[\uFFFF`;
+      bucketUpper = `${bucket}[${MAX_CHAR}`;
     }
 
     let upperOpIdLimit = BIGINT_MAX;
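
The compactor now computes explicit string bounds for every case, including `bucket == null` (compact all buckets), which the old query handled in SQL via `LIKE COALESCE(...)`. The same bounds logic restated as a small pure function, purely for illustration; the real code assigns to local variables as above, and `MAX_CHAR` (U+FFFF) sorts after any character used in bucket names under binary comparison:

```ts
// bucket may be:
//   null          -> all buckets
//   'defn[...]'   -> one exact bucket instance, e.g. by_user["u1"]
//   'defn'        -> every instance of one bucket definition
function bucketRange(bucket: string | null): { lower: string; upper: string } {
  const MAX_CHAR = String.fromCodePoint(0xffff);
  if (bucket == null) {
    return { lower: '', upper: MAX_CHAR };
  } else if (bucket.includes('[')) {
    // Exact bucket name: a single-point range.
    return { lower: bucket, upper: bucket };
  } else {
    // Bucket definition name: all instances share the 'defn[' prefix.
    return { lower: `${bucket}[`, upper: `${bucket}[${MAX_CHAR}` };
  }
}
```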
@@ -126,10 +123,16 @@ export class PostgresCompactor {
         bucket_data
       WHERE
         group_id = ${{ type: 'int4', value: this.group_id }}
-        AND bucket_name LIKE COALESCE(${{ type: 'varchar', value: bucketLower }}, '%')
-        AND op_id < ${{ type: 'int8', value: upperOpIdLimit }}
+        AND bucket_name >= ${{ type: 'varchar', value: bucketLower }}
+        AND (
+          (
+            bucket_name = ${{ type: 'varchar', value: bucketUpper }}
+            AND op_id < ${{ type: 'int8', value: upperOpIdLimit }}
+          )
+          OR bucket_name < ${{ type: 'varchar', value: bucketUpper }} COLLATE "C" -- Use binary comparison
+        )
       ORDER BY
-        bucket_name,
+        bucket_name DESC,
        op_id DESC
       LIMIT
        ${{ type: 'int4', value: this.moveBatchQueryLimit }}
@@ -145,7 +148,9 @@ export class PostgresCompactor {
     }
 
     // Set upperBound for the next batch
-    upperOpIdLimit = batch[batch.length - 1].op_id;
+    const lastBatchItem = batch[batch.length - 1];
+    upperOpIdLimit = lastBatchItem.op_id;
+    bucketUpper = lastBatchItem.bucket_name;
 
     for (const doc of batch) {
       if (currentState == null || doc.bucket_name != currentState.bucket) {
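
Together with the query change above, the compactor now pages backwards over the compound key `(bucket_name, op_id)` rather than `op_id` alone: each batch's last row becomes the exclusive cursor for the next query. A paging skeleton under assumed types; `queryBatch` is a hypothetical stand-in for the SQL shown above, and only the cursor-advance logic mirrors the diff:

```ts
interface BucketDataRow { bucket_name: string; op_id: bigint; }
declare function queryBatch(bucketUpper: string, upperOpIdLimit: bigint): Promise<BucketDataRow[]>;

const BIGINT_MAX = 9223372036854775807n; // int8 max
let bucketUpper = String.fromCodePoint(0xffff);
let upperOpIdLimit = BIGINT_MAX;

while (true) {
  const batch = await queryBatch(bucketUpper, upperOpIdLimit);
  if (batch.length == 0) break;
  // Rows arrive in (bucket_name DESC, op_id DESC) order, so the last row is
  // the smallest key seen; the next query continues strictly before it.
  const last = batch[batch.length - 1];
  upperOpIdLimit = last.op_id;
  bucketUpper = last.bucket_name;
  // ... process batch ...
}
```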