@powersync/service-core 1.12.1 → 1.13.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. package/CHANGELOG.md +26 -0
  2. package/dist/api/RouteAPI.d.ts +1 -1
  3. package/dist/api/diagnostics.js +1 -1
  4. package/dist/api/diagnostics.js.map +1 -1
  5. package/dist/entry/cli-entry.js +2 -2
  6. package/dist/entry/cli-entry.js.map +1 -1
  7. package/dist/index.d.ts +1 -0
  8. package/dist/index.js +1 -0
  9. package/dist/index.js.map +1 -1
  10. package/dist/metrics/open-telemetry/OpenTelemetryMetricsFactory.d.ts +1 -1
  11. package/dist/metrics/open-telemetry/OpenTelemetryMetricsFactory.js.map +1 -1
  12. package/dist/replication/AbstractReplicationJob.d.ts +4 -0
  13. package/dist/replication/AbstractReplicationJob.js.map +1 -1
  14. package/dist/replication/AbstractReplicator.d.ts +25 -1
  15. package/dist/replication/AbstractReplicator.js +53 -3
  16. package/dist/replication/AbstractReplicator.js.map +1 -1
  17. package/dist/replication/RelationCache.d.ts +9 -0
  18. package/dist/replication/RelationCache.js +20 -0
  19. package/dist/replication/RelationCache.js.map +1 -0
  20. package/dist/replication/replication-index.d.ts +1 -0
  21. package/dist/replication/replication-index.js +1 -0
  22. package/dist/replication/replication-index.js.map +1 -1
  23. package/dist/replication/replication-metrics.js +6 -0
  24. package/dist/replication/replication-metrics.js.map +1 -1
  25. package/dist/routes/endpoints/admin.js +2 -0
  26. package/dist/routes/endpoints/admin.js.map +1 -1
  27. package/dist/storage/BucketStorageBatch.d.ts +21 -3
  28. package/dist/storage/BucketStorageBatch.js +2 -1
  29. package/dist/storage/BucketStorageBatch.js.map +1 -1
  30. package/dist/storage/PersistedSyncRulesContent.d.ts +5 -0
  31. package/dist/storage/SourceTable.d.ts +17 -1
  32. package/dist/storage/SourceTable.js +28 -0
  33. package/dist/storage/SourceTable.js.map +1 -1
  34. package/dist/storage/StorageEngine.d.ts +3 -2
  35. package/dist/storage/StorageEngine.js +3 -0
  36. package/dist/storage/StorageEngine.js.map +1 -1
  37. package/dist/storage/StorageProvider.d.ts +2 -0
  38. package/dist/storage/SyncRulesBucketStorage.d.ts +18 -6
  39. package/dist/storage/SyncRulesBucketStorage.js.map +1 -1
  40. package/dist/storage/WriteCheckpointAPI.d.ts +0 -26
  41. package/dist/storage/WriteCheckpointAPI.js.map +1 -1
  42. package/dist/storage/bson.js +4 -1
  43. package/dist/storage/bson.js.map +1 -1
  44. package/dist/streams/BroadcastIterable.d.ts +1 -1
  45. package/dist/streams/streams-index.d.ts +0 -1
  46. package/dist/streams/streams-index.js +0 -1
  47. package/dist/streams/streams-index.js.map +1 -1
  48. package/dist/system/ServiceContext.js +6 -0
  49. package/dist/system/ServiceContext.js.map +1 -1
  50. package/package.json +4 -4
  51. package/src/api/RouteAPI.ts +1 -1
  52. package/src/api/diagnostics.ts +1 -1
  53. package/src/entry/cli-entry.ts +2 -2
  54. package/src/index.ts +2 -0
  55. package/src/metrics/open-telemetry/OpenTelemetryMetricsFactory.ts +3 -3
  56. package/src/replication/AbstractReplicationJob.ts +5 -0
  57. package/src/replication/AbstractReplicator.ts +56 -3
  58. package/src/replication/RelationCache.ts +25 -0
  59. package/src/replication/replication-index.ts +1 -0
  60. package/src/replication/replication-metrics.ts +7 -0
  61. package/src/routes/endpoints/admin.ts +2 -0
  62. package/src/storage/BucketStorageBatch.ts +26 -4
  63. package/src/storage/PersistedSyncRulesContent.ts +6 -0
  64. package/src/storage/SourceTable.ts +44 -1
  65. package/src/storage/StorageEngine.ts +6 -2
  66. package/src/storage/StorageProvider.ts +3 -0
  67. package/src/storage/SyncRulesBucketStorage.ts +22 -6
  68. package/src/storage/WriteCheckpointAPI.ts +0 -30
  69. package/src/storage/bson.ts +4 -1
  70. package/src/streams/BroadcastIterable.ts +1 -1
  71. package/src/streams/streams-index.ts +0 -1
  72. package/src/system/ServiceContext.ts +6 -0
  73. package/tsconfig.tsbuildinfo +1 -1
  74. package/dist/streams/Demultiplexer.d.ts +0 -52
  75. package/dist/streams/Demultiplexer.js +0 -128
  76. package/dist/streams/Demultiplexer.js.map +0 -1
  77. package/src/streams/Demultiplexer.ts +0 -165
  78. package/test/src/demultiplexer.test.ts +0 -205
@@ -2,12 +2,13 @@ import { ObserverClient } from '@powersync/lib-services-framework';
 import { EvaluatedParameters, EvaluatedRow, SqliteRow, ToastableSqliteRow } from '@powersync/service-sync-rules';
 import { BSON } from 'bson';
 import { ReplicationEventPayload } from './ReplicationEventPayload.js';
-import { SourceTable } from './SourceTable.js';
+import { SourceTable, TableSnapshotStatus } from './SourceTable.js';
 import { BatchedCustomWriteCheckpointOptions } from './storage-index.js';
 import { InternalOpId } from '../util/utils.js';

 export const DEFAULT_BUCKET_BATCH_COMMIT_OPTIONS: ResolvedBucketBatchCommitOptions = {
-  createEmptyCheckpoints: true
+  createEmptyCheckpoints: true,
+  oldestUncommittedChange: null
 };

 export interface BucketStorageBatch extends ObserverClient<BucketBatchStorageListener>, AsyncDisposable {
@@ -38,12 +39,14 @@ export interface BucketStorageBatch extends ObserverClient<BucketBatchStorageLis
    *
    * @returns null if there are no changes to flush.
    */
-  flush(): Promise<FlushedResult | null>;
+  flush(options?: BatchBucketFlushOptions): Promise<FlushedResult | null>;

   /**
    * Flush and commit any saved ops. This creates a new checkpoint by default.
    *
    * Only call this after a transaction.
+   *
+   * Returns true if either (1) a new checkpoint was created, or (2) there are no changes to commit.
    */
   commit(lsn: string, options?: BucketBatchCommitOptions): Promise<boolean>;

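For context, a minimal usage sketch of the updated commit() contract (not part of the diff); the import path and the currentLsn value are illustrative.

// Illustrative import path; adjust to the actual module layout.
import type { BucketStorageBatch } from './storage/BucketStorageBatch.js';

async function commitTransaction(batch: BucketStorageBatch, currentLsn: string): Promise<void> {
  // commit() resolves to true when a new checkpoint was created, or when there were no changes to commit.
  const committed = await batch.commit(currentLsn);
  if (!committed) {
    // false: there were changes, but no checkpoint could be created at this LSN yet.
    console.warn(`No checkpoint created at LSN ${currentLsn}`);
  }
}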
@@ -56,6 +59,14 @@ export interface BucketStorageBatch extends ObserverClient<BucketBatchStorageLis
    */
   keepalive(lsn: string): Promise<boolean>;

+  /**
+   * Set the LSN for a snapshot, before starting replication.
+   *
+   * Not required if the source database keeps track of this, for example with
+   * PostgreSQL logical replication slots.
+   */
+  setSnapshotLsn(lsn: string): Promise<void>;
+
   /**
    * Get the last checkpoint LSN, from either commit or keepalive.
    */
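A minimal sketch of where setSnapshotLsn() fits for a connector whose source database does not track the snapshot position itself (unlike PostgreSQL logical replication slots); getCurrentLsn() is a hypothetical helper and the import path is illustrative.

import type { BucketStorageBatch } from './storage/BucketStorageBatch.js';

// Hypothetical helper: reads the current replication position from the source database.
declare function getCurrentLsn(): Promise<string>;

async function startSnapshot(batch: BucketStorageBatch): Promise<void> {
  // Record the position that streaming replication must resume from once the snapshot is done.
  const lsn = await getCurrentLsn();
  await batch.setSnapshotLsn(lsn);
  // ...copy table data, then markSnapshotDone(tables, lsn) once the snapshot completes.
}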
@@ -63,6 +74,8 @@ export interface BucketStorageBatch extends ObserverClient<BucketBatchStorageLis

   markSnapshotDone(tables: SourceTable[], no_checkpoint_before_lsn: string): Promise<SourceTable[]>;

+  updateTableProgress(table: SourceTable, progress: Partial<TableSnapshotStatus>): Promise<SourceTable>;
+
   /**
    * Queues the creation of a custom Write Checkpoint. This will be persisted after operations are flushed.
    */
@@ -148,7 +161,16 @@ export interface FlushedResult {
   flushed_op: InternalOpId;
 }

-export interface BucketBatchCommitOptions {
+export interface BatchBucketFlushOptions {
+  /**
+   * The timestamp of the first change in this batch, according to the source database.
+   *
+   * Used to estimate replication lag.
+   */
+  oldestUncommittedChange?: Date | null;
+}
+
+export interface BucketBatchCommitOptions extends BatchBucketFlushOptions {
   /**
    * Creates a new checkpoint even if there were no persisted operations.
    * Defaults to true.
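A minimal sketch of the new flush/commit options, assuming the replicator tracks the source-side timestamp of the first change in the batch; names and import path are illustrative.

import type { BucketStorageBatch } from './storage/BucketStorageBatch.js';

async function commitWithLagTracking(
  batch: BucketStorageBatch,
  lsn: string,
  // Timestamp of the first change in this batch, according to the source database.
  firstChangeAt: Date | null
): Promise<boolean> {
  // BucketBatchCommitOptions extends BatchBucketFlushOptions, so flush() accepts the same option.
  return await batch.commit(lsn, { oldestUncommittedChange: firstChangeAt });
}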
@@ -9,6 +9,12 @@ export interface PersistedSyncRulesContent {
   readonly id: number;
   readonly sync_rules_content: string;
   readonly slot_name: string;
+  /**
+   * True if this is the "active" copy of the sync rules.
+   */
+  readonly active: boolean;
+
+  readonly last_checkpoint_lsn: string | null;

   readonly last_fatal_error?: string | null;
   readonly last_keepalive_ts?: Date | null;
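A sketch of how the new fields could be surfaced in a status summary; the summary shape is invented for illustration and the import path may differ.

import type { PersistedSyncRulesContent } from './storage/PersistedSyncRulesContent.js';

// Hypothetical diagnostics summary built from the persisted sync rules entry.
function describeSyncRules(content: PersistedSyncRulesContent) {
  return {
    id: content.id,
    slot_name: content.slot_name,
    // Whether this is the "active" copy of the sync rules.
    active: content.active,
    // LSN of the last checkpoint, if any.
    last_checkpoint_lsn: content.last_checkpoint_lsn,
    last_fatal_error: content.last_fatal_error ?? null
  };
}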
@@ -2,6 +2,12 @@ import { DEFAULT_TAG } from '@powersync/service-sync-rules';
 import * as util from '../util/util-index.js';
 import { ColumnDescriptor } from './SourceEntity.js';

+export interface TableSnapshotStatus {
+  totalEstimatedCount: number;
+  replicatedCount: number;
+  lastKey: Uint8Array | null;
+}
+
 export class SourceTable {
   static readonly DEFAULT_TAG = DEFAULT_TAG;

@@ -32,6 +38,13 @@ export class SourceTable {
    */
   public syncEvent = true;

+  /**
+   * Always undefined if snapshotComplete = true.
+   *
+   * May be set if snapshotComplete = false.
+   */
+  public snapshotStatus: TableSnapshotStatus | undefined = undefined;
+
   constructor(
     public readonly id: any,
     public readonly connectionTag: string,
@@ -40,7 +53,7 @@ export class SourceTable {
     public readonly table: string,

     public readonly replicaIdColumns: ColumnDescriptor[],
-    public readonly snapshotComplete: boolean
+    public snapshotComplete: boolean
   ) {}

   get hasReplicaIdentity() {
@@ -68,4 +81,34 @@ export class SourceTable {
   get syncAny() {
     return this.syncData || this.syncParameters || this.syncEvent;
   }
+
+  /**
+   * In-memory clone of the table status.
+   */
+  clone() {
+    const copy = new SourceTable(
+      this.id,
+      this.connectionTag,
+      this.objectId,
+      this.schema,
+      this.table,
+      this.replicaIdColumns,
+      this.snapshotComplete
+    );
+    copy.syncData = this.syncData;
+    copy.syncParameters = this.syncParameters;
+    copy.snapshotStatus = this.snapshotStatus;
+    return copy;
+  }
+
+  formatSnapshotProgress() {
+    if (this.snapshotComplete || this.snapshotStatus == null) {
+      // Should not happen
+      return '-';
+    } else if (this.snapshotStatus.totalEstimatedCount < 0) {
+      return `${this.snapshotStatus.replicatedCount}/?`;
+    } else {
+      return `${this.snapshotStatus.replicatedCount}/~${this.snapshotStatus.totalEstimatedCount}`;
+    }
+  }
 }
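A minimal sketch of how a replicator could combine updateTableProgress() with formatSnapshotProgress() while copying a table; the chunking and counts are placeholders, and import paths are illustrative.

import type { BucketStorageBatch } from './storage/BucketStorageBatch.js';
import type { SourceTable } from './storage/SourceTable.js';

// Illustrative: report progress after replicating one chunk of rows during the initial snapshot.
async function reportChunk(
  batch: BucketStorageBatch,
  table: SourceTable,
  rowsInChunk: number,
  lastKey: Uint8Array | null
): Promise<SourceTable> {
  const replicatedCount = (table.snapshotStatus?.replicatedCount ?? 0) + rowsInChunk;
  const updated = await batch.updateTableProgress(table, { replicatedCount, lastKey });
  // Logs e.g. "12000/~50000", or "12000/?" when no row-count estimate is available.
  console.log(`${updated.schema}.${updated.table}: ${updated.formatSnapshotProgress()}`);
  return updated;
}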
@@ -1,7 +1,7 @@
-import { BaseObserver, logger } from '@powersync/lib-services-framework';
+import { BaseObserver, logger, ServiceError } from '@powersync/lib-services-framework';
 import { ResolvedPowerSyncConfig } from '../util/util-index.js';
-import { ActiveStorage, BucketStorageProvider } from './StorageProvider.js';
 import { BucketStorageFactory } from './BucketStorageFactory.js';
+import { ActiveStorage, BucketStorageProvider } from './StorageProvider.js';

 export type StorageEngineOptions = {
   configuration: ResolvedPowerSyncConfig;
@@ -9,6 +9,7 @@ export type StorageEngineOptions = {

 export interface StorageEngineListener {
   storageActivated: (storage: BucketStorageFactory) => void;
+  storageFatalError: (error: ServiceError) => void;
 }

 export class StorageEngine extends BaseObserver<StorageEngineListener> {
@@ -47,6 +48,9 @@ export class StorageEngine extends BaseObserver<StorageEngineListener> {
       resolvedConfig: configuration
     });
     this.iterateListeners((cb) => cb.storageActivated?.(this.activeBucketStorage));
+    this.currentActiveStorage.onFatalError?.((error) => {
+      this.iterateListeners((cb) => cb.storageFatalError?.(error));
+    });
     logger.info(`Successfully activated storage: ${configuration.storage.type}.`);
     logger.info('Successfully started Storage Engine.');
   }
@@ -1,3 +1,4 @@
+import { ServiceError } from '@powersync/lib-services-framework';
 import * as util from '../util/util-index.js';
 import { BucketStorageFactory } from './BucketStorageFactory.js';

@@ -9,6 +10,8 @@ export interface ActiveStorage {
    * Tear down / drop the storage permanently
    */
   tearDown(): Promise<boolean>;
+
+  onFatalError?(callback: (error: ServiceError) => void): void;
 }

 export interface GetStorageOptions {
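A sketch of the provider side of the new optional hook: a storage implementation collects callbacks and invokes them on unrecoverable errors, which StorageEngine then forwards as storageFatalError (see the ServiceContext hunk below). The class here is illustrative, not an actual implementation.

import type { ServiceError } from '@powersync/lib-services-framework';
import type { ActiveStorage } from './storage/StorageProvider.js';

// Illustrative partial implementation of the optional hook.
class ExampleActiveStorage implements Partial<ActiveStorage> {
  private fatalErrorCallbacks: ((error: ServiceError) => void)[] = [];

  onFatalError(callback: (error: ServiceError) => void): void {
    this.fatalErrorCallbacks.push(callback);
  }

  protected reportFatalError(error: ServiceError): void {
    // Called when the underlying storage connection hits an unrecoverable error.
    for (const callback of this.fatalErrorCallbacks) {
      callback(error);
    }
  }
}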
@@ -1,7 +1,7 @@
-import { ObserverClient } from '@powersync/lib-services-framework';
+import { Logger, ObserverClient } from '@powersync/lib-services-framework';
 import { ParameterLookup, SqlSyncRules, SqliteJsonRow } from '@powersync/service-sync-rules';
 import * as util from '../util/util-index.js';
-import { BucketStorageBatch, FlushedResult } from './BucketStorageBatch.js';
+import { BucketStorageBatch, FlushedResult, SaveUpdate } from './BucketStorageBatch.js';
 import { BucketStorageFactory } from './BucketStorageFactory.js';
 import { ParseSyncRulesOptions } from './PersistedSyncRulesContent.js';
 import { SourceEntityDescriptor } from './SourceEntity.js';
@@ -48,7 +48,7 @@ export interface SyncRulesBucketStorage
   /**
    * Clear the storage, without changing state.
    */
-  clear(): Promise<void>;
+  clear(options?: ClearStorageOptions): Promise<void>;

   autoActivate(): Promise<void>;

@@ -125,6 +125,7 @@ export interface SyncRuleStatus {
   checkpoint_lsn: string | null;
   active: boolean;
   snapshot_done: boolean;
+  snapshot_lsn: string | null;
 }
 export interface ResolveTableOptions {
   group_id: number;
@@ -159,6 +160,15 @@ export interface StartBatchOptions extends ParseSyncRulesOptions {
    * This will avoid creating new operations for rows previously replicated.
    */
   skipExistingRows?: boolean;
+
+  /**
+   * Callback called if we streamed an update to a record that we don't have yet.
+   *
+   * This is expected to happen in some initial replication edge cases, only if storeCurrentData = true.
+   */
+  markRecordUnavailable?: BucketStorageMarkRecordUnavailable;
+
+  logger?: Logger;
 }

 export interface CompactOptions {
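A sketch of the new StartBatchOptions fields; Partial<> is used because the remaining (unchanged) options are omitted, and the assumption that SaveUpdate exposes the affected sourceTable should be checked against the actual type.

import { logger } from '@powersync/lib-services-framework';
import type { StartBatchOptions } from './storage/SyncRulesBucketStorage.js';

// Hypothetical bookkeeping: tables that need a targeted re-snapshot.
const tablesToResnapshot = new Set<string>();

const batchOptions: Partial<StartBatchOptions> = {
  logger,
  markRecordUnavailable: (record) => {
    // Called when a streamed update references a row we have not replicated yet
    // (expected in some initial replication edge cases when storeCurrentData = true).
    // Assumption: SaveUpdate exposes the affected sourceTable.
    tablesToResnapshot.add(record.sourceTable.table);
  }
};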
@@ -200,7 +210,11 @@ export interface CompactOptions {
   moveBatchQueryLimit?: number;
 }

-export interface TerminateOptions {
+export interface ClearStorageOptions {
+  signal?: AbortSignal;
+}
+
+export interface TerminateOptions extends ClearStorageOptions {
   /**
    * If true, also clear the storage before terminating.
    */
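A minimal sketch of the new ClearStorageOptions, assuming a SyncRulesBucketStorage instance; the timeout is arbitrary and the import path is illustrative.

import type { SyncRulesBucketStorage } from './storage/SyncRulesBucketStorage.js';

// Illustrative: allow an in-progress clear to be aborted, e.g. on shutdown or timeout.
async function clearWithTimeout(storage: SyncRulesBucketStorage, timeoutMs = 60_000): Promise<void> {
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), timeoutMs);
  try {
    // TerminateOptions extends ClearStorageOptions, so terminate() accepts the same signal.
    await storage.clear({ signal: controller.signal });
  } finally {
    clearTimeout(timer);
  }
}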
@@ -256,8 +270,8 @@ export interface StorageCheckpointUpdate extends WriteCheckpoint {
 }

 export interface GetCheckpointChangesOptions {
-  lastCheckpoint: util.InternalOpId;
-  nextCheckpoint: util.InternalOpId;
+  lastCheckpoint: ReplicationCheckpoint;
+  nextCheckpoint: ReplicationCheckpoint;
 }

 export interface CheckpointChanges {
@@ -274,3 +288,5 @@ export const CHECKPOINT_INVALIDATE_ALL: CheckpointChanges = {
   updatedParameterLookups: new Set<string>(),
   invalidateParameterBuckets: true
 };
+
+export type BucketStorageMarkRecordUnavailable = (record: SaveUpdate) => void;
@@ -50,37 +50,11 @@ export interface ManagedWriteCheckpointFilters extends BaseWriteCheckpointIdenti
   heads: Record<string, string>;
 }

-export interface WriteCheckpointResult {
-  /**
-   * Write checkpoint id (also referred to as client_id).
-   *
-   * If null, there is no write checkpoint for the client.
-   */
-  id: bigint | null;
-
-  /**
-   * LSN for the checkpoint.
-   *
-   * This will change when we support multiple connections.
-   *
-   * For managed write checkpoints, this LSN must be exceeded by the checkpoint / replication head to be valid.
-   *
-   * For custom write checkpoints, this will be null, and the write checkpoint is valid for all LSNs.
-   */
-  lsn: string | null;
-}
-
 export type ManagedWriteCheckpointOptions = ManagedWriteCheckpointFilters;

 export type SyncStorageLastWriteCheckpointFilters = BaseWriteCheckpointIdentifier | ManagedWriteCheckpointFilters;
 export type LastWriteCheckpointFilters = CustomWriteCheckpointFilters | ManagedWriteCheckpointFilters;

-export interface WatchUserWriteCheckpointOptions {
-  user_id: string;
-  sync_rules_id: number;
-  signal: AbortSignal;
-}
-
 export interface BaseWriteCheckpointAPI {
   readonly writeCheckpointMode: WriteCheckpointMode;
   setWriteCheckpointMode(mode: WriteCheckpointMode): void;
@@ -93,7 +67,6 @@ export interface BaseWriteCheckpointAPI {
  * sync rules id.
  */
 export interface SyncStorageWriteCheckpointAPI extends BaseWriteCheckpointAPI {
-  batchCreateCustomWriteCheckpoints(checkpoints: BatchedCustomWriteCheckpointOptions[]): Promise<void>;
   lastWriteCheckpoint(filters: SyncStorageLastWriteCheckpointFilters): Promise<bigint | null>;
 }

@@ -102,10 +75,7 @@ export interface SyncStorageWriteCheckpointAPI extends BaseWriteCheckpointAPI {
  * sync rules identifiers for custom write checkpoints.
  */
 export interface WriteCheckpointAPI extends BaseWriteCheckpointAPI {
-  batchCreateCustomWriteCheckpoints(checkpoints: CustomWriteCheckpointOptions[]): Promise<void>;
   lastWriteCheckpoint(filters: LastWriteCheckpointFilters): Promise<bigint | null>;
-
-  watchUserWriteCheckpoint(options: WatchUserWriteCheckpointOptions): AsyncIterable<WriteCheckpointResult>;
 }

 export const DEFAULT_WRITE_CHECKPOINT_MODE = WriteCheckpointMode.MANAGED;
@@ -10,7 +10,10 @@ type NodeBuffer = Buffer<ArrayBuffer>;
  */
 export const BSON_DESERIALIZE_INTERNAL_OPTIONS: bson.DeserializeOptions = {
   // use bigint instead of Long
-  useBigInt64: true
+  useBigInt64: true,
+  // We cannot use promoteBuffers: true, since that also converst UUID to Buffer
+  // Instead, we need to handle bson.Binary when reading data
+  promoteBuffers: false
 };

 /**
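A sketch of why promoteBuffers stays false: binary values deserialize as bson.Binary (and UUIDs as bson.UUID, which extends Binary), so readers unwrap them explicitly instead of having UUIDs flattened to Buffers. The helper below is illustrative.

import * as bson from 'bson';

// Illustrative: unwrap bson.Binary values while leaving UUIDs intact.
function unwrapBinary(value: unknown): unknown {
  if (value instanceof bson.UUID) {
    // Keep UUIDs as-is; promoteBuffers: true would have converted these to Buffers.
    return value;
  } else if (value instanceof bson.Binary) {
    return value.buffer;
  }
  return value;
}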
@@ -97,7 +97,7 @@ export class BroadcastIterable<T> implements AsyncIterable<T> {
     }
   }

-  async *[Symbol.asyncIterator](signal?: AbortSignal): AsyncIterator<T> {
+  async *[Symbol.asyncIterator](signal?: AbortSignal): AsyncIterableIterator<T> {
     const sink = new LastValueSink(this.last);
     this.addSink(sink);
     try {
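A note on the return-type change: an AsyncIterableIterator is itself async-iterable, so the signal-scoped iterator can be consumed directly with for await. The sketch below is illustrative and the import path may differ.

import { BroadcastIterable } from './streams/BroadcastIterable.js';

// Illustrative: iterate a BroadcastIterable with an abort signal.
async function consume<T>(source: BroadcastIterable<T>, signal: AbortSignal): Promise<void> {
  for await (const value of source[Symbol.asyncIterator](signal)) {
    console.log(value);
  }
}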
@@ -1,4 +1,3 @@
 export * from './merge.js';
-export * from './Demultiplexer.js';
 export * from './LastValueSink.js';
 export * from './BroadcastIterable.js';
@@ -59,6 +59,12 @@ export class ServiceContextContainer implements ServiceContext {
     this.storageEngine = new storage.StorageEngine({
       configuration
     });
+    this.storageEngine.registerListener({
+      storageFatalError: (error) => {
+        // Propagate the error to the lifecycle engine
+        this.lifeCycleEngine.stopWithError(error);
+      }
+    });

     this.lifeCycleEngine.withLifecycle(this.storageEngine, {
       start: (storageEngine) => storageEngine.start(),