@powersync/service-core 1.12.0 → 1.13.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (91)
  1. package/CHANGELOG.md +30 -0
  2. package/dist/api/RouteAPI.d.ts +1 -5
  3. package/dist/api/diagnostics.js +1 -1
  4. package/dist/api/diagnostics.js.map +1 -1
  5. package/dist/entry/cli-entry.js +2 -2
  6. package/dist/entry/cli-entry.js.map +1 -1
  7. package/dist/index.d.ts +1 -0
  8. package/dist/index.js +1 -0
  9. package/dist/index.js.map +1 -1
  10. package/dist/metrics/open-telemetry/OpenTelemetryMetricsFactory.d.ts +1 -1
  11. package/dist/metrics/open-telemetry/OpenTelemetryMetricsFactory.js.map +1 -1
  12. package/dist/replication/AbstractReplicationJob.d.ts +4 -0
  13. package/dist/replication/AbstractReplicationJob.js.map +1 -1
  14. package/dist/replication/AbstractReplicator.d.ts +25 -1
  15. package/dist/replication/AbstractReplicator.js +53 -3
  16. package/dist/replication/AbstractReplicator.js.map +1 -1
  17. package/dist/replication/RelationCache.d.ts +9 -0
  18. package/dist/replication/RelationCache.js +20 -0
  19. package/dist/replication/RelationCache.js.map +1 -0
  20. package/dist/replication/replication-index.d.ts +1 -0
  21. package/dist/replication/replication-index.js +1 -0
  22. package/dist/replication/replication-index.js.map +1 -1
  23. package/dist/replication/replication-metrics.js +6 -0
  24. package/dist/replication/replication-metrics.js.map +1 -1
  25. package/dist/routes/endpoints/admin.js +2 -0
  26. package/dist/routes/endpoints/admin.js.map +1 -1
  27. package/dist/routes/endpoints/sync-rules.js +6 -6
  28. package/dist/routes/endpoints/sync-rules.js.map +1 -1
  29. package/dist/storage/BucketStorageBatch.d.ts +21 -3
  30. package/dist/storage/BucketStorageBatch.js +2 -1
  31. package/dist/storage/BucketStorageBatch.js.map +1 -1
  32. package/dist/storage/PersistedSyncRulesContent.d.ts +5 -0
  33. package/dist/storage/SourceTable.d.ts +17 -1
  34. package/dist/storage/SourceTable.js +28 -0
  35. package/dist/storage/SourceTable.js.map +1 -1
  36. package/dist/storage/StorageEngine.d.ts +3 -2
  37. package/dist/storage/StorageEngine.js +3 -0
  38. package/dist/storage/StorageEngine.js.map +1 -1
  39. package/dist/storage/StorageProvider.d.ts +2 -0
  40. package/dist/storage/SyncRulesBucketStorage.d.ts +18 -6
  41. package/dist/storage/SyncRulesBucketStorage.js.map +1 -1
  42. package/dist/storage/WriteCheckpointAPI.d.ts +0 -26
  43. package/dist/storage/WriteCheckpointAPI.js.map +1 -1
  44. package/dist/storage/bson.js +4 -1
  45. package/dist/storage/bson.js.map +1 -1
  46. package/dist/streams/BroadcastIterable.d.ts +1 -1
  47. package/dist/streams/streams-index.d.ts +0 -1
  48. package/dist/streams/streams-index.js +0 -1
  49. package/dist/streams/streams-index.js.map +1 -1
  50. package/dist/sync/BucketChecksumState.js +2 -2
  51. package/dist/sync/BucketChecksumState.js.map +1 -1
  52. package/dist/sync/sync.js +2 -2
  53. package/dist/sync/sync.js.map +1 -1
  54. package/dist/system/ServiceContext.js +6 -0
  55. package/dist/system/ServiceContext.js.map +1 -1
  56. package/dist/util/protocol-types.d.ts +9 -9
  57. package/dist/util/protocol-types.js.map +1 -1
  58. package/dist/util/utils.d.ts +1 -1
  59. package/package.json +5 -5
  60. package/src/api/RouteAPI.ts +1 -6
  61. package/src/api/diagnostics.ts +1 -1
  62. package/src/entry/cli-entry.ts +2 -2
  63. package/src/index.ts +2 -0
  64. package/src/metrics/open-telemetry/OpenTelemetryMetricsFactory.ts +3 -3
  65. package/src/replication/AbstractReplicationJob.ts +5 -0
  66. package/src/replication/AbstractReplicator.ts +56 -3
  67. package/src/replication/RelationCache.ts +25 -0
  68. package/src/replication/replication-index.ts +1 -0
  69. package/src/replication/replication-metrics.ts +7 -0
  70. package/src/routes/endpoints/admin.ts +2 -0
  71. package/src/routes/endpoints/sync-rules.ts +6 -6
  72. package/src/storage/BucketStorageBatch.ts +26 -4
  73. package/src/storage/PersistedSyncRulesContent.ts +6 -0
  74. package/src/storage/SourceTable.ts +44 -1
  75. package/src/storage/StorageEngine.ts +6 -2
  76. package/src/storage/StorageProvider.ts +3 -0
  77. package/src/storage/SyncRulesBucketStorage.ts +22 -6
  78. package/src/storage/WriteCheckpointAPI.ts +0 -30
  79. package/src/storage/bson.ts +4 -1
  80. package/src/streams/BroadcastIterable.ts +1 -1
  81. package/src/streams/streams-index.ts +0 -1
  82. package/src/sync/BucketChecksumState.ts +2 -2
  83. package/src/sync/sync.ts +3 -3
  84. package/src/system/ServiceContext.ts +6 -0
  85. package/src/util/protocol-types.ts +15 -10
  86. package/tsconfig.tsbuildinfo +1 -1
  87. package/dist/streams/Demultiplexer.d.ts +0 -52
  88. package/dist/streams/Demultiplexer.js +0 -128
  89. package/dist/streams/Demultiplexer.js.map +0 -1
  90. package/src/streams/Demultiplexer.ts +0 -165
  91. package/test/src/demultiplexer.test.ts +0 -205
package/src/api/RouteAPI.ts CHANGED
@@ -47,12 +47,7 @@ export interface RouteAPI {
    * @returns The replication lag: that is the amount of data which has not been
    * replicated yet, in bytes.
    */
-  getReplicationLag(options: ReplicationLagOptions): Promise<number | undefined>;
-
-  /**
-   * Get the current LSN or equivalent replication HEAD position identifier
-   */
-  getReplicationHead(): Promise<string>;
+  getReplicationLagBytes(options: ReplicationLagOptions): Promise<number | undefined>;
 
   /**
    * Get the current LSN or equivalent replication HEAD position identifier.
package/src/api/diagnostics.ts CHANGED
@@ -78,7 +78,7 @@ export async function getSyncRulesStatus(
 
   if (systemStorage) {
     try {
-      replication_lag_bytes = await apiHandler.getReplicationLag({
+      replication_lag_bytes = await apiHandler.getReplicationLagBytes({
        bucketStorage: systemStorage
      });
    } catch (e) {
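
The `getReplicationLag` → `getReplicationLagBytes` rename makes the unit explicit without changing behaviour. A hedged usage sketch; the names `api` and `systemStorage` are assumptions for illustration, not from this diff:

```ts
// Sketch only: `api` is some module's RouteAPI implementation,
// `systemStorage` a SyncRulesBucketStorage instance as in the hunk above.
const lagBytes = await api.getReplicationLagBytes({ bucketStorage: systemStorage });
const head = await api.getReplicationHead(); // current LSN / replication HEAD
console.log(`replication lag: ${lagBytes ?? 'unknown'} bytes at head ${head}`);
```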
package/src/entry/cli-entry.ts CHANGED
@@ -36,8 +36,8 @@ export function generateEntryProgram(startHandlers?: Record<utils.ServiceRunner,
     try {
       await entryProgram.parseAsync();
     } catch (e) {
-      logger.error('Fatal error', e);
-      process.exit(1);
+      logger.error('Fatal startup error - exiting with code 150.', e);
+      process.exit(150);
     }
   }
 };
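
Startup failures now exit with a distinct code, so a supervisor can tell an unrecoverable configuration error apart from a runtime crash (which still exits with code 1, see the AbstractReplicator diff further down). A hypothetical supervisor sketch, not part of the package; the invocation and restart policy are assumptions:

```ts
import { spawn } from 'node:child_process';

// Assumption: launching the published CLI entry point directly.
const child = spawn('node', ['dist/entry/cli-entry.js', 'start'], { stdio: 'inherit' });
child.on('exit', (code) => {
  if (code === 150) {
    // Bad config / CLI arguments - restarting will not help.
    console.error('Fatal startup error; not restarting.');
  } else if (code !== 0) {
    console.error(`Runtime failure (code ${code}); scheduling a restart.`);
  }
});
```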
package/src/index.ts CHANGED
@@ -41,3 +41,5 @@ export * as utils from './util/util-index.js';
 
 export * from './streams/streams-index.js';
 export * as streams from './streams/streams-index.js';
+
+export * as bson from 'bson';
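
The new `bson` re-export lets consumers use the exact `bson` version bundled with the service instead of pinning their own copy. A minimal sketch of consuming it:

```ts
import { bson } from '@powersync/service-core';

const bytes = bson.serialize({ createdAt: new Date(), count: 1 });
const doc = bson.deserialize(bytes);
```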
package/src/metrics/open-telemetry/OpenTelemetryMetricsFactory.ts CHANGED
@@ -1,11 +1,11 @@
 import { Meter, ValueType } from '@opentelemetry/api';
 import {
   Counter,
-  ObservableGauge,
-  UpDownCounter,
   MetricMetadata,
   MetricsFactory,
-  Precision
+  ObservableGauge,
+  Precision,
+  UpDownCounter
 } from '../metrics-interfaces.js';
 
 export class OpenTelemetryMetricsFactory implements MetricsFactory {
package/src/replication/AbstractReplicationJob.ts CHANGED
@@ -78,4 +78,9 @@ export abstract class AbstractReplicationJob {
   public get isStopped(): boolean {
     return this.abortController.signal.aborted;
   }
+
+  /**
+   * Get replication lag for this job in ms.
+   */
+  abstract getReplicationLagMillis(): Promise<number | undefined>;
 }
package/src/replication/AbstractReplicator.ts CHANGED
@@ -8,6 +8,7 @@ import { AbstractReplicationJob } from './AbstractReplicationJob.js';
 import { ErrorRateLimiter } from './ErrorRateLimiter.js';
 import { ConnectionTestResult } from './ReplicationModule.js';
 import { MetricsEngine } from '../metrics/MetricsEngine.js';
+import { ReplicationMetric } from '@powersync/service-types';
 
 // 5 minutes
 const PING_INTERVAL = 1_000_000_000n * 300n;
@@ -42,11 +43,16 @@ export abstract class AbstractReplicator<T extends AbstractReplicationJob = Abst
    * @private
    */
   private replicationJobs = new Map<number, T>();
-  private stopped = false;
+  /**
+   * Used for replication lag computation.
+   */
+  private activeReplicationJob: T | undefined = undefined;
 
   // First ping is only after 5 minutes, not when starting
   private lastPing = hrtime.bigint();
 
+  private abortController: AbortController | undefined;
+
   protected constructor(private options: AbstractReplicatorOptions) {
     this.logger = logger.child({ name: `Replicator:${options.id}` });
   }
@@ -79,7 +85,12 @@ export abstract class AbstractReplicator<T extends AbstractReplicationJob = Abst
     return this.options.metricsEngine;
   }
 
+  protected get stopped() {
+    return this.abortController?.signal.aborted;
+  }
+
   public async start(): Promise<void> {
+    this.abortController = new AbortController();
     this.runLoop().catch((e) => {
       this.logger.error('Data source fatal replication error', e);
       container.reporter.captureException(e);
@@ -87,10 +98,21 @@ export abstract class AbstractReplicator<T extends AbstractReplicationJob = Abst
         process.exit(1);
       }, 1000);
     });
+    this.metrics.getObservableGauge(ReplicationMetric.REPLICATION_LAG_SECONDS).setValueProvider(async () => {
+      const lag = await this.getReplicationLagMillis().catch((e) => {
+        this.logger.error('Failed to get replication lag', e);
+        return undefined;
+      });
+      if (lag == null) {
+        return undefined;
+      }
+      // ms to seconds
+      return Math.round(lag / 1000);
+    });
   }
 
   public async stop(): Promise<void> {
-    this.stopped = true;
+    this.abortController?.abort();
     let promises: Promise<void>[] = [];
     for (const job of this.replicationJobs.values()) {
       promises.push(job.stop());
@@ -161,8 +183,12 @@ export abstract class AbstractReplicator<T extends AbstractReplicationJob = Abst
     const existingJobs = new Map<number, T>(this.replicationJobs.entries());
     const replicatingSyncRules = await this.storage.getReplicatingSyncRules();
     const newJobs = new Map<number, T>();
+    let activeJob: T | undefined = undefined;
     for (let syncRules of replicatingSyncRules) {
       const existingJob = existingJobs.get(syncRules.id);
+      if (syncRules.active && activeJob == null) {
+        activeJob = existingJob;
+      }
       if (existingJob && !existingJob.isStopped) {
         // No change
         existingJobs.delete(syncRules.id);
@@ -188,6 +214,9 @@ export abstract class AbstractReplicator<T extends AbstractReplicationJob = Abst
 
         newJobs.set(syncRules.id, newJob);
         newJob.start();
+        if (syncRules.active) {
+          activeJob = newJob;
+        }
       } catch (e) {
         // Could be a sync rules parse error,
         // for example from stricter validation that was added.
@@ -199,6 +228,7 @@ export abstract class AbstractReplicator<T extends AbstractReplicationJob = Abst
     }
 
     this.replicationJobs = newJobs;
+    this.activeReplicationJob = activeJob;
 
     // Stop any orphaned jobs that no longer have sync rules.
     // Termination happens below
@@ -216,6 +246,7 @@ export abstract class AbstractReplicator<T extends AbstractReplicationJob = Abst
     const stopped = await this.storage.getStoppedSyncRules();
     for (let syncRules of stopped) {
       try {
+        // TODO: Do this in the "background", allowing the periodic refresh to continue
        const syncRuleStorage = this.storage.getInstance(syncRules, { skipLifecycleHooks: true });
        await this.terminateSyncRules(syncRuleStorage);
      } catch (e) {
@@ -231,9 +262,31 @@ export abstract class AbstractReplicator<T extends AbstractReplicationJob = Abst
   protected async terminateSyncRules(syncRuleStorage: storage.SyncRulesBucketStorage) {
     this.logger.info(`Terminating sync rules: ${syncRuleStorage.group_id}...`);
     await this.cleanUp(syncRuleStorage);
-    await syncRuleStorage.terminate();
+    await syncRuleStorage.terminate({ signal: this.abortController?.signal, clearStorage: true });
     this.logger.info(`Successfully terminated sync rules: ${syncRuleStorage.group_id}`);
   }
 
   abstract testConnection(): Promise<ConnectionTestResult>;
+
+  /**
+   * Measure replication lag in milliseconds.
+   *
+   * In general, this is the difference between now() and the time at which the oldest
+   * not-yet-committed record was written (committed) to the source database.
+   *
+   * This is roughly a measure of the _average_ amount of time we're behind.
+   * If we get a new change as soon as each previous one has finished processing, and each change takes 1000ms
+   * to process, the average replication lag will be 500ms, not 1000ms.
+   *
+   * 1. When we are actively replicating, this is the difference between now and the time the change was
+   *    written to the source database.
+   * 2. When the replication stream is idle, this is either 0, or the delay for keepalive messages to make it to us.
+   * 3. When the active replication stream is in an error state, this is the time since the last successful commit.
+   * 4. If there is no active replication stream, this is undefined.
+   *
+   * "processing" replication streams are not taken into account for this metric.
+   */
+  async getReplicationLagMillis(): Promise<number | undefined> {
+    return this.activeReplicationJob?.getReplicationLagMillis();
+  }
 }
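
The doc comment above defines four lag cases. A minimal sketch of how a concrete job could compute them; the state fields are hypothetical illustrations, not the actual implementation:

```ts
// `oldestUncommittedChange`: source commit time of the oldest change we have
// not committed to storage yet; `lastCommitTime`: wall-clock time of our last
// successful commit. Both are assumed job-internal state.
function replicationLagMillis(
  oldestUncommittedChange: Date | null,
  isIdle: boolean,
  lastCommitTime: Date | null
): number | undefined {
  if (oldestUncommittedChange != null) {
    // Case 1: actively replicating - age of the oldest uncommitted change.
    return Date.now() - oldestUncommittedChange.getTime();
  }
  if (isIdle) {
    // Case 2: idle and caught up (keepalive delay ignored in this sketch).
    return 0;
  }
  if (lastCommitTime != null) {
    // Case 3: stream in an error state - time since the last successful commit.
    return Date.now() - lastCommitTime.getTime();
  }
  // Case 4: no active replication stream.
  return undefined;
}
```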
package/src/replication/RelationCache.ts ADDED
@@ -0,0 +1,25 @@
+import { SourceTable } from '../storage/SourceTable.js';
+
+export class RelationCache<T> {
+  private cache = new Map<string | number, SourceTable>();
+  private idFunction: (item: T | SourceTable) => string | number;
+
+  constructor(idFunction: (item: T | SourceTable) => string | number) {
+    this.idFunction = idFunction;
+  }
+
+  update(table: SourceTable) {
+    const id = this.idFunction(table);
+    this.cache.set(id, table);
+  }
+
+  get(source: T | SourceTable): SourceTable | undefined {
+    const id = this.idFunction(source);
+    return this.cache.get(id);
+  }
+
+  delete(source: T | SourceTable): boolean {
+    const id = this.idFunction(source);
+    return this.cache.delete(id);
+  }
+}
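
A usage sketch for the new cache. The Postgres-style `oid` lookup shape, the import path, and `resolvedTable` are assumptions for illustration:

```ts
import { RelationCache, SourceTable } from '@powersync/service-core';

interface PgRelation {
  oid: number;
}

// The id function must produce the same key for a lookup value and for the
// SourceTable it resolves to (objectId assumed numeric here).
const cache = new RelationCache<PgRelation>((item) =>
  item instanceof SourceTable ? Number(item.objectId) : item.oid
);

// After resolving a table through storage:
// cache.update(resolvedTable);

// On an incoming change for relation 16384; undefined if not resolved yet:
const table = cache.get({ oid: 16384 });
```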
package/src/replication/replication-index.ts CHANGED
@@ -4,3 +4,4 @@ export * from './ErrorRateLimiter.js';
 export * from './ReplicationEngine.js';
 export * from './ReplicationModule.js';
 export * from './replication-metrics.js';
+export * from './RelationCache.js';
package/src/replication/replication-metrics.ts CHANGED
@@ -26,6 +26,11 @@ export function createCoreReplicationMetrics(engine: MetricsEngine): void {
     name: ReplicationMetric.CHUNKS_REPLICATED,
     description: 'Total number of replication chunks'
   });
+
+  engine.createObservableGauge({
+    name: ReplicationMetric.REPLICATION_LAG_SECONDS,
+    description: 'Replication lag between the source database and PowerSync instance'
+  });
 }
 
 /**
@@ -42,4 +47,6 @@ export function initializeCoreReplicationMetrics(engine: MetricsEngine): void {
   rows_replicated_total.add(0);
   transactions_replicated_total.add(0);
   chunks_replicated_total.add(0);
+  // REPLICATION_LAG_SECONDS is not explicitly initialized - the value remains "unknown" until the first
+  // value is reported.
 }
package/src/routes/endpoints/admin.ts CHANGED
@@ -168,6 +168,8 @@ export const validate = routeDefinition({
     // Dummy values
     id: 0,
     slot_name: '',
+    active: false,
+    last_checkpoint_lsn: '',
 
     parsed() {
       return {
package/src/routes/endpoints/sync-rules.ts CHANGED
@@ -202,13 +202,13 @@ async function debugSyncRules(apiHandler: RouteAPI, sync_rules: string) {
 
   return {
     valid: true,
-    bucket_definitions: rules.bucket_descriptors.map((d) => {
-      let all_parameter_queries = [...d.parameter_queries.values()].flat();
-      let all_data_queries = [...d.data_queries.values()].flat();
+    bucket_definitions: rules.bucketDescriptors.map((d) => {
+      let all_parameter_queries = [...d.parameterQueries.values()].flat();
+      let all_data_queries = [...d.dataQueries.values()].flat();
       return {
         name: d.name,
-        bucket_parameters: d.bucket_parameters,
-        global_parameter_queries: d.global_parameter_queries.map((q) => {
+        bucket_parameters: d.bucketParameters,
+        global_parameter_queries: d.globalParameterQueries.map((q) => {
           return {
             sql: q.sql
           };
@@ -217,7 +217,7 @@ async function debugSyncRules(apiHandler: RouteAPI, sync_rules: string) {
       return {
         sql: q.sql,
         table: q.sourceTable,
-        input_parameters: q.input_parameters
+        input_parameters: q.inputParameters
       };
     }),
 
package/src/storage/BucketStorageBatch.ts CHANGED
@@ -2,12 +2,13 @@ import { ObserverClient } from '@powersync/lib-services-framework';
 import { EvaluatedParameters, EvaluatedRow, SqliteRow, ToastableSqliteRow } from '@powersync/service-sync-rules';
 import { BSON } from 'bson';
 import { ReplicationEventPayload } from './ReplicationEventPayload.js';
-import { SourceTable } from './SourceTable.js';
+import { SourceTable, TableSnapshotStatus } from './SourceTable.js';
 import { BatchedCustomWriteCheckpointOptions } from './storage-index.js';
 import { InternalOpId } from '../util/utils.js';
 
 export const DEFAULT_BUCKET_BATCH_COMMIT_OPTIONS: ResolvedBucketBatchCommitOptions = {
-  createEmptyCheckpoints: true
+  createEmptyCheckpoints: true,
+  oldestUncommittedChange: null
 };
 
 export interface BucketStorageBatch extends ObserverClient<BucketBatchStorageListener>, AsyncDisposable {
@@ -38,12 +39,14 @@ export interface BucketStorageBatch extends ObserverClient<BucketBatchStorageLis
    *
    * @returns null if there are no changes to flush.
    */
-  flush(): Promise<FlushedResult | null>;
+  flush(options?: BatchBucketFlushOptions): Promise<FlushedResult | null>;
 
   /**
    * Flush and commit any saved ops. This creates a new checkpoint by default.
    *
    * Only call this after a transaction.
+   *
+   * Returns true if either (1) a new checkpoint was created, or (2) there are no changes to commit.
    */
   commit(lsn: string, options?: BucketBatchCommitOptions): Promise<boolean>;
 
@@ -56,6 +59,14 @@ export interface BucketStorageBatch extends ObserverClient<BucketBatchStorageLis
    */
   keepalive(lsn: string): Promise<boolean>;
 
+  /**
+   * Set the LSN for a snapshot, before starting replication.
+   *
+   * Not required if the source database keeps track of this, for example with
+   * PostgreSQL logical replication slots.
+   */
+  setSnapshotLsn(lsn: string): Promise<void>;
+
   /**
    * Get the last checkpoint LSN, from either commit or keepalive.
    */
@@ -63,6 +74,8 @@ export interface BucketStorageBatch extends ObserverClient<BucketBatchStorageLis
 
   markSnapshotDone(tables: SourceTable[], no_checkpoint_before_lsn: string): Promise<SourceTable[]>;
 
+  updateTableProgress(table: SourceTable, progress: Partial<TableSnapshotStatus>): Promise<SourceTable>;
+
   /**
    * Queues the creation of a custom Write Checkpoint. This will be persisted after operations are flushed.
    */
@@ -148,7 +161,16 @@ export interface FlushedResult {
   flushed_op: InternalOpId;
 }
 
-export interface BucketBatchCommitOptions {
+export interface BatchBucketFlushOptions {
+  /**
+   * The timestamp of the first change in this batch, according to the source database.
+   *
+   * Used to estimate replication lag.
+   */
+  oldestUncommittedChange?: Date | null;
+}
+
+export interface BucketBatchCommitOptions extends BatchBucketFlushOptions {
   /**
    * Creates a new checkpoint even if there were no persisted operations.
    * Defaults to true.
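
`oldestUncommittedChange` feeds the lag estimate: it records when the first change in the batch was committed on the source. A hedged replication-loop sketch; the transaction shape and the `save` call are assumptions:

```ts
// `tx` is a hypothetical decoded source transaction.
let oldestUncommittedChange: Date | null = null;

for (const change of tx.changes) {
  // Remember the source commit time of the first buffered change.
  oldestUncommittedChange ??= change.sourceCommitTime;
  await batch.save(change); // hypothetical save call
}
// Committing records both the checkpoint and the lag marker.
await batch.commit(tx.endLsn, { oldestUncommittedChange });
```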
package/src/storage/PersistedSyncRulesContent.ts CHANGED
@@ -9,6 +9,12 @@ export interface PersistedSyncRulesContent {
   readonly id: number;
   readonly sync_rules_content: string;
   readonly slot_name: string;
+  /**
+   * True if this is the "active" copy of the sync rules.
+   */
+  readonly active: boolean;
+
+  readonly last_checkpoint_lsn: string | null;
 
   readonly last_fatal_error?: string | null;
   readonly last_keepalive_ts?: Date | null;
package/src/storage/SourceTable.ts CHANGED
@@ -2,6 +2,12 @@ import { DEFAULT_TAG } from '@powersync/service-sync-rules';
 import * as util from '../util/util-index.js';
 import { ColumnDescriptor } from './SourceEntity.js';
 
+export interface TableSnapshotStatus {
+  totalEstimatedCount: number;
+  replicatedCount: number;
+  lastKey: Uint8Array | null;
+}
+
 export class SourceTable {
   static readonly DEFAULT_TAG = DEFAULT_TAG;
 
@@ -32,6 +38,13 @@
    */
   public syncEvent = true;
 
+  /**
+   * Always undefined if snapshotComplete = true.
+   *
+   * May be set if snapshotComplete = false.
+   */
+  public snapshotStatus: TableSnapshotStatus | undefined = undefined;
+
   constructor(
     public readonly id: any,
     public readonly connectionTag: string,
@@ -40,7 +53,7 @@
     public readonly table: string,
 
     public readonly replicaIdColumns: ColumnDescriptor[],
-    public readonly snapshotComplete: boolean
+    public snapshotComplete: boolean
   ) {}
 
   get hasReplicaIdentity() {
@@ -68,4 +81,34 @@
   get syncAny() {
     return this.syncData || this.syncParameters || this.syncEvent;
   }
+
+  /**
+   * In-memory clone of the table status.
+   */
+  clone() {
+    const copy = new SourceTable(
+      this.id,
+      this.connectionTag,
+      this.objectId,
+      this.schema,
+      this.table,
+      this.replicaIdColumns,
+      this.snapshotComplete
+    );
+    copy.syncData = this.syncData;
+    copy.syncParameters = this.syncParameters;
+    copy.snapshotStatus = this.snapshotStatus;
+    return copy;
+  }
+
+  formatSnapshotProgress() {
+    if (this.snapshotComplete || this.snapshotStatus == null) {
+      // Should not happen
+      return '-';
+    } else if (this.snapshotStatus.totalEstimatedCount < 0) {
+      return `${this.snapshotStatus.replicatedCount}/?`;
+    } else {
+      return `${this.snapshotStatus.replicatedCount}/~${this.snapshotStatus.totalEstimatedCount}`;
+    }
+  }
 }
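
Together with `updateTableProgress` on the batch (see the BucketStorageBatch diff above), the new snapshot fields enable resumable, observable snapshots. A sketch of a chunked snapshot loop; the `snapshotChunks` helper, chunk shape, and `logger` are hypothetical:

```ts
let replicatedCount = 0;
for await (const chunk of snapshotChunks(table)) {
  replicatedCount += chunk.rows.length;
  table = await batch.updateTableProgress(table, {
    replicatedCount,
    totalEstimatedCount: chunk.totalEstimate, // negative if unknown
    lastKey: chunk.lastKey // resume point if the snapshot is interrupted
  });
  logger.info(`Snapshot ${table.table}: ${table.formatSnapshotProgress()}`);
}
```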
package/src/storage/StorageEngine.ts CHANGED
@@ -1,7 +1,7 @@
-import { BaseObserver, logger } from '@powersync/lib-services-framework';
+import { BaseObserver, logger, ServiceError } from '@powersync/lib-services-framework';
 import { ResolvedPowerSyncConfig } from '../util/util-index.js';
-import { ActiveStorage, BucketStorageProvider } from './StorageProvider.js';
 import { BucketStorageFactory } from './BucketStorageFactory.js';
+import { ActiveStorage, BucketStorageProvider } from './StorageProvider.js';
 
 export type StorageEngineOptions = {
   configuration: ResolvedPowerSyncConfig;
@@ -9,6 +9,7 @@ export type StorageEngineOptions = {
 
 export interface StorageEngineListener {
   storageActivated: (storage: BucketStorageFactory) => void;
+  storageFatalError: (error: ServiceError) => void;
 }
 
 export class StorageEngine extends BaseObserver<StorageEngineListener> {
@@ -47,6 +48,9 @@
       resolvedConfig: configuration
     });
     this.iterateListeners((cb) => cb.storageActivated?.(this.activeBucketStorage));
+    this.currentActiveStorage.onFatalError?.((error) => {
+      this.iterateListeners((cb) => cb.storageFatalError?.(error));
+    });
     logger.info(`Successfully activated storage: ${configuration.storage.type}.`);
     logger.info('Successfully started Storage Engine.');
   }
package/src/storage/StorageProvider.ts CHANGED
@@ -1,3 +1,4 @@
+import { ServiceError } from '@powersync/lib-services-framework';
 import * as util from '../util/util-index.js';
 import { BucketStorageFactory } from './BucketStorageFactory.js';
 
@@ -9,6 +10,8 @@ export interface ActiveStorage {
    * Tear down / drop the storage permanently
    */
   tearDown(): Promise<boolean>;
+
+  onFatalError?(callback: (error: ServiceError) => void): void;
 }
 
 export interface GetStorageOptions {
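
A storage implementation can now surface unrecoverable errors through `onFatalError`, and the engine forwards them as `storageFatalError` events. A hedged listener sketch; the `registerListener` call follows the `BaseObserver` pattern used above, and the shutdown policy is an assumption:

```ts
storageEngine.registerListener({
  storageFatalError: (error) => {
    // Assumption: treating a fatal storage error as unrecoverable.
    logger.error('Storage reported a fatal error; shutting down.', error);
    process.exit(1);
  }
});
```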
package/src/storage/SyncRulesBucketStorage.ts CHANGED
@@ -1,7 +1,7 @@
-import { ObserverClient } from '@powersync/lib-services-framework';
+import { Logger, ObserverClient } from '@powersync/lib-services-framework';
 import { ParameterLookup, SqlSyncRules, SqliteJsonRow } from '@powersync/service-sync-rules';
 import * as util from '../util/util-index.js';
-import { BucketStorageBatch, FlushedResult } from './BucketStorageBatch.js';
+import { BucketStorageBatch, FlushedResult, SaveUpdate } from './BucketStorageBatch.js';
 import { BucketStorageFactory } from './BucketStorageFactory.js';
 import { ParseSyncRulesOptions } from './PersistedSyncRulesContent.js';
 import { SourceEntityDescriptor } from './SourceEntity.js';
@@ -48,7 +48,7 @@ export interface SyncRulesBucketStorage
   /**
    * Clear the storage, without changing state.
    */
-  clear(): Promise<void>;
+  clear(options?: ClearStorageOptions): Promise<void>;
 
   autoActivate(): Promise<void>;
 
@@ -125,6 +125,7 @@ export interface SyncRuleStatus {
   checkpoint_lsn: string | null;
   active: boolean;
   snapshot_done: boolean;
+  snapshot_lsn: string | null;
 }
 export interface ResolveTableOptions {
   group_id: number;
@@ -159,6 +160,15 @@ export interface StartBatchOptions extends ParseSyncRulesOptions {
    * This will avoid creating new operations for rows previously replicated.
    */
   skipExistingRows?: boolean;
+
+  /**
+   * Callback called if we streamed an update to a record that we don't have yet.
+   *
+   * This is expected to happen in some initial replication edge cases, only if storeCurrentData = true.
+   */
+  markRecordUnavailable?: BucketStorageMarkRecordUnavailable;
+
+  logger?: Logger;
 }
 
 export interface CompactOptions {
@@ -200,7 +210,11 @@
   moveBatchQueryLimit?: number;
 }
 
-export interface TerminateOptions {
+export interface ClearStorageOptions {
+  signal?: AbortSignal;
+}
+
+export interface TerminateOptions extends ClearStorageOptions {
   /**
    * If true, also clear the storage before terminating.
   */
@@ -256,8 +270,8 @@ export interface StorageCheckpointUpdate extends WriteCheckpoint {
 }
 
 export interface GetCheckpointChangesOptions {
-  lastCheckpoint: util.InternalOpId;
-  nextCheckpoint: util.InternalOpId;
+  lastCheckpoint: ReplicationCheckpoint;
+  nextCheckpoint: ReplicationCheckpoint;
 }
 
 export interface CheckpointChanges {
@@ -274,3 +288,5 @@ export const CHECKPOINT_INVALIDATE_ALL: CheckpointChanges = {
   updatedParameterLookups: new Set<string>(),
   invalidateParameterBuckets: true
 };
+
+export type BucketStorageMarkRecordUnavailable = (record: SaveUpdate) => void;
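
`markRecordUnavailable` lets replication recover when a streamed update references a row that was never snapshotted. A hedged sketch of the new `StartBatchOptions` fields; the `startBatch` call shape, the option values, and `resnapshotQueue` are assumptions from context, not confirmed by this diff:

```ts
await bucketStorage.startBatch(
  {
    zeroLSN: '0/00000000', // hypothetical, connection-specific
    defaultSchema: 'public',
    storeCurrentData: true,
    markRecordUnavailable: (record) => {
      // Row update arrived before its snapshot: queue a targeted re-snapshot
      // instead of failing replication (resnapshotQueue is hypothetical).
      resnapshotQueue.push(record);
    },
    logger: replicationLogger // per-stream child logger
  },
  async (batch) => {
    // ... stream changes into the batch
  }
);
```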
package/src/storage/WriteCheckpointAPI.ts CHANGED
@@ -50,37 +50,11 @@ export interface ManagedWriteCheckpointFilters extends BaseWriteCheckpointIdenti
   heads: Record<string, string>;
 }
 
-export interface WriteCheckpointResult {
-  /**
-   * Write checkpoint id (also referred to as client_id).
-   *
-   * If null, there is no write checkpoint for the client.
-   */
-  id: bigint | null;
-
-  /**
-   * LSN for the checkpoint.
-   *
-   * This will change when we support multiple connections.
-   *
-   * For managed write checkpoints, this LSN must be exceeded by the checkpoint / replication head to be valid.
-   *
-   * For custom write checkpoints, this will be null, and the write checkpoint is valid for all LSNs.
-   */
-  lsn: string | null;
-}
-
 export type ManagedWriteCheckpointOptions = ManagedWriteCheckpointFilters;
 
 export type SyncStorageLastWriteCheckpointFilters = BaseWriteCheckpointIdentifier | ManagedWriteCheckpointFilters;
 export type LastWriteCheckpointFilters = CustomWriteCheckpointFilters | ManagedWriteCheckpointFilters;
 
-export interface WatchUserWriteCheckpointOptions {
-  user_id: string;
-  sync_rules_id: number;
-  signal: AbortSignal;
-}
-
 export interface BaseWriteCheckpointAPI {
   readonly writeCheckpointMode: WriteCheckpointMode;
   setWriteCheckpointMode(mode: WriteCheckpointMode): void;
@@ -93,7 +67,6 @@ export interface BaseWriteCheckpointAPI {
  * sync rules id.
  */
 export interface SyncStorageWriteCheckpointAPI extends BaseWriteCheckpointAPI {
-  batchCreateCustomWriteCheckpoints(checkpoints: BatchedCustomWriteCheckpointOptions[]): Promise<void>;
   lastWriteCheckpoint(filters: SyncStorageLastWriteCheckpointFilters): Promise<bigint | null>;
 }
 
@@ -102,10 +75,7 @@ export interface SyncStorageWriteCheckpointAPI extends BaseWriteCheckpointAPI {
  * sync rules identifiers for custom write checkpoints.
  */
 export interface WriteCheckpointAPI extends BaseWriteCheckpointAPI {
-  batchCreateCustomWriteCheckpoints(checkpoints: CustomWriteCheckpointOptions[]): Promise<void>;
   lastWriteCheckpoint(filters: LastWriteCheckpointFilters): Promise<bigint | null>;
-
-  watchUserWriteCheckpoint(options: WatchUserWriteCheckpointOptions): AsyncIterable<WriteCheckpointResult>;
 }
 
 export const DEFAULT_WRITE_CHECKPOINT_MODE = WriteCheckpointMode.MANAGED;
package/src/storage/bson.ts CHANGED
@@ -10,7 +10,10 @@ type NodeBuffer = Buffer<ArrayBuffer>;
  */
 export const BSON_DESERIALIZE_INTERNAL_OPTIONS: bson.DeserializeOptions = {
   // use bigint instead of Long
-  useBigInt64: true
+  useBigInt64: true,
+  // We cannot use promoteBuffers: true, since that also converts UUID to Buffer.
+  // Instead, we need to handle bson.Binary when reading data.
+  promoteBuffers: false
 };
 
 /**
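
With `promoteBuffers: false`, binary fields deserialize as `bson.Binary`, leaving UUID subtypes intact, so readers convert explicitly. A small sketch:

```ts
import * as bson from 'bson';

// Convert only generic binary values; leave UUIDs (and everything else) as-is.
function toBytes(value: unknown): unknown {
  if (value instanceof bson.Binary && value.sub_type === bson.Binary.SUBTYPE_DEFAULT) {
    return value.buffer; // Uint8Array of the raw bytes
  }
  return value;
}
```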
package/src/streams/BroadcastIterable.ts CHANGED
@@ -97,7 +97,7 @@ export class BroadcastIterable<T> implements AsyncIterable<T> {
     }
   }
 
-  async *[Symbol.asyncIterator](signal?: AbortSignal): AsyncIterator<T> {
+  async *[Symbol.asyncIterator](signal?: AbortSignal): AsyncIterableIterator<T> {
     const sink = new LastValueSink(this.last);
     this.addSink(sink);
     try {
package/src/streams/streams-index.ts CHANGED
@@ -1,4 +1,3 @@
 export * from './merge.js';
-export * from './Demultiplexer.js';
 export * from './LastValueSink.js';
 export * from './BroadcastIterable.js';
package/src/sync/BucketChecksumState.ts CHANGED
@@ -92,7 +92,7 @@ export class BucketChecksumState {
    */
   async buildNextCheckpointLine(next: storage.StorageCheckpointUpdate): Promise<CheckpointLine | null> {
     const { writeCheckpoint, base } = next;
-    const user_id = this.parameterState.syncParams.user_id;
+    const user_id = this.parameterState.syncParams.userId;
 
     const storage = this.bucketStorage;
 
@@ -378,7 +378,7 @@ export class BucketParameterState {
     );
     this.logger.error(error.message, {
       checkpoint: checkpoint,
-      user_id: this.syncParams.user_id,
+      user_id: this.syncParams.userId,
       buckets: update.buckets.length
     });